celery-4.1.0/0000755000175000017500000000000013135426347012735 5ustar omeromer00000000000000celery-4.1.0/MANIFEST.in0000644000175000017500000000127313130607475014474 0ustar omeromer00000000000000include CONTRIBUTORS.txt include Changelog include LICENSE include README.rst include MANIFEST.in include TODO include setup.cfg include setup.py recursive-include t *.py recursive-include docs * recursive-include extra/bash-completion * recursive-include extra/centos * recursive-include extra/generic-init.d * recursive-include extra/macOS * recursive-include extra/supervisord * recursive-include extra/systemd * recursive-include extra/zsh-completion * recursive-include examples * recursive-include requirements *.txt *.rst recursive-include celery/utils/static *.png recursive-exclude docs/_build * recursive-exclude * __pycache__ recursive-exclude * *.py[co] recursive-exclude * .*.sw[a-z] celery-4.1.0/TODO0000644000175000017500000000012413130607475013420 0ustar omeromer00000000000000Please see our Issue Tracker at GitHub: https://github.com/celery/celery/issues celery-4.1.0/t/0000755000175000017500000000000013135426347013200 5ustar omeromer00000000000000celery-4.1.0/t/unit/0000755000175000017500000000000013135426347014157 5ustar omeromer00000000000000celery-4.1.0/t/unit/compat_modules/0000755000175000017500000000000013135426347017172 5ustar omeromer00000000000000celery-4.1.0/t/unit/compat_modules/__init__.py0000644000175000017500000000000013130607475021267 0ustar omeromer00000000000000celery-4.1.0/t/unit/compat_modules/test_decorators.py0000644000175000017500000000163713130607475022755 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import warnings from celery.task import base def add(x, y): return x + y @pytest.mark.usefixtures('depends_on_current_app') class test_decorators: def test_task_alias(self): from celery import task assert task.__file__ assert task(add) def setup(self): with warnings.catch_warnings(record=True): from celery import decorators self.decorators = decorators def assert_compat_decorator(self, decorator, type, **opts): task = decorator(**opts)(add) assert task(8, 8) == 16 assert isinstance(task, type) def test_task(self): self.assert_compat_decorator(self.decorators.task, base.BaseTask) def test_periodic_task(self): self.assert_compat_decorator( self.decorators.periodic_task, base.BaseTask, run_every=1, ) celery-4.1.0/t/unit/compat_modules/test_compat.py0000644000175000017500000000312513130607475022065 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from datetime import timedelta from celery.five import bytes_if_py2 from celery.schedules import schedule from celery.task import ( periodic_task, PeriodicTask ) class test_periodic_tasks: def setup(self): self.app.set_current() # @depends_on_current_app @periodic_task(app=self.app, shared=False, run_every=schedule(timedelta(hours=1), app=self.app)) def my_periodic(): pass self.my_periodic = my_periodic def now(self): return self.app.now() def test_must_have_run_every(self): with pytest.raises(NotImplementedError): type(bytes_if_py2('Foo'), (PeriodicTask,), { '__module__': __name__, }) def test_remaining_estimate(self): s = self.my_periodic.run_every assert isinstance( s.remaining_estimate(s.maybe_make_aware(self.now())), timedelta) def test_is_due_not_due(self): due, remaining = self.my_periodic.run_every.is_due(self.now()) assert not due # This assertion may fail if executed in the # first minute of an hour, thus 59 instead of 60 
assert remaining > 59

    def test_is_due(self):
        p = self.my_periodic
        due, remaining = p.run_every.is_due(
            self.now() - p.run_every.run_every,
        )
        assert due
        assert remaining == p.run_every.run_every.total_seconds()

    def test_schedule_repr(self):
        p = self.my_periodic
        assert repr(p.run_every)
celery-4.1.0/t/unit/compat_modules/test_messaging.py0000644000175000017500000000053013130607475022554 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest

from celery import messaging


@pytest.mark.usefixtures('depends_on_current_app')
class test_compat_messaging_module:

    def test_get_consume_set(self):
        conn = messaging.establish_connection()
        messaging.get_consumer_set(conn).close()
        conn.close()
celery-4.1.0/t/unit/compat_modules/test_compat_utils.py0000644000175000017500000000232613130607475023307 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import celery
import pytest

from celery.app.task import Task as ModernTask
from celery.task.base import Task as CompatTask


@pytest.mark.usefixtures('depends_on_current_app')
class test_MagicModule:

    def test_class_property_set_without_type(self):
        assert ModernTask.__dict__['app'].__get__(CompatTask())

    def test_class_property_set_on_class(self):
        assert (ModernTask.__dict__['app'].__set__(None, None) is
                ModernTask.__dict__['app'])

    def test_class_property_set(self, app):
        class X(CompatTask):
            pass
        ModernTask.__dict__['app'].__set__(X(), app)
        assert X.app is app

    def test_dir(self):
        assert dir(celery.messaging)

    def test_direct(self):
        assert celery.task

    def test_app_attrs(self):
        assert (celery.task.control.broadcast ==
                celery.current_app.control.broadcast)

    def test_decorators_task(self):
        @celery.decorators.task
        def _test_decorators_task():
            pass

    def test_decorators_periodic_task(self):
        @celery.decorators.periodic_task(run_every=3600)
        def _test_decorators_ptask():
            pass
celery-4.1.0/t/unit/apps/0000755000175000017500000000000013135426347015122 5ustar omeromer00000000000000celery-4.1.0/t/unit/apps/__init__.py0000644000175000017500000000000013130607475017217 0ustar omeromer00000000000000celery-4.1.0/t/unit/apps/test_multi.py0000644000175000017500000003367113130607475017675 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import errno
import pytest
import signal
import sys

from case import Mock, call, patch, skip

from celery.apps.multi import (
    Cluster, MultiParser, NamespacedOptionParser, Node, format_opt,
)


class test_functions:

    def test_parse_ns_range(self):
        m = MultiParser()
        assert m._parse_ns_range('1-3', True) == ['1', '2', '3']
        assert m._parse_ns_range('1-3', False) == ['1-3']
        assert m._parse_ns_range('1-3,10,11,20', True) == [
            '1', '2', '3', '10', '11', '20',
        ]

    def test_format_opt(self):
        assert format_opt('--foo', None) == '--foo'
        assert format_opt('-c', 1) == '-c 1'
        assert format_opt('--log', 'foo') == '--log=foo'


class test_NamespacedOptionParser:

    def test_parse(self):
        x = NamespacedOptionParser(['-c:1,3', '4'])
        x.parse()
        assert x.namespaces.get('1,3') == {'-c': '4'}
        x = NamespacedOptionParser(['-c:jerry,elaine', '5',
                                    '--loglevel:kramer=DEBUG',
                                    '--flag',
                                    '--logfile=foo', '-Q', 'bar', 'a', 'b',
                                    '--', '.disable_rate_limits=1'])
        x.parse()
        assert x.options == {
            '--logfile': 'foo',
            '-Q': 'bar',
            '--flag': None,
        }
        assert x.values == ['a', 'b']
        assert x.namespaces.get('jerry,elaine') == {'-c': '5'}
        assert x.namespaces.get('kramer') == {'--loglevel': 'DEBUG'}
        assert x.passthrough == '-- .disable_rate_limits=1'


def multi_args(p, *args, **kwargs): return
MultiParser(*args, **kwargs).parse(p) class test_multi_args: @patch('celery.apps.multi.gethostname') def test_parse(self, gethostname): gethostname.return_value = 'example.com' p = NamespacedOptionParser([ '-c:jerry,elaine', '5', '--loglevel:kramer=DEBUG', '--flag', '--logfile=foo', '-Q', 'bar', 'jerry', 'elaine', 'kramer', '--', '.disable_rate_limits=1', ]) p.parse() it = multi_args(p, cmd='COMMAND', append='*AP*', prefix='*P*', suffix='*S*') nodes = list(it) def assert_line_in(name, args): assert name in {n.name for n in nodes} argv = None for node in nodes: if node.name == name: argv = node.argv assert argv for arg in args: assert arg in argv assert_line_in( '*P*jerry@*S*', ['COMMAND', '-n *P*jerry@*S*', '-Q bar', '-c 5', '--flag', '--logfile=foo', '-- .disable_rate_limits=1', '*AP*'], ) assert_line_in( '*P*elaine@*S*', ['COMMAND', '-n *P*elaine@*S*', '-Q bar', '-c 5', '--flag', '--logfile=foo', '-- .disable_rate_limits=1', '*AP*'], ) assert_line_in( '*P*kramer@*S*', ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*', '-Q bar', '--flag', '--logfile=foo', '-- .disable_rate_limits=1', '*AP*'], ) expand = nodes[0].expander assert expand('%h') == '*P*jerry@*S*' assert expand('%n') == '*P*jerry' nodes2 = list(multi_args(p, cmd='COMMAND', append='', prefix='*P*', suffix='*S*')) assert nodes2[0].argv[-1] == '-- .disable_rate_limits=1' p2 = NamespacedOptionParser(['10', '-c:1', '5']) p2.parse() nodes3 = list(multi_args(p2, cmd='COMMAND')) def _args(name, *args): return args + ( '--pidfile={0}.pid'.format(name), '--logfile={0}%I.log'.format(name), '--executable={0}'.format(sys.executable), '', ) assert len(nodes3) == 10 assert nodes3[0].name == 'celery1@example.com' assert nodes3[0].argv == ( 'COMMAND', '-c 5', '-n celery1@example.com') + _args('celery1') for i, worker in enumerate(nodes3[1:]): assert worker.name == 'celery%s@example.com' % (i + 2) node_i = 'celery%s' % (i + 2,) assert worker.argv == ( 'COMMAND', '-n %s@example.com' % (node_i,)) + _args(node_i) nodes4 = list(multi_args(p2, cmd='COMMAND', suffix='""')) assert len(nodes4) == 10 assert nodes4[0].name == 'celery1@' assert nodes4[0].argv == ( 'COMMAND', '-c 5', '-n celery1@') + _args('celery1') p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) p3.parse() nodes5 = list(multi_args(p3, cmd='COMMAND', suffix='""')) assert nodes5[0].name == 'foo@' assert nodes5[0].argv == ( 'COMMAND', '-c 5', '-n foo@') + _args('foo') p4 = NamespacedOptionParser(['foo', '-Q:1', 'test']) p4.parse() nodes6 = list(multi_args(p4, cmd='COMMAND', suffix='""')) assert nodes6[0].name == 'foo@' assert nodes6[0].argv == ( 'COMMAND', '-Q test', '-n foo@') + _args('foo') p5 = NamespacedOptionParser(['foo@bar', '-Q:1', 'test']) p5.parse() nodes7 = list(multi_args(p5, cmd='COMMAND', suffix='""')) assert nodes7[0].name == 'foo@bar' assert nodes7[0].argv == ( 'COMMAND', '-Q test', '-n foo@bar') + _args('foo') p6 = NamespacedOptionParser(['foo@bar', '-Q:0', 'test']) p6.parse() with pytest.raises(KeyError): list(multi_args(p6)) def test_optmerge(self): p = NamespacedOptionParser(['foo', 'test']) p.parse() p.options = {'x': 'y'} r = p.optmerge('foo') assert r['x'] == 'y' class test_Node: def setup(self): self.p = Mock(name='p') self.p.options = { '--executable': 'python', '--logfile': 'foo.log', } self.p.namespaces = {} self.node = Node('foo@bar.com', options={'-A': 'proj'}) self.expander = self.node.expander = Mock(name='expander') self.node.pid = 303 def test_from_kwargs(self): n = Node.from_kwargs( 'foo@bar.com', max_tasks_per_child=30, A='foo', Q='q1,q2', O='fair', ) 
assert sorted(n.argv) == sorted([ '-m celery worker --detach', '-A foo', '--executable={0}'.format(n.executable), '-O fair', '-n foo@bar.com', '--logfile=foo%I.log', '-Q q1,q2', '--max-tasks-per-child=30', '--pidfile=foo.pid', '', ]) @patch('os.kill') def test_send(self, kill): assert self.node.send(9) kill.assert_called_with(self.node.pid, 9) @patch('os.kill') def test_send__ESRCH(self, kill): kill.side_effect = OSError() kill.side_effect.errno = errno.ESRCH assert not self.node.send(9) kill.assert_called_with(self.node.pid, 9) @patch('os.kill') def test_send__error(self, kill): kill.side_effect = OSError() kill.side_effect.errno = errno.ENOENT with pytest.raises(OSError): self.node.send(9) kill.assert_called_with(self.node.pid, 9) def test_alive(self): self.node.send = Mock(name='send') assert self.node.alive() is self.node.send.return_value self.node.send.assert_called_with(0) def test_start(self): self.node._waitexec = Mock(name='_waitexec') self.node.start(env={'foo': 'bar'}, kw=2) self.node._waitexec.assert_called_with( self.node.argv, path=self.node.executable, env={'foo': 'bar'}, kw=2, ) @patch('celery.apps.multi.Popen') def test_waitexec(self, Popen, argv=['A', 'B']): on_spawn = Mock(name='on_spawn') on_signalled = Mock(name='on_signalled') on_failure = Mock(name='on_failure') env = Mock(name='env') self.node.handle_process_exit = Mock(name='handle_process_exit') self.node._waitexec( argv, path='python', env=env, on_spawn=on_spawn, on_signalled=on_signalled, on_failure=on_failure, ) Popen.assert_called_with( self.node.prepare_argv(argv, 'python'), env=env) self.node.handle_process_exit.assert_called_with( Popen().wait(), on_signalled=on_signalled, on_failure=on_failure, ) def test_handle_process_exit(self): assert self.node.handle_process_exit(0) == 0 def test_handle_process_exit__failure(self): on_failure = Mock(name='on_failure') assert self.node.handle_process_exit(9, on_failure=on_failure) == 9 on_failure.assert_called_with(self.node, 9) def test_handle_process_exit__signalled(self): on_signalled = Mock(name='on_signalled') assert self.node.handle_process_exit( -9, on_signalled=on_signalled) == 9 on_signalled.assert_called_with(self.node, 9) def test_logfile(self): assert self.node.logfile == self.expander.return_value self.expander.assert_called_with('%n%I.log') class test_Cluster: def setup(self): self.Popen = self.patching('celery.apps.multi.Popen') self.kill = self.patching('os.kill') self.gethostname = self.patching('celery.apps.multi.gethostname') self.gethostname.return_value = 'example.com' self.Pidfile = self.patching('celery.apps.multi.Pidfile') self.cluster = Cluster( [Node('foo@example.com'), Node('bar@example.com'), Node('baz@example.com')], on_stopping_preamble=Mock(name='on_stopping_preamble'), on_send_signal=Mock(name='on_send_signal'), on_still_waiting_for=Mock(name='on_still_waiting_for'), on_still_waiting_progress=Mock(name='on_still_waiting_progress'), on_still_waiting_end=Mock(name='on_still_waiting_end'), on_node_start=Mock(name='on_node_start'), on_node_restart=Mock(name='on_node_restart'), on_node_shutdown_ok=Mock(name='on_node_shutdown_ok'), on_node_status=Mock(name='on_node_status'), on_node_signal=Mock(name='on_node_signal'), on_node_signal_dead=Mock(name='on_node_signal_dead'), on_node_down=Mock(name='on_node_down'), on_child_spawn=Mock(name='on_child_spawn'), on_child_signalled=Mock(name='on_child_signalled'), on_child_failure=Mock(name='on_child_failure'), ) def test_len(self): assert len(self.cluster) == 3 def test_getitem(self): assert 
self.cluster[0].name == 'foo@example.com' def test_start(self): self.cluster.start_node = Mock(name='start_node') self.cluster.start() self.cluster.start_node.assert_has_calls( call(node) for node in self.cluster ) def test_start_node(self): self.cluster._start_node = Mock(name='_start_node') node = self.cluster[0] assert (self.cluster.start_node(node) is self.cluster._start_node.return_value) self.cluster.on_node_start.assert_called_with(node) self.cluster._start_node.assert_called_with(node) self.cluster.on_node_status.assert_called_with( node, self.cluster._start_node(), ) def test__start_node(self): node = self.cluster[0] node.start = Mock(name='node.start') assert self.cluster._start_node(node) is node.start.return_value node.start.assert_called_with( self.cluster.env, on_spawn=self.cluster.on_child_spawn, on_signalled=self.cluster.on_child_signalled, on_failure=self.cluster.on_child_failure, ) def test_send_all(self): nodes = [Mock(name='n1'), Mock(name='n2')] self.cluster.getpids = Mock(name='getpids') self.cluster.getpids.return_value = nodes self.cluster.send_all(15) self.cluster.on_node_signal.assert_has_calls( call(node, 'TERM') for node in nodes ) for node in nodes: node.send.assert_called_with(15, self.cluster.on_node_signal_dead) @skip.if_win32() def test_kill(self): self.cluster.send_all = Mock(name='.send_all') self.cluster.kill() self.cluster.send_all.assert_called_with(signal.SIGKILL) def test_getpids(self): self.gethostname.return_value = 'e.com' self.prepare_pidfile_for_getpids(self.Pidfile) callback = Mock() p = Cluster([ Node('foo@e.com'), Node('bar@e.com'), Node('baz@e.com'), ]) nodes = p.getpids(on_down=callback) node_0, node_1 = nodes assert node_0.name == 'foo@e.com' assert sorted(node_0.argv) == sorted([ '', '--executable={0}'.format(node_0.executable), '--logfile=foo%I.log', '--pidfile=foo.pid', '-m celery worker --detach', '-n foo@e.com', ]) assert node_0.pid == 10 assert node_1.name == 'bar@e.com' assert sorted(node_1.argv) == sorted([ '', '--executable={0}'.format(node_1.executable), '--logfile=bar%I.log', '--pidfile=bar.pid', '-m celery worker --detach', '-n bar@e.com', ]) assert node_1.pid == 11 # without callback, should work nodes = p.getpids('celery worker') def prepare_pidfile_for_getpids(self, Pidfile): class pids(object): def __init__(self, path): self.path = path def read_pid(self): try: return {'foo.pid': 10, 'bar.pid': 11}[self.path] except KeyError: raise ValueError() self.Pidfile.side_effect = pids celery-4.1.0/t/unit/events/0000755000175000017500000000000013135426347015463 5ustar omeromer00000000000000celery-4.1.0/t/unit/events/test_snapshot.py0000644000175000017500000000654013130607475020736 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, mock, patch from celery.app.events import Events from celery.events.snapshot import Polaroid, evcam class MockTimer(object): installed = [] def call_repeatedly(self, secs, fun, *args, **kwargs): self.installed.append(fun) return Mock(name='TRef') timer = MockTimer() class test_Polaroid: def setup(self): self.state = self.app.events.State() def test_constructor(self): x = Polaroid(self.state, app=self.app) assert x.app is self.app assert x.state is self.state assert x.freq assert x.cleanup_freq assert x.logger assert not x.maxrate def test_install_timers(self): x = Polaroid(self.state, app=self.app) x.timer = timer x.__exit__() x.__enter__() assert x.capture in MockTimer.installed assert x.cleanup in MockTimer.installed 
x._tref.cancel.assert_not_called() x._ctref.cancel.assert_not_called() x.__exit__() x._tref.cancel.assert_called() x._ctref.cancel.assert_called() x._tref.assert_called() x._ctref.assert_not_called() def test_cleanup(self): x = Polaroid(self.state, app=self.app) cleanup_signal_sent = [False] def handler(**kwargs): cleanup_signal_sent[0] = True x.cleanup_signal.connect(handler) x.cleanup() assert cleanup_signal_sent[0] def test_shutter__capture(self): x = Polaroid(self.state, app=self.app) shutter_signal_sent = [False] def handler(**kwargs): shutter_signal_sent[0] = True x.shutter_signal.connect(handler) x.shutter() assert shutter_signal_sent[0] shutter_signal_sent[0] = False x.capture() assert shutter_signal_sent[0] def test_shutter_maxrate(self): x = Polaroid(self.state, app=self.app, maxrate='1/h') shutter_signal_sent = [0] def handler(**kwargs): shutter_signal_sent[0] += 1 x.shutter_signal.connect(handler) for i in range(30): x.shutter() x.shutter() x.shutter() assert shutter_signal_sent[0] == 1 class test_evcam: class MockReceiver(object): raise_keyboard_interrupt = False def capture(self, **kwargs): if self.__class__.raise_keyboard_interrupt: raise KeyboardInterrupt() class MockEvents(Events): def Receiver(self, *args, **kwargs): return test_evcam.MockReceiver() def setup(self): self.app.events = self.MockEvents() self.app.events.app = self.app @mock.restore_logging() def test_evcam(self): evcam(Polaroid, timer=timer, app=self.app) evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app) self.MockReceiver.raise_keyboard_interrupt = True try: with pytest.raises(SystemExit): evcam(Polaroid, timer=timer, app=self.app) finally: self.MockReceiver.raise_keyboard_interrupt = False @patch('celery.platforms.create_pidlock') def test_evcam_pidfile(self, create_pidlock): evcam(Polaroid, timer=timer, pidfile='/var/pid', app=self.app) create_pidlock.assert_called_with('/var/pid') celery-4.1.0/t/unit/events/__init__.py0000644000175000017500000000000013130607475017560 0ustar omeromer00000000000000celery-4.1.0/t/unit/events/test_events.py0000644000175000017500000002553413130607475020407 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket from case import Mock, call from celery.events import Event from celery.events.receiver import CLIENT_CLOCK_SKEW class MockProducer(object): raise_on_publish = False def __init__(self, *args, **kwargs): self.sent = [] def publish(self, msg, *args, **kwargs): if self.raise_on_publish: raise KeyError() self.sent.append(msg) def close(self): pass def has_event(self, kind): for event in self.sent: if event['type'] == kind: return event return False def test_Event(): event = Event('world war II') assert event['type'] == 'world war II' assert event['timestamp'] class test_EventDispatcher: def test_redis_uses_fanout_exchange(self): self.app.connection = Mock() conn = self.app.connection.return_value = Mock() conn.transport.driver_type = 'redis' dispatcher = self.app.events.Dispatcher(conn, enabled=False) assert dispatcher.exchange.type == 'fanout' def test_others_use_topic_exchange(self): self.app.connection = Mock() conn = self.app.connection.return_value = Mock() conn.transport.driver_type = 'amqp' dispatcher = self.app.events.Dispatcher(conn, enabled=False) assert dispatcher.exchange.type == 'topic' def test_takes_channel_connection(self): x = self.app.events.Dispatcher(channel=Mock()) assert x.connection is x.channel.connection.client def test_sql_transports_disabled(self): conn = Mock() 
conn.transport.driver_type = 'sql' x = self.app.events.Dispatcher(connection=conn) assert not x.enabled def test_send(self): producer = MockProducer() producer.connection = self.app.connection_for_write() connection = Mock() connection.transport.driver_type = 'amqp' eventer = self.app.events.Dispatcher(connection, enabled=False, buffer_while_offline=False) eventer.producer = producer eventer.enabled = True eventer.send('World War II', ended=True) assert producer.has_event('World War II') eventer.enabled = False eventer.send('World War III') assert not producer.has_event('World War III') evs = ('Event 1', 'Event 2', 'Event 3') eventer.enabled = True eventer.producer.raise_on_publish = True eventer.buffer_while_offline = False with pytest.raises(KeyError): eventer.send('Event X') eventer.buffer_while_offline = True for ev in evs: eventer.send(ev) eventer.producer.raise_on_publish = False eventer.flush() for ev in evs: assert producer.has_event(ev) eventer.flush() def test_send_buffer_group(self): buf_received = [None] producer = MockProducer() producer.connection = self.app.connection_for_write() connection = Mock() connection.transport.driver_type = 'amqp' eventer = self.app.events.Dispatcher( connection, enabled=False, buffer_group={'task'}, buffer_limit=2, ) eventer.producer = producer eventer.enabled = True eventer._publish = Mock(name='_publish') def on_eventer_publish(events, *args, **kwargs): buf_received[0] = list(events) eventer._publish.side_effect = on_eventer_publish assert not eventer._group_buffer['task'] eventer.on_send_buffered = Mock(name='on_send_buffered') eventer.send('task-received', uuid=1) prev_buffer = eventer._group_buffer['task'] assert eventer._group_buffer['task'] eventer.on_send_buffered.assert_called_with() eventer.send('task-received', uuid=1) assert not eventer._group_buffer['task'] eventer._publish.assert_has_calls([ call([], eventer.producer, 'task.multi'), ]) # clear in place assert eventer._group_buffer['task'] is prev_buffer assert len(buf_received[0]) == 2 eventer.on_send_buffered = None eventer.send('task-received', uuid=1) def test_flush_no_groups_no_errors(self): eventer = self.app.events.Dispatcher(Mock()) eventer.flush(errors=False, groups=False) def test_enter_exit(self): with self.app.connection_for_write() as conn: d = self.app.events.Dispatcher(conn) d.close = Mock() with d as _d: assert _d d.close.assert_called_with() def test_enable_disable_callbacks(self): on_enable = Mock() on_disable = Mock() with self.app.connection_for_write() as conn: with self.app.events.Dispatcher(conn, enabled=False) as d: d.on_enabled.add(on_enable) d.on_disabled.add(on_disable) d.enable() on_enable.assert_called_with() d.disable() on_disable.assert_called_with() def test_enabled_disable(self): connection = self.app.connection_for_write() channel = connection.channel() try: dispatcher = self.app.events.Dispatcher(connection, enabled=True) dispatcher2 = self.app.events.Dispatcher(connection, enabled=True, channel=channel) assert dispatcher.enabled assert dispatcher.producer.channel assert (dispatcher.producer.serializer == self.app.conf.event_serializer) created_channel = dispatcher.producer.channel dispatcher.disable() dispatcher.disable() # Disable with no active producer dispatcher2.disable() assert not dispatcher.enabled assert dispatcher.producer is None # does not close manually provided channel assert not dispatcher2.channel.closed dispatcher.enable() assert dispatcher.enabled assert dispatcher.producer # XXX test compat attribute assert dispatcher.publisher is 
dispatcher.producer prev, dispatcher.publisher = dispatcher.producer, 42 try: assert dispatcher.producer == 42 finally: dispatcher.producer = prev finally: channel.close() connection.close() assert created_channel.closed class test_EventReceiver: def test_process(self): message = {'type': 'world-war'} got_event = [False] def my_handler(event): got_event[0] = True connection = Mock() connection.transport_cls = 'memory' r = self.app.events.Receiver( connection, handlers={'world-war': my_handler}, node_id='celery.tests', ) r._receive(message, object()) assert got_event[0] def test_accept_argument(self): r = self.app.events.Receiver(Mock(), accept={'app/foo'}) assert r.accept == {'app/foo'} def test_event_queue_prefix__default(self): r = self.app.events.Receiver(Mock()) assert r.queue.name.startswith('celeryev.') def test_event_queue_prefix__setting(self): self.app.conf.event_queue_prefix = 'eventq' r = self.app.events.Receiver(Mock()) assert r.queue.name.startswith('eventq.') def test_event_queue_prefix__argument(self): r = self.app.events.Receiver(Mock(), queue_prefix='fooq') assert r.queue.name.startswith('fooq.') def test_catch_all_event(self): message = {'type': 'world-war'} got_event = [False] def my_handler(event): got_event[0] = True connection = Mock() connection.transport_cls = 'memory' r = self.app.events.Receiver(connection, node_id='celery.tests') r.handlers['*'] = my_handler r._receive(message, object()) assert got_event[0] def test_itercapture(self): connection = self.app.connection_for_write() try: r = self.app.events.Receiver(connection, node_id='celery.tests') it = r.itercapture(timeout=0.0001, wakeup=False) with pytest.raises(socket.timeout): next(it) with pytest.raises(socket.timeout): r.capture(timeout=0.00001) finally: connection.close() def test_event_from_message_localize_disabled(self): r = self.app.events.Receiver(Mock(), node_id='celery.tests') r.adjust_clock = Mock() ts_adjust = Mock() r.event_from_message( {'type': 'worker-online', 'clock': 313}, localize=False, adjust_timestamp=ts_adjust, ) ts_adjust.assert_not_called() r.adjust_clock.assert_called_with(313) def test_event_from_message_clock_from_client(self): r = self.app.events.Receiver(Mock(), node_id='celery.tests') r.clock.value = 302 r.adjust_clock = Mock() body = {'type': 'task-sent'} r.event_from_message( body, localize=False, adjust_timestamp=Mock(), ) assert body['clock'] == r.clock.value + CLIENT_CLOCK_SKEW def test_receive_multi(self): r = self.app.events.Receiver(Mock(name='connection')) r.process = Mock(name='process') efm = r.event_from_message = Mock(name='event_from_message') def on_efm(*args): return args efm.side_effect = on_efm r._receive([1, 2, 3], Mock()) r.process.assert_has_calls([call(1), call(2), call(3)]) def test_itercapture_limit(self): connection = self.app.connection_for_write() channel = connection.channel() try: events_received = [0] def handler(event): events_received[0] += 1 producer = self.app.events.Dispatcher( connection, enabled=True, channel=channel, ) r = self.app.events.Receiver( connection, handlers={'*': handler}, node_id='celery.tests', ) evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5'] for ev in evs: producer.send(ev) it = r.itercapture(limit=4, wakeup=True) next(it) # skip consumer (see itercapture) list(it) assert events_received[0] == 4 finally: channel.close() connection.close() def test_State(app): state = app.events.State() assert dict(state.workers) == {} def test_default_dispatcher(app): with app.events.default_dispatcher() as d: assert d assert d.connection 
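# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original distribution): the
# Dispatcher/Receiver pair exercised by the tests above can be driven
# directly.  This assumes a broker reachable through the app connection;
# the event type sent and the handler name here are hypothetical.
#
#     from celery import Celery
#
#     app = Celery('sketch', broker='memory://')
#     received = []
#
#     def on_event(event):
#         # every event is a dict carrying at least 'type' and 'timestamp'
#         received.append(event['type'])
#
#     with app.connection_for_write() as conn:
#         dispatcher = app.events.Dispatcher(conn, enabled=True)
#         dispatcher.send('worker-online', hostname='sketch@localhost')
#         receiver = app.events.Receiver(conn, handlers={'*': on_event})
#         receiver.capture(limit=1, timeout=1.0, wakeup=False)
# ---------------------------------------------------------------------------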
celery-4.1.0/t/unit/events/test_cursesmon.py0000644000175000017500000000454713130607475021122 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import skip class MockWindow(object): def getmaxyx(self): return self.y, self.x @skip.unless_module('curses', import_errors=(ImportError, OSError)) class test_CursesDisplay: def setup(self): from celery.events import cursesmon self.monitor = cursesmon.CursesMonitor(object(), app=self.app) self.win = MockWindow() self.monitor.win = self.win def test_format_row_with_default_widths(self): self.win.x, self.win.y = 91, 24 row = self.monitor.format_row( '783da208-77d0-40ca-b3d6-37dd6dbb55d3', 'task.task.task.task.task.task.task.task.task.tas', 'workerworkerworkerworkerworkerworkerworkerworker', '21:13:20', 'SUCCESS') assert ('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' 'workerworker... task.task.[.]tas 21:13:20 SUCCESS ' == row) def test_format_row_with_truncated_uuid(self): self.win.x, self.win.y = 80, 24 row = self.monitor.format_row( '783da208-77d0-40ca-b3d6-37dd6dbb55d3', 'task.task.task.task.task.task.task.task.task.tas', 'workerworkerworkerworkerworkerworkerworkerworker', '21:13:20', 'SUCCESS') expected = ('783da208-77d0-40ca-b3d... workerworker... ' 'task.task.[.]tas 21:13:20 SUCCESS ') assert row == expected def test_format_title_row(self): self.win.x, self.win.y = 80, 24 row = self.monitor.format_row('UUID', 'TASK', 'WORKER', 'TIME', 'STATE') assert ('UUID WORKER ' 'TASK TIME STATE ' == row) def test_format_row_for_wide_screen_with_short_uuid(self): self.win.x, self.win.y = 140, 24 row = self.monitor.format_row( '783da208-77d0-40ca-b3d6-37dd6dbb55d3', 'task.task.task.task.task.task.task.task.task.tas', 'workerworkerworkerworkerworkerworkerworkerworker', '21:13:20', 'SUCCESS') assert len(row) == 136 assert ('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' 'workerworkerworkerworkerworkerworker... 
' 'task.task.task.task.task.task.task.[.]tas ' '21:13:20 SUCCESS ' == row) celery-4.1.0/t/unit/events/test_state.py0000644000175000017500000005251413130607475020221 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle from decimal import Decimal from random import shuffle from time import time from itertools import count from case import Mock, patch, skip from celery import states from celery import uuid from celery.events import Event from celery.events.state import ( HEARTBEAT_EXPIRE_WINDOW, HEARTBEAT_DRIFT_MAX, State, Worker, Task, heartbeat_expires, ) from celery.five import range class replay(object): def __init__(self, state): self.state = state self.rewind() self.setup() self.current_clock = 0 def setup(self): pass def next_event(self): ev = self.events[next(self.position)] ev['local_received'] = ev['timestamp'] try: self.current_clock = ev['clock'] except KeyError: ev['clock'] = self.current_clock = self.current_clock + 1 return ev def __iter__(self): return self def __next__(self): try: self.state.event(self.next_event()) except IndexError: raise StopIteration() next = __next__ def rewind(self): self.position = count(0) return self def play(self): for _ in self: pass class ev_worker_online_offline(replay): def setup(self): self.events = [ Event('worker-online', hostname='utest1'), Event('worker-offline', hostname='utest1'), ] class ev_worker_heartbeats(replay): def setup(self): self.events = [ Event('worker-heartbeat', hostname='utest1', timestamp=time() - HEARTBEAT_EXPIRE_WINDOW * 2), Event('worker-heartbeat', hostname='utest1'), ] class ev_task_states(replay): def setup(self): tid = self.tid = uuid() tid2 = self.tid2 = uuid() self.events = [ Event('task-received', uuid=tid, name='task1', args='(2, 2)', kwargs="{'foo': 'bar'}", retries=0, eta=None, hostname='utest1'), Event('task-started', uuid=tid, hostname='utest1'), Event('task-revoked', uuid=tid, hostname='utest1'), Event('task-retried', uuid=tid, exception="KeyError('bar')", traceback='line 2 at main', hostname='utest1'), Event('task-failed', uuid=tid, exception="KeyError('foo')", traceback='line 1 at main', hostname='utest1'), Event('task-succeeded', uuid=tid, result='4', runtime=0.1234, hostname='utest1'), Event('foo-bar'), Event('task-received', uuid=tid2, name='task2', args='(4, 4)', kwargs="{'foo': 'bar'}", retries=0, eta=None, parent_id=tid, root_id=tid, hostname='utest1'), ] def QTEV(type, uuid, hostname, clock, name=None, timestamp=None): """Quick task event.""" return Event('task-{0}'.format(type), uuid=uuid, hostname=hostname, clock=clock, name=name, timestamp=timestamp or time()) class ev_logical_clock_ordering(replay): def __init__(self, state, offset=0, uids=None): self.offset = offset or 0 self.uids = self.setuids(uids) super(ev_logical_clock_ordering, self).__init__(state) def setuids(self, uids): uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()] return uids def setup(self): offset = self.offset tA, tB, tC = self.uids self.events = [ QTEV('received', tA, 'w1', name='tA', clock=offset + 1), QTEV('received', tB, 'w2', name='tB', clock=offset + 1), QTEV('started', tA, 'w1', name='tA', clock=offset + 3), QTEV('received', tC, 'w2', name='tC', clock=offset + 3), QTEV('started', tB, 'w2', name='tB', clock=offset + 5), QTEV('retried', tA, 'w1', name='tA', clock=offset + 7), QTEV('succeeded', tB, 'w2', name='tB', clock=offset + 9), QTEV('started', tC, 'w2', name='tC', clock=offset + 10), QTEV('received', tA, 'w3', name='tA', clock=offset + 13), 
QTEV('succeeded', tC, 'w2', name='tC', clock=offset + 12),
            QTEV('started', tA, 'w3', name='tA', clock=offset + 14),
            QTEV('succeeded', tA, 'w3', name='tA', clock=offset + 16),
        ]

    def rewind_with_offset(self, offset, uids=None):
        self.offset = offset
        self.uids = self.setuids(uids or self.uids)
        self.setup()
        self.rewind()


class ev_snapshot(replay):

    def setup(self):
        self.events = [
            Event('worker-online', hostname='utest1'),
            Event('worker-online', hostname='utest2'),
            Event('worker-online', hostname='utest3'),
        ]
        for i in range(20):
            worker = not i % 2 and 'utest2' or 'utest1'
            type = not i % 2 and 'task2' or 'task1'
            self.events.append(Event('task-received', name=type,
                               uuid=uuid(), hostname=worker))


class test_Worker:

    def test_equality(self):
        assert Worker(hostname='foo').hostname == 'foo'
        assert Worker(hostname='foo') == Worker(hostname='foo')
        assert Worker(hostname='foo') != Worker(hostname='bar')
        assert hash(Worker(hostname='foo')) == hash(Worker(hostname='foo'))
        assert hash(Worker(hostname='foo')) != hash(Worker(hostname='bar'))

    def test_heartbeat_expires__Decimal(self):
        assert heartbeat_expires(
            Decimal(344313.37), freq=60, expire_window=200) == 344433.37

    def test_compatible_with_Decimal(self):
        w = Worker('george@vandelay.com')
        timestamp, local_received = Decimal(time()), time()
        w.event('worker-online', timestamp, local_received, fields={
            'hostname': 'george@vandelay.com',
            'timestamp': timestamp,
            'local_received': local_received,
            'freq': Decimal(5.6335431),
        })
        assert w.alive

    def test_eq_ne_other(self):
        assert Worker('a@b.com') == Worker('a@b.com')
        assert Worker('a@b.com') != Worker('b@b.com')
        assert Worker('a@b.com') != object()

    def test_reduce_direct(self):
        w = Worker('george@vandelay.com')
        w.event('worker-online', 10.0, 13.0, fields={
            'hostname': 'george@vandelay.com',
            'timestamp': 10.0,
            'local_received': 13.0,
            'freq': 60,
        })
        fun, args = w.__reduce__()
        w2 = fun(*args)
        assert w2.hostname == w.hostname
        assert w2.pid == w.pid
        assert w2.freq == w.freq
        assert w2.heartbeats == w.heartbeats
        assert w2.clock == w.clock
        assert w2.active == w.active
        assert w2.processed == w.processed
        assert w2.loadavg == w.loadavg
        assert w2.sw_ident == w.sw_ident

    def test_update(self):
        w = Worker('george@vandelay.com')
        w.update({'idx': '301'}, foo=1, clock=30, bah='foo')
        assert w.idx == '301'
        assert w.foo == 1
        assert w.clock == 30
        assert w.bah == 'foo'

    def test_survives_missing_timestamp(self):
        worker = Worker(hostname='foo')
        worker.event('heartbeat')
        assert worker.heartbeats == []

    def test_repr(self):
        assert repr(Worker(hostname='foo'))

    def test_drift_warning(self):
        worker = Worker(hostname='foo')
        with patch('celery.events.state.warn') as warn:
            worker.event(None, time() + (HEARTBEAT_DRIFT_MAX * 2), time())
            warn.assert_called()
            assert 'Substantial drift' in warn.call_args[0][0]

    def test_updates_heartbeat(self):
        worker = Worker(hostname='foo')
        worker.event(None, time(), time())
        assert len(worker.heartbeats) == 1
        h1 = worker.heartbeats[0]
        worker.event(None, time(), time() - 10)
        assert len(worker.heartbeats) == 2
        assert worker.heartbeats[-1] == h1


class test_Task:

    def test_equality(self):
        assert Task(uuid='foo').uuid == 'foo'
        assert Task(uuid='foo') == Task(uuid='foo')
        assert Task(uuid='foo') != Task(uuid='bar')
        assert hash(Task(uuid='foo')) == hash(Task(uuid='foo'))
        assert hash(Task(uuid='foo')) != hash(Task(uuid='bar'))

    def test_info(self):
        task = Task(uuid='abcdefg',
                    name='tasks.add',
                    args='(2, 2)',
                    kwargs='{}',
                    retries=2,
                    result=42,
                    eta=1,
                    runtime=0.0001,
                    expires=1,
                    parent_id='bdefc',
                    root_id='dedfef',
                    foo=None,
exception=1, received=time() - 10, started=time() - 8, exchange='celery', routing_key='celery', succeeded=time()) assert sorted(list(task._info_fields)) == sorted(task.info().keys()) assert (sorted(list(task._info_fields + ('received',))) == sorted(task.info(extra=('received',)))) assert (sorted(['args', 'kwargs']) == sorted(task.info(['args', 'kwargs']).keys())) assert not list(task.info('foo')) def test_reduce_direct(self): task = Task(uuid='uuid', name='tasks.add', args='(2, 2)') fun, args = task.__reduce__() task2 = fun(*args) assert task == task2 def test_ready(self): task = Task(uuid='abcdefg', name='tasks.add') task.event('received', time(), time()) assert not task.ready task.event('succeeded', time(), time()) assert task.ready def test_sent(self): task = Task(uuid='abcdefg', name='tasks.add') task.event('sent', time(), time()) assert task.state == states.PENDING def test_merge(self): task = Task() task.event('failed', time(), time()) task.event('started', time(), time()) task.event('received', time(), time(), { 'name': 'tasks.add', 'args': (2, 2), }) assert task.state == states.FAILURE assert task.name == 'tasks.add' assert task.args == (2, 2) task.event('retried', time(), time()) assert task.state == states.RETRY def test_repr(self): assert repr(Task(uuid='xxx', name='tasks.add')) class test_State: def test_repr(self): assert repr(State()) def test_pickleable(self): state = State() r = ev_logical_clock_ordering(state) r.play() assert pickle.loads(pickle.dumps(state)) def test_task_logical_clock_ordering(self): state = State() r = ev_logical_clock_ordering(state) tA, tB, tC = r.uids r.play() now = list(state.tasks_by_time()) assert now[0][0] == tA assert now[1][0] == tC assert now[2][0] == tB for _ in range(1000): shuffle(r.uids) tA, tB, tC = r.uids r.rewind_with_offset(r.current_clock + 1, r.uids) r.play() now = list(state.tasks_by_time()) assert now[0][0] == tA assert now[1][0] == tC assert now[2][0] == tB @skip.todo(reason='not working') def test_task_descending_clock_ordering(self): state = State() r = ev_logical_clock_ordering(state) tA, tB, tC = r.uids r.play() now = list(state.tasks_by_time(reverse=False)) assert now[0][0] == tA assert now[1][0] == tB assert now[2][0] == tC for _ in range(1000): shuffle(r.uids) tA, tB, tC = r.uids r.rewind_with_offset(r.current_clock + 1, r.uids) r.play() now = list(state.tasks_by_time(reverse=False)) assert now[0][0] == tB assert now[1][0] == tC assert now[2][0] == tA def test_get_or_create_task(self): state = State() task, created = state.get_or_create_task('id1') assert task.uuid == 'id1' assert created task2, created2 = state.get_or_create_task('id1') assert task2 is task assert not created2 def test_get_or_create_worker(self): state = State() worker, created = state.get_or_create_worker('george@vandelay.com') assert worker.hostname == 'george@vandelay.com' assert created worker2, created2 = state.get_or_create_worker('george@vandelay.com') assert worker2 is worker assert not created2 def test_get_or_create_worker__with_defaults(self): state = State() worker, created = state.get_or_create_worker( 'george@vandelay.com', pid=30, ) assert worker.hostname == 'george@vandelay.com' assert worker.pid == 30 assert created worker2, created2 = state.get_or_create_worker( 'george@vandelay.com', pid=40, ) assert worker2 is worker assert worker2.pid == 40 assert not created2 def test_worker_online_offline(self): r = ev_worker_online_offline(State()) next(r) assert list(r.state.alive_workers()) assert r.state.workers['utest1'].alive r.play() assert 
not list(r.state.alive_workers())
        assert not r.state.workers['utest1'].alive

    def test_itertasks(self):
        s = State()
        s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
        assert len(list(s.itertasks(limit=2))) == 2

    def test_worker_heartbeat_expire(self):
        r = ev_worker_heartbeats(State())
        next(r)
        assert not list(r.state.alive_workers())
        assert not r.state.workers['utest1'].alive
        r.play()
        assert list(r.state.alive_workers())
        assert r.state.workers['utest1'].alive

    def test_task_states(self):
        r = ev_task_states(State())

        # RECEIVED
        next(r)
        assert r.tid in r.state.tasks
        task = r.state.tasks[r.tid]
        assert task.state == states.RECEIVED
        assert task.received
        assert task.timestamp == task.received
        assert task.worker.hostname == 'utest1'

        # STARTED
        next(r)
        assert r.state.workers['utest1'].alive
        assert task.state == states.STARTED
        assert task.started
        assert task.timestamp == task.started
        assert task.worker.hostname == 'utest1'

        # REVOKED
        next(r)
        assert task.state == states.REVOKED
        assert task.revoked
        assert task.timestamp == task.revoked
        assert task.worker.hostname == 'utest1'

        # RETRY
        next(r)
        assert task.state == states.RETRY
        assert task.retried
        assert task.timestamp == task.retried
        assert task.worker.hostname == 'utest1'
        assert task.exception == "KeyError('bar')"
        assert task.traceback == 'line 2 at main'

        # FAILURE
        next(r)
        assert task.state == states.FAILURE
        assert task.failed
        assert task.timestamp == task.failed
        assert task.worker.hostname == 'utest1'
        assert task.exception == "KeyError('foo')"
        assert task.traceback == 'line 1 at main'

        # SUCCESS
        next(r)
        assert task.state == states.SUCCESS
        assert task.succeeded
        assert task.timestamp == task.succeeded
        assert task.worker.hostname == 'utest1'
        assert task.result == '4'
        assert task.runtime == 0.1234

        # children, parent, root
        r.play()
        assert r.tid2 in r.state.tasks
        task2 = r.state.tasks[r.tid2]
        assert task2.parent is task
        assert task2.root is task
        assert task2 in task.children

    def test_task_children_set_if_received_in_wrong_order(self):
        r = ev_task_states(State())
        r.events.insert(0, r.events.pop())
        r.play()
        assert r.state.tasks[r.tid2] in r.state.tasks[r.tid].children
        assert r.state.tasks[r.tid2].root is r.state.tasks[r.tid]
        assert r.state.tasks[r.tid2].parent is r.state.tasks[r.tid]

    def assertStateEmpty(self, state):
        assert not state.tasks
        assert not state.workers
        assert not state.event_count
        assert not state.task_count

    def assertState(self, state):
        assert state.tasks
        assert state.workers
        assert state.event_count
        assert state.task_count

    def test_freeze_while(self):
        s = State()
        r = ev_snapshot(s)
        r.play()

        def work():
            pass

        s.freeze_while(work, clear_after=True)
        assert not s.event_count

        s2 = State()
        r = ev_snapshot(s2)
        r.play()
        s2.freeze_while(work, clear_after=False)
        assert s2.event_count

    def test_clear_tasks(self):
        s = State()
        r = ev_snapshot(s)
        r.play()
        assert s.tasks
        s.clear_tasks(ready=False)
        assert not s.tasks

    def test_clear(self):
        r = ev_snapshot(State())
        r.play()
        assert r.state.event_count
        assert r.state.workers
        assert r.state.tasks
        assert r.state.task_count

        r.state.clear()
        assert not r.state.event_count
        assert not r.state.workers
        assert r.state.tasks
        assert not r.state.task_count

        r.state.clear(False)
        assert not r.state.tasks

    def test_task_types(self):
        r = ev_snapshot(State())
        r.play()
        assert sorted(r.state.task_types()) == ['task1', 'task2']

    def test_tasks_by_time(self):
        r = ev_snapshot(State())
        r.play()
        assert len(list(r.state.tasks_by_time())) == 20
        assert len(list(r.state.tasks_by_time(reverse=False))) == 20

    def test_tasks_by_type(self):
        r =
ev_snapshot(State()) r.play() assert len(list(r.state.tasks_by_type('task1'))) == 10 assert len(list(r.state.tasks_by_type('task2'))) == 10 assert len(r.state.tasks_by_type['task1']) == 10 assert len(r.state.tasks_by_type['task2']) == 10 def test_alive_workers(self): r = ev_snapshot(State()) r.play() assert len(list(r.state.alive_workers())) == 3 def test_tasks_by_worker(self): r = ev_snapshot(State()) r.play() assert len(list(r.state.tasks_by_worker('utest1'))) == 10 assert len(list(r.state.tasks_by_worker('utest2'))) == 10 assert len(r.state.tasks_by_worker['utest1']) == 10 assert len(r.state.tasks_by_worker['utest2']) == 10 def test_survives_unknown_worker_event(self): s = State() s.event({ 'type': 'worker-unknown-event-xxx', 'foo': 'bar', }) s.event({ 'type': 'worker-unknown-event-xxx', 'hostname': 'xxx', 'foo': 'bar', }) def test_survives_unknown_worker_leaving(self): s = State(on_node_leave=Mock(name='on_node_leave')) (worker, created), subject = s.event({ 'type': 'worker-offline', 'hostname': 'unknown@vandelay.com', 'timestamp': time(), 'local_received': time(), 'clock': 301030134894833, }) assert worker == Worker('unknown@vandelay.com') assert not created assert subject == 'offline' assert 'unknown@vandelay.com' not in s.workers s.on_node_leave.assert_called_with(worker) def test_on_node_join_callback(self): s = State(on_node_join=Mock(name='on_node_join')) (worker, created), subject = s.event({ 'type': 'worker-online', 'hostname': 'george@vandelay.com', 'timestamp': time(), 'local_received': time(), 'clock': 34314, }) assert worker assert created assert subject == 'online' assert 'george@vandelay.com' in s.workers s.on_node_join.assert_called_with(worker) def test_survives_unknown_task_event(self): s = State() s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'x', 'hostname': 'y', 'timestamp': time(), 'local_received': time(), 'clock': 0, }) def test_limits_maxtasks(self): s = State(max_tasks_in_memory=1) s.heap_multiplier = 2 s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'x', 'hostname': 'y', 'clock': 3, 'timestamp': time(), 'local_received': time(), }) s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'y', 'hostname': 'y', 'clock': 4, 'timestamp': time(), 'local_received': time(), }) s.event({ 'type': 'task-unknown-event-xxx', 'foo': 'bar', 'uuid': 'z', 'hostname': 'y', 'clock': 5, 'timestamp': time(), 'local_received': time(), }) assert len(s._taskheap) == 2 assert s._taskheap[0].clock == 4 assert s._taskheap[1].clock == 5 s._taskheap.append(s._taskheap[0]) assert list(s.tasks_by_time()) def test_callback(self): scratch = {} def callback(state, event): scratch['recv'] = True s = State(callback=callback) s.event({'type': 'worker-online'}) assert scratch.get('recv') celery-4.1.0/t/unit/__init__.py0000644000175000017500000000000013130607475016254 0ustar omeromer00000000000000celery-4.1.0/t/unit/tasks/0000755000175000017500000000000013135426347015304 5ustar omeromer00000000000000celery-4.1.0/t/unit/tasks/test_chord.py0000644000175000017500000002101613130607475020012 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from contextlib import contextmanager from case import Mock from celery import group, uuid from celery import canvas from celery import result from celery.exceptions import ChordError, Retry from celery.five import range from celery.result import AsyncResult, GroupResult, EagerResult def passthru(x): return x class ChordCase: def setup(self): @self.app.task(shared=False) 
def add(x, y): return x + y self.add = add class TSR(GroupResult): is_ready = True value = None def ready(self): return self.is_ready def join(self, propagate=True, **kwargs): if propagate: for value in self.value: if isinstance(value, Exception): raise value return self.value join_native = join def _failed_join_report(self): for value in self.value: if isinstance(value, Exception): yield EagerResult('some_id', value, 'FAILURE') class TSRNoReport(TSR): def _failed_join_report(self): return iter([]) @contextmanager def patch_unlock_retry(app): unlock = app.tasks['celery.chord_unlock'] retry = Mock() retry.return_value = Retry() prev, unlock.retry = unlock.retry, retry try: yield unlock, retry finally: unlock.retry = prev class test_unlock_chord_task(ChordCase): def test_unlock_ready(self): class AlwaysReady(TSR): is_ready = True value = [2, 4, 8, 6] with self._chord_context(AlwaysReady) as (cb, retry, _): cb.type.apply_async.assert_called_with( ([2, 4, 8, 6],), {}, task_id=cb.id, ) # didn't retry assert not retry.call_count def test_deps_ready_fails(self): GroupResult = Mock(name='GroupResult') GroupResult.return_value.ready.side_effect = KeyError('foo') unlock_chord = self.app.tasks['celery.chord_unlock'] with pytest.raises(KeyError): unlock_chord('groupid', Mock(), result=[Mock()], GroupResult=GroupResult, result_from_tuple=Mock()) def test_callback_fails(self): class AlwaysReady(TSR): is_ready = True value = [2, 4, 8, 6] def setup(callback): callback.apply_async.side_effect = IOError() with self._chord_context(AlwaysReady, setup) as (cb, retry, fail): fail.assert_called() assert fail.call_args[0][0] == cb.id assert isinstance(fail.call_args[1]['exc'], ChordError) def test_unlock_ready_failed(self): class Failed(TSR): is_ready = True value = [2, KeyError('foo'), 8, 6] with self._chord_context(Failed) as (cb, retry, fail_current): cb.type.apply_async.assert_not_called() # didn't retry assert not retry.call_count fail_current.assert_called() assert fail_current.call_args[0][0] == cb.id assert isinstance(fail_current.call_args[1]['exc'], ChordError) assert 'some_id' in str(fail_current.call_args[1]['exc']) def test_unlock_ready_failed_no_culprit(self): class Failed(TSRNoReport): is_ready = True value = [2, KeyError('foo'), 8, 6] with self._chord_context(Failed) as (cb, retry, fail_current): fail_current.assert_called() assert fail_current.call_args[0][0] == cb.id assert isinstance(fail_current.call_args[1]['exc'], ChordError) @contextmanager def _chord_context(self, ResultCls, setup=None, **kwargs): @self.app.task(shared=False) def callback(*args, **kwargs): pass self.app.finalize() pts, result.GroupResult = result.GroupResult, ResultCls callback.apply_async = Mock() callback_s = callback.s() callback_s.id = 'callback_id' fail_current = self.app.backend.fail_from_current_stack = Mock() try: with patch_unlock_retry(self.app) as (unlock, retry): signature, canvas.maybe_signature = ( canvas.maybe_signature, passthru, ) if setup: setup(callback) try: assert self.app.tasks['celery.chord_unlock'] is unlock try: unlock( 'group_id', callback_s, result=[ self.app.AsyncResult(r) for r in ['1', 2, 3] ], GroupResult=ResultCls, **kwargs ) except Retry: pass finally: canvas.maybe_signature = signature yield callback_s, retry, fail_current finally: result.GroupResult = pts def test_when_not_ready(self): class NeverReady(TSR): is_ready = False with self._chord_context(NeverReady, interval=10, max_retries=30) \ as (cb, retry, _): cb.type.apply_async.assert_not_called() # did retry 
retry.assert_called_with(countdown=10, max_retries=30) def test_is_in_registry(self): assert 'celery.chord_unlock' in self.app.tasks class test_chord(ChordCase): def test_eager(self): from celery import chord @self.app.task(shared=False) def addX(x, y): return x + y @self.app.task(shared=False) def sumX(n): return sum(n) self.app.conf.task_always_eager = True x = chord(addX.s(i, i) for i in range(10)) body = sumX.s() result = x(body) assert result.get() == sum(i + i for i in range(10)) def test_apply(self): self.app.conf.task_always_eager = False from celery import chord m = Mock() m.app.conf.task_always_eager = False m.AsyncResult = AsyncResult prev, chord.run = chord.run, m try: x = chord(self.add.s(i, i) for i in range(10)) body = self.add.s(2) result = x(body) assert result.id # does not modify original signature with pytest.raises(KeyError): body.options['task_id'] chord.run.assert_called() finally: chord.run = prev class test_add_to_chord: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add @self.app.task(shared=False, bind=True) def adds(self, sig, lazy=False): return self.add_to_chord(sig, lazy) self.adds = adds def test_add_to_chord(self): self.app.backend = Mock(name='backend') sig = self.add.s(2, 2) sig.delay = Mock(name='sig.delay') self.adds.request.group = uuid() self.adds.request.id = uuid() with pytest.raises(ValueError): # task not part of chord self.adds.run(sig) self.adds.request.chord = self.add.s() res1 = self.adds.run(sig, True) assert res1 == sig assert sig.options['task_id'] assert sig.options['group_id'] == self.adds.request.group assert sig.options['chord'] == self.adds.request.chord sig.delay.assert_not_called() self.app.backend.add_to_chord.assert_called_with( self.adds.request.group, sig.freeze(), ) self.app.backend.reset_mock() sig2 = self.add.s(4, 4) sig2.delay = Mock(name='sig2.delay') res2 = self.adds.run(sig2) assert res2 == sig2.delay.return_value assert sig2.options['task_id'] assert sig2.options['group_id'] == self.adds.request.group assert sig2.options['chord'] == self.adds.request.chord sig2.delay.assert_called_with() self.app.backend.add_to_chord.assert_called_with( self.adds.request.group, sig2.freeze(), ) class test_Chord_task(ChordCase): def test_run(self): self.app.backend = Mock() self.app.backend.cleanup = Mock() self.app.backend.cleanup.__name__ = 'cleanup' Chord = self.app.tasks['celery.chord'] body = self.add.signature() Chord(group(self.add.signature((i, i)) for i in range(5)), body) Chord([self.add.signature((j, j)) for j in range(5)], body) assert self.app.backend.apply_chord.call_count == 2 celery-4.1.0/t/unit/tasks/__init__.py0000644000175000017500000000000013130607475017401 0ustar omeromer00000000000000celery-4.1.0/t/unit/tasks/test_canvas.py0000644000175000017500000005120113130607475020165 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import json import pytest from case import MagicMock, Mock from celery._state import _task_stack from celery.canvas import ( Signature, chain, _chain, group, chord, signature, xmap, xstarmap, chunks, _maybe_group, maybe_signature, maybe_unroll_group, ) from celery.result import AsyncResult, GroupResult, EagerResult SIG = Signature({ 'task': 'TASK', 'args': ('A1',), 'kwargs': {'K1': 'V1'}, 'options': {'task_id': 'TASK_ID'}, 'subtask_type': ''}, ) class test_maybe_unroll_group: def test_when_no_len_and_no_length_hint(self): g = MagicMock(name='group') g.tasks.__len__.side_effect = TypeError() g.tasks.__length_hint__ = Mock() 
g.tasks.__length_hint__.return_value = 0
        assert maybe_unroll_group(g) is g
        g.tasks.__length_hint__.side_effect = AttributeError()
        assert maybe_unroll_group(g) is g


class CanvasCase:

    def setup(self):
        @self.app.task(shared=False)
        def add(x, y):
            return x + y
        self.add = add

        @self.app.task(shared=False)
        def mul(x, y):
            return x * y
        self.mul = mul

        @self.app.task(shared=False)
        def div(x, y):
            return x / y
        self.div = div


class test_Signature(CanvasCase):

    def test_getitem_property_class(self):
        assert Signature.task
        assert Signature.args
        assert Signature.kwargs
        assert Signature.options
        assert Signature.subtask_type

    def test_getitem_property(self):
        assert SIG.task == 'TASK'
        assert SIG.args == ('A1',)
        assert SIG.kwargs == {'K1': 'V1'}
        assert SIG.options == {'task_id': 'TASK_ID'}
        assert SIG.subtask_type == ''

    def test_call(self):
        x = Signature('foo', (1, 2), {'arg1': 33}, app=self.app)
        x.type = Mock(name='type')
        x(3, 4, arg2=66)
        x.type.assert_called_with(3, 4, 1, 2, arg1=33, arg2=66)

    def test_link_on_scalar(self):
        x = Signature('TASK', link=Signature('B'))
        assert x.options['link']
        x.link(Signature('C'))
        assert isinstance(x.options['link'], list)
        assert Signature('B') in x.options['link']
        assert Signature('C') in x.options['link']

    def test_json(self):
        x = Signature('TASK', link=Signature('B', app=self.app), app=self.app)
        assert x.__json__() == dict(x)

    @pytest.mark.usefixtures('depends_on_current_app')
    def test_reduce(self):
        x = Signature('TASK', (2, 4), app=self.app)
        fun, args = x.__reduce__()
        assert fun(*args) == x

    def test_replace(self):
        x = Signature('TASK', ('A',), {})
        assert x.replace(args=('B',)).args == ('B',)
        assert x.replace(kwargs={'FOO': 'BAR'}).kwargs == {
            'FOO': 'BAR',
        }
        assert x.replace(options={'task_id': '123'}).options == {
            'task_id': '123',
        }

    def test_set(self):
        assert Signature('TASK', x=1).set(task_id='2').options == {
            'x': 1,
            'task_id': '2',
        }

    def test_link(self):
        x = signature(SIG)
        x.link(SIG)
        x.link(SIG)
        assert SIG in x.options['link']
        assert len(x.options['link']) == 1

    def test_link_error(self):
        x = signature(SIG)
        x.link_error(SIG)
        x.link_error(SIG)
        assert SIG in x.options['link_error']
        assert len(x.options['link_error']) == 1

    def test_flatten_links(self):
        tasks = [self.add.s(2, 2), self.mul.s(4), self.div.s(2)]
        tasks[0].link(tasks[1])
        tasks[1].link(tasks[2])
        assert tasks[0].flatten_links() == tasks

    def test_OR(self):
        x = self.add.s(2, 2) | self.mul.s(4)
        assert isinstance(x, _chain)
        y = self.add.s(4, 4) | self.div.s(2)
        z = x | y
        assert isinstance(y, _chain)
        assert isinstance(z, _chain)
        assert len(z.tasks) == 4
        with pytest.raises(TypeError):
            x | 10
        ax = self.add.s(2, 2) | (self.add.s(4) | self.add.s(8))
        assert isinstance(ax, _chain)
        assert len(ax.tasks) == 3, 'consolidates chain to chain'

    def test_INVERT(self):
        x = self.add.s(2, 2)
        x.apply_async = Mock()
        x.apply_async.return_value = Mock()
        x.apply_async.return_value.get = Mock()
        x.apply_async.return_value.get.return_value = 4
        assert ~x == 4
        x.apply_async.assert_called()

    def test_merge_immutable(self):
        x = self.add.si(2, 2, foo=1)
        args, kwargs, options = x._merge((4,), {'bar': 2}, {'task_id': 3})
        assert args == (2, 2)
        assert kwargs == {'foo': 1}
        assert options == {'task_id': 3}

    def test_set_immutable(self):
        x = self.add.s(2, 2)
        assert not x.immutable
        x.set(immutable=True)
        assert x.immutable
        x.set(immutable=False)
        assert not x.immutable

    def test_election(self):
        x = self.add.s(2, 2)
        x.freeze('foo')
        x.type.app.control = Mock()
        r = x.election()
        x.type.app.control.election.assert_called()
        assert r.id == 'foo'

    def
test_AsyncResult_when_not_registered(self): s = signature('xxx.not.registered', app=self.app) assert s.AsyncResult def test_apply_async_when_not_registered(self): s = signature('xxx.not.registered', app=self.app) assert s._apply_async class test_xmap_xstarmap(CanvasCase): def test_apply(self): for type, attr in [(xmap, 'map'), (xstarmap, 'starmap')]: args = [(i, i) for i in range(10)] s = getattr(self.add, attr)(args) s.type = Mock() s.apply_async(foo=1) s.type.apply_async.assert_called_with( (), {'task': self.add.s(), 'it': args}, foo=1, route_name=self.add.name, ) assert type.from_dict(dict(s)) == s assert repr(s) class test_chunks(CanvasCase): def test_chunks(self): x = self.add.chunks(range(100), 10) assert dict(chunks.from_dict(dict(x), app=self.app)) == dict(x) assert x.group() assert len(x.group().tasks) == 10 x.group = Mock() gr = x.group.return_value = Mock() x.apply_async() gr.apply_async.assert_called_with((), {}, route_name=self.add.name) gr.apply_async.reset_mock() x() gr.apply_async.assert_called_with((), {}, route_name=self.add.name) self.app.conf.task_always_eager = True chunks.apply_chunks(app=self.app, **x['kwargs']) class test_chain(CanvasCase): def test_clone_preserves_state(self): x = chain(self.add.s(i, i) for i in range(10)) assert x.clone().tasks == x.tasks assert x.clone().kwargs == x.kwargs assert x.clone().args == x.args def test_repr(self): x = self.add.s(2, 2) | self.add.s(2) assert repr(x) == '%s(2, 2) | add(2)' % (self.add.name,) def test_apply_async(self): c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8) result = c.apply_async() assert result.parent assert result.parent.parent assert result.parent.parent.parent is None def test_splices_chains(self): c = chain( self.add.s(5, 5), chain(self.add.s(6), self.add.s(7), self.add.s(8), app=self.app), app=self.app, ) c.freeze() tasks, _ = c._frozen assert len(tasks) == 4 def test_from_dict_no_tasks(self): assert chain.from_dict(dict(chain(app=self.app)), app=self.app) def test_from_dict_full_subtasks(self): c = chain(self.add.si(1, 2), self.add.si(3, 4), self.add.si(5, 6)) serialized = json.loads(json.dumps(c)) deserialized = chain.from_dict(serialized) for task in deserialized.tasks: assert isinstance(task, Signature) @pytest.mark.usefixtures('depends_on_current_app') def test_app_falls_back_to_default(self): from celery._state import current_app assert chain().app is current_app def test_handles_dicts(self): c = chain( self.add.s(5, 5), dict(self.add.s(8)), app=self.app, ) c.freeze() tasks, _ = c._frozen for task in tasks: assert isinstance(task, Signature) assert task.app is self.app def test_group_to_chord(self): c = ( self.add.s(5) | group([self.add.s(i, i) for i in range(5)], app=self.app) | self.add.s(10) | self.add.s(20) | self.add.s(30) ) c._use_link = True tasks, results = c.prepare_steps((), c.tasks) assert tasks[-1].args[0] == 5 assert isinstance(tasks[-2], chord) assert len(tasks[-2].tasks) == 5 body = tasks[-2].body assert len(body.tasks) == 3 assert body.tasks[0].args[0] == 10 assert body.tasks[1].args[0] == 20 assert body.tasks[2].args[0] == 30 c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) c2._use_link = True tasks2, _ = c2.prepare_steps((), c2.tasks) assert isinstance(tasks2[0], group) def test_group_to_chord__protocol_2__or(self): c = ( group([self.add.s(i, i) for i in range(5)], app=self.app) | self.add.s(10) | self.add.s(20) | self.add.s(30) ) assert isinstance(c, chord) def test_group_to_chord__protocol_2(self): c = chain( group([self.add.s(i, i) for i in range(5)], 
app=self.app), self.add.s(10), self.add.s(20), self.add.s(30) ) assert isinstance(c, chord) assert isinstance(c.body, _chain) assert len(c.body.tasks) == 3 c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) c2._use_link = False tasks2, _ = c2.prepare_steps((), c2.tasks) assert isinstance(tasks2[0], group) def test_apply_options(self): class static(Signature): def clone(self, *args, **kwargs): return self def s(*args, **kwargs): return static(self.add, args, kwargs, type=self.add, app=self.app) c = s(2, 2) | s(4) | s(8) r1 = c.apply_async(task_id='some_id') assert r1.id == 'some_id' c.apply_async(group_id='some_group_id') assert c.tasks[-1].options['group_id'] == 'some_group_id' c.apply_async(chord='some_chord_id') assert c.tasks[-1].options['chord'] == 'some_chord_id' c.apply_async(link=[s(32)]) assert c.tasks[-1].options['link'] == [s(32)] c.apply_async(link_error=[s('error')]) for task in c.tasks: assert task.options['link_error'] == [s('error')] def test_reverse(self): x = self.add.s(2, 2) | self.add.s(2) assert isinstance(signature(x), _chain) assert isinstance(signature(dict(x)), _chain) def test_always_eager(self): self.app.conf.task_always_eager = True assert ~(self.add.s(4, 4) | self.add.s(8)) == 16 def test_apply(self): x = chain(self.add.s(4, 4), self.add.s(8), self.add.s(10)) res = x.apply() assert isinstance(res, EagerResult) assert res.get() == 26 assert res.parent.get() == 16 assert res.parent.parent.get() == 8 assert res.parent.parent.parent is None def test_empty_chain_returns_none(self): assert chain(app=self.app)() is None assert chain(app=self.app).apply_async() is None def test_call_no_tasks(self): x = chain() assert not x() def test_call_with_tasks(self): x = self.add.s(2, 2) | self.add.s(4) x.apply_async = Mock() x(2, 2, foo=1) x.apply_async.assert_called_with((2, 2), {'foo': 1}) def test_from_dict_no_args__with_args(self): x = dict(self.add.s(2, 2) | self.add.s(4)) x['args'] = None assert isinstance(chain.from_dict(x), _chain) x['args'] = (2,) assert isinstance(chain.from_dict(x), _chain) def test_accepts_generator_argument(self): x = chain(self.add.s(i) for i in range(10)) assert x.tasks[0].type, self.add assert x.type def test_chord_sets_result_parent(self): g = (self.add.s(0, 0) | group(self.add.s(i, i) for i in range(1, 10)) | self.add.s(2, 2) | self.add.s(4, 4)) res = g.freeze() assert isinstance(res, AsyncResult) assert not isinstance(res, GroupResult) assert isinstance(res.parent, AsyncResult) assert not isinstance(res.parent, GroupResult) assert isinstance(res.parent.parent, GroupResult) assert isinstance(res.parent.parent.parent, AsyncResult) assert not isinstance(res.parent.parent.parent, GroupResult) assert res.parent.parent.parent.parent is None seen = set() node = res while node: assert node.id not in seen seen.add(node.id) node = node.parent class test_group(CanvasCase): def test_repr(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) assert repr(x) def test_reverse(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) assert isinstance(signature(x), group) assert isinstance(signature(dict(x)), group) def test_cannot_link_on_group(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) with pytest.raises(TypeError): x.apply_async(link=self.add.s(2, 2)) def test_cannot_link_error_on_group(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) with pytest.raises(TypeError): x.apply_async(link_error=self.add.s(2, 2)) def test_group_with_group_argument(self): g1 = group(self.add.s(2, 2), self.add.s(4, 4), app=self.app) g2 = group(g1, 
app=self.app) assert g2.tasks is g1.tasks def test_maybe_group_sig(self): assert _maybe_group(self.add.s(2, 2), self.app) == [self.add.s(2, 2)] def test_apply(self): x = group([self.add.s(4, 4), self.add.s(8, 8)]) res = x.apply() assert res.get() == [8, 16] def test_apply_async(self): x = group([self.add.s(4, 4), self.add.s(8, 8)]) x.apply_async() def test_prepare_with_dict(self): x = group([self.add.s(4, 4), dict(self.add.s(8, 8))], app=self.app) x.apply_async() def test_group_in_group(self): g1 = group(self.add.s(2, 2), self.add.s(4, 4), app=self.app) g2 = group(self.add.s(8, 8), g1, self.add.s(16, 16), app=self.app) g2.apply_async() def test_set_immutable(self): g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app) g1.set_immutable(True) for task in g1.tasks: task.set_immutable.assert_called_with(True) def test_link(self): g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app) sig = Mock(name='sig') g1.link(sig) g1.tasks[0].link.assert_called_with(sig.clone().set(immutable=True)) def test_link_error(self): g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app) sig = Mock(name='sig') g1.link_error(sig) g1.tasks[0].link_error.assert_called_with( sig.clone().set(immutable=True), ) def test_apply_empty(self): x = group(app=self.app) x.apply() res = x.apply_async() assert res assert not res.results def test_apply_async_with_parent(self): _task_stack.push(self.add) try: self.add.push_request(called_directly=False) try: assert not self.add.request.children x = group([self.add.s(4, 4), self.add.s(8, 8)]) res = x() assert self.add.request.children assert res in self.add.request.children assert len(self.add.request.children) == 1 finally: self.add.pop_request() finally: _task_stack.pop() def test_from_dict(self): x = group([self.add.s(2, 2), self.add.s(4, 4)]) x['args'] = (2, 2) assert group.from_dict(dict(x)) x['args'] = None assert group.from_dict(dict(x)) def test_call_empty_group(self): x = group(app=self.app) assert not len(x()) x.delay() x.apply_async() x() def test_skew(self): g = group([self.add.s(i, i) for i in range(10)]) g.skew(start=1, stop=10, step=1) for i, task in enumerate(g.tasks): assert task.options['countdown'] == i + 1 def test_iter(self): g = group([self.add.s(i, i) for i in range(10)]) assert list(iter(g)) == list(g.keys()) @staticmethod def helper_test_get_delay(result): import time t0 = time.time() while not result.ready(): time.sleep(0.01) if time.time() - t0 > 1: return None return result.get() def test_kwargs_direct(self): res = [self.add(x=1, y=1), self.add(x=1, y=1)] assert res == [2, 2] def test_kwargs_apply(self): x = group([self.add.s(), self.add.s()]) res = x.apply(kwargs=dict(x=1, y=1)).get() assert res == [2, 2] def test_kwargs_apply_async(self): self.app.conf.task_always_eager = True x = group([self.add.s(), self.add.s()]) res = self.helper_test_get_delay(x.apply_async(kwargs=dict(x=1, y=1))) assert res == [2, 2] def test_kwargs_delay(self): self.app.conf.task_always_eager = True x = group([self.add.s(), self.add.s()]) res = self.helper_test_get_delay(x.delay(x=1, y=1)) assert res == [2, 2] def test_kwargs_delay_partial(self): self.app.conf.task_always_eager = True x = group([self.add.s(1), self.add.s(x=1)]) res = self.helper_test_get_delay(x.delay(y=1)) assert res == [2, 2] class test_chord(CanvasCase): def test_reverse(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) assert isinstance(signature(x), chord) assert isinstance(signature(dict(x)), chord) def test_clone_clones_body(self): x = chord([self.add.s(2, 2),
self.add.s(4, 4)], body=self.mul.s(4)) y = x.clone() assert x.kwargs['body'] is not y.kwargs['body'] y.kwargs.pop('body') z = y.clone() assert z.kwargs.get('body') is None def test_argument_is_group(self): x = chord(group(self.add.s(2, 2), self.add.s(4, 4), app=self.app)) assert x.tasks def test_app_when_app(self): app = Mock(name='app') x = chord([self.add.s(4, 4)], app=app) assert x.app is app def test_app_when_app_in_task(self): t1 = Mock(name='t1') t2 = Mock(name='t2') x = chord([t1, self.add.s(4, 4)]) assert x.app is x.tasks[0].app t1.app = None x = chord([t1], body=t2) assert x.app is t2._app def test_app_when_header_is_empty(self): x = chord([], self.add.s(4, 4)) assert x.app is self.add.app @pytest.mark.usefixtures('depends_on_current_app') def test_app_fallback_to_current(self): from celery._state import current_app t1 = Mock(name='t1') t1.app = t1._app = None x = chord([t1], body=t1) assert x.app is current_app def test_set_immutable(self): x = chord([Mock(name='t1'), Mock(name='t2')], app=self.app) x.set_immutable(True) def test_links_to_body(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) x.link(self.div.s(2)) assert not x.options.get('link') assert x.kwargs['body'].options['link'] x.link_error(self.div.s(2)) assert not x.options.get('link_error') assert x.kwargs['body'].options['link_error'] assert x.tasks assert x.body def test_repr(self): x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) assert repr(x) x.kwargs['body'] = None assert 'without body' in repr(x) def test_freeze_tasks_is_not_group(self): x = chord([self.add.s(2, 2)], body=self.add.s(), app=self.app) x.freeze() x.tasks = [self.add.s(2, 2)] x.freeze() class test_maybe_signature(CanvasCase): def test_is_None(self): assert maybe_signature(None, app=self.app) is None def test_is_dict(self): assert isinstance(maybe_signature(dict(self.add.s()), app=self.app), Signature) def test_when_sig(self): s = self.add.s() assert maybe_signature(s, app=self.app) is s celery-4.1.0/t/unit/tasks/test_states.py0000644000175000017500000000216513130607475020222 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from celery import states class test_state_precedence: @pytest.mark.parametrize('r,l', [ (states.SUCCESS, states.PENDING), (states.FAILURE, states.RECEIVED), (states.REVOKED, states.STARTED), (states.SUCCESS, 'CRASHED'), (states.FAILURE, 'CRASHED'), ]) def test_gt(self, r, l): assert states.state(r) > states.state(l) @pytest.mark.parametrize('r,l', [ ('CRASHED', states.REVOKED), ]) def test_gte(self, r, l): assert states.state(r) >= states.state(l) @pytest.mark.parametrize('r,l', [ (states.PENDING, states.SUCCESS), (states.RECEIVED, states.FAILURE), (states.STARTED, states.REVOKED), ('CRASHED', states.SUCCESS), ('CRASHED', states.FAILURE), (states.REVOKED, 'CRASHED'), ]) def test_lt(self, r, l): assert states.state(r) < states.state(l) @pytest.mark.parametrize('r,l', [ (states.REVOKED, 'CRASHED'), ]) def test_lte(self, r, l): assert states.state(r) <= states.state(l) celery-4.1.0/t/unit/tasks/test_trace.py0000644000175000017500000002556113130607475020022 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from kombu.exceptions import EncodeError from celery import group, uuid from celery import signals from celery import states from celery.exceptions import Ignore, Retry, Reject from celery.app.trace import ( TraceInfo, build_tracer, get_log_policy, 
log_policy_reject, log_policy_ignore, log_policy_internal, log_policy_expected, log_policy_unexpected, trace_task, _trace_task_ret, _fast_trace_task, setup_worker_optimizations, reset_worker_optimizations, ) def trace(app, task, args=(), kwargs={}, propagate=False, eager=True, request=None, **opts): t = build_tracer(task.name, task, eager=eager, propagate=propagate, app=app, **opts) ret = t('id-1', args, kwargs, request) return ret.retval, ret.info class TraceCase: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add @self.app.task(shared=False, ignore_result=True) def add_cast(x, y): return x + y self.add_cast = add_cast @self.app.task(shared=False) def raises(exc): raise exc self.raises = raises def trace(self, *args, **kwargs): return trace(self.app, *args, **kwargs) class test_trace(TraceCase): def test_trace_successful(self): retval, info = self.trace(self.add, (2, 2), {}) assert info is None assert retval == 4 def test_trace_on_success(self): @self.app.task(shared=False, on_success=Mock()) def add_with_success(x, y): return x + y self.trace(add_with_success, (2, 2), {}) add_with_success.on_success.assert_called() def test_get_log_policy(self): einfo = Mock(name='einfo') einfo.internal = False assert get_log_policy(self.add, einfo, Reject()) is log_policy_reject assert get_log_policy(self.add, einfo, Ignore()) is log_policy_ignore self.add.throws = (TypeError,) assert (get_log_policy(self.add, einfo, KeyError()) is log_policy_unexpected) assert (get_log_policy(self.add, einfo, TypeError()) is log_policy_expected) einfo2 = Mock(name='einfo2') einfo2.internal = True assert (get_log_policy(self.add, einfo2, KeyError()) is log_policy_internal) def test_trace_after_return(self): @self.app.task(shared=False, after_return=Mock()) def add_with_after_return(x, y): return x + y self.trace(add_with_after_return, (2, 2), {}) add_with_after_return.after_return.assert_called() def test_with_prerun_receivers(self): on_prerun = Mock() signals.task_prerun.connect(on_prerun) try: self.trace(self.add, (2, 2), {}) on_prerun.assert_called() finally: signals.task_prerun.receivers[:] = [] def test_with_postrun_receivers(self): on_postrun = Mock() signals.task_postrun.connect(on_postrun) try: self.trace(self.add, (2, 2), {}) on_postrun.assert_called() finally: signals.task_postrun.receivers[:] = [] def test_with_success_receivers(self): on_success = Mock() signals.task_success.connect(on_success) try: self.trace(self.add, (2, 2), {}) on_success.assert_called() finally: signals.task_success.receivers[:] = [] def test_when_chord_part(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock() request = {'chord': uuid()} self.trace(add, (2, 2), {}, request=request) add.backend.mark_as_done.assert_called() args, kwargs = add.backend.mark_as_done.call_args assert args[0] == 'id-1' assert args[1] == 4 assert args[2].chord == request['chord'] assert not args[3] def test_when_backend_cleanup_raises(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock(name='backend') add.backend.process_cleanup.side_effect = KeyError() self.trace(add, (2, 2), {}, eager=False) add.backend.process_cleanup.assert_called_with() add.backend.process_cleanup.side_effect = MemoryError() with pytest.raises(MemoryError): self.trace(add, (2, 2), {}, eager=False) def test_when_Ignore(self): @self.app.task(shared=False) def ignored(): raise Ignore() retval, info = self.trace(ignored, (), {}) assert info.state == states.IGNORED def test_when_Reject(self): 
@self.app.task(shared=False) def rejecting(): raise Reject() retval, info = self.trace(rejecting, (), {}) assert info.state == states.REJECTED def test_backend_cleanup_raises(self): self.add.backend.process_cleanup = Mock() self.add.backend.process_cleanup.side_effect = RuntimeError() self.trace(self.add, (2, 2), {}) @patch('celery.canvas.maybe_signature') def test_callbacks__scalar(self, maybe_signature): sig = Mock(name='sig') request = {'callbacks': [sig], 'root_id': 'root'} maybe_signature.return_value = sig retval, _ = self.trace(self.add, (2, 2), {}, request=request) sig.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', ) @patch('celery.canvas.maybe_signature') def test_chain_proto2(self, maybe_signature): sig = Mock(name='sig') sig2 = Mock(name='sig2') request = {'chain': [sig2, sig], 'root_id': 'root'} maybe_signature.return_value = sig retval, _ = self.trace(self.add, (2, 2), {}, request=request) sig.apply_async.assert_called_with( (4, ), parent_id='id-1', root_id='root', chain=[sig2], ) @patch('celery.canvas.maybe_signature') def test_callbacks__EncodeError(self, maybe_signature): sig = Mock(name='sig') request = {'callbacks': [sig], 'root_id': 'root'} maybe_signature.return_value = sig sig.apply_async.side_effect = EncodeError() retval, einfo = self.trace(self.add, (2, 2), {}, request=request) assert einfo.state == states.FAILURE @patch('celery.canvas.maybe_signature') @patch('celery.app.trace.group.apply_async') def test_callbacks__sigs(self, group_, maybe_signature): sig1 = Mock(name='sig') sig2 = Mock(name='sig2') sig3 = group([Mock(name='g1'), Mock(name='g2')], app=self.app) sig3.apply_async = Mock(name='gapply') request = {'callbacks': [sig1, sig3, sig2], 'root_id': 'root'} def passt(s, *args, **kwargs): return s maybe_signature.side_effect = passt retval, _ = self.trace(self.add, (2, 2), {}, request=request) group_.assert_called_with( (4,), parent_id='id-1', root_id='root', ) sig3.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', ) @patch('celery.canvas.maybe_signature') @patch('celery.app.trace.group.apply_async') def test_callbacks__only_groups(self, group_, maybe_signature): sig1 = group([Mock(name='g1'), Mock(name='g2')], app=self.app) sig2 = group([Mock(name='g3'), Mock(name='g4')], app=self.app) sig1.apply_async = Mock(name='gapply') sig2.apply_async = Mock(name='gapply') request = {'callbacks': [sig1, sig2], 'root_id': 'root'} def passt(s, *args, **kwargs): return s maybe_signature.side_effect = passt retval, _ = self.trace(self.add, (2, 2), {}, request=request) sig1.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', ) sig2.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', ) def test_trace_SystemExit(self): with pytest.raises(SystemExit): self.trace(self.raises, (SystemExit(),), {}) def test_trace_Retry(self): exc = Retry('foo', 'bar') _, info = self.trace(self.raises, (exc,), {}) assert info.state == states.RETRY assert info.retval is exc def test_trace_exception(self): exc = KeyError('foo') _, info = self.trace(self.raises, (exc,), {}) assert info.state == states.FAILURE assert info.retval is exc def test_trace_task_ret__no_content_type(self): _trace_task_ret( self.add.name, 'id1', {}, ((2, 2), {}, {}), None, None, app=self.app, ) def test_fast_trace_task__no_content_type(self): self.app.tasks[self.add.name].__trace__ = build_tracer( self.add.name, self.add, app=self.app, ) _fast_trace_task( self.add.name, 'id1', {}, ((2, 2), {}, {}), None, None, app=self.app, 
_loc=[self.app.tasks, {}, 'hostname'] ) def test_trace_exception_propagate(self): with pytest.raises(KeyError): self.trace(self.raises, (KeyError('foo'),), {}, propagate=True) @patch('celery.app.trace.build_tracer') @patch('celery.app.trace.report_internal_error') def test_outside_body_error(self, report_internal_error, build_tracer): tracer = Mock() tracer.side_effect = KeyError('foo') build_tracer.return_value = tracer @self.app.task(shared=False) def xtask(): pass trace_task(xtask, 'uuid', (), {}) assert report_internal_error.call_count assert xtask.__trace__ is tracer class test_TraceInfo(TraceCase): class TI(TraceInfo): __slots__ = TraceInfo.__slots__ + ('__dict__',) def test_handle_error_state(self): x = self.TI(states.FAILURE) x.handle_failure = Mock() x.handle_error_state(self.add_cast, self.add_cast.request) x.handle_failure.assert_called_with( self.add_cast, self.add_cast.request, store_errors=self.add_cast.store_errors_even_if_ignored, call_errbacks=True, ) @patch('celery.app.trace.ExceptionInfo') def test_handle_reject(self, ExceptionInfo): x = self.TI(states.FAILURE) x._log_error = Mock(name='log_error') req = Mock(name='req') x.handle_reject(self.add, req) x._log_error.assert_called_with(self.add, req, ExceptionInfo()) class test_stackprotection: def test_stackprotection(self): setup_worker_optimizations(self.app) try: @self.app.task(shared=False, bind=True) def foo(self, i): if i: return foo(0) return self.request assert foo(1).called_directly finally: reset_worker_optimizations() celery-4.1.0/t/unit/tasks/test_result.py0000644000175000017500000007064413130607475020244 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import traceback from contextlib import contextmanager from case import Mock, call, patch, skip from celery import uuid from celery import states from celery.backends.base import SyncBackendMixin from celery.exceptions import ( CPendingDeprecationWarning, ImproperlyConfigured, IncompleteStream, TimeoutError, ) from celery.five import range from celery.result import ( AsyncResult, EagerResult, ResultSet, GroupResult, result_from_tuple, assert_will_not_block, ) from celery.utils.serialization import pickle PYTRACEBACK = """\ Traceback (most recent call last): File "foo.py", line 2, in foofunc don't matter File "bar.py", line 3, in barfunc don't matter Doesn't matter: really!\ """ def mock_task(name, state, result, traceback=None): return dict( id=uuid(), name=name, state=state, result=result, traceback=traceback, ) def save_result(app, task): traceback = task.get('traceback') or 'Some traceback' if task['state'] == states.SUCCESS: app.backend.mark_as_done(task['id'], task['result']) elif task['state'] == states.RETRY: app.backend.mark_as_retry( task['id'], task['result'], traceback=traceback, ) else: app.backend.mark_as_failure( task['id'], task['result'], traceback=traceback, ) def make_mock_group(app, size=10): tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)] [save_result(app, task) for task in tasks] return [app.AsyncResult(task['id']) for task in tasks] class _MockBackend: def add_pending_result(self, *args, **kwargs): return True def wait_for_pending(self, *args, **kwargs): return True class test_AsyncResult: def setup(self): self.app.conf.result_cache_max = 100 self.app.conf.result_serializer = 'pickle' self.task1 = mock_task('task1', states.SUCCESS, 'the') self.task2 = mock_task('task2', states.SUCCESS, 'quick') self.task3 = mock_task('task3', states.FAILURE, KeyError('brown')) 
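# task4 and task5 below cover the remaining result shapes: a RETRY result, and a FAILURE result stored together with PYTRACEBACK so the traceback-related tests have real frames to assert against.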
self.task4 = mock_task('task3', states.RETRY, KeyError('red')) self.task5 = mock_task( 'task3', states.FAILURE, KeyError('blue'), PYTRACEBACK, ) for task in (self.task1, self.task2, self.task3, self.task4, self.task5): save_result(self.app, task) @self.app.task(shared=False) def mytask(): pass self.mytask = mytask @patch('celery.result.task_join_will_block') def test_assert_will_not_block(self, task_join_will_block): task_join_will_block.return_value = True with pytest.raises(RuntimeError): assert_will_not_block() task_join_will_block.return_value = False assert_will_not_block() @patch('celery.result.task_join_will_block') def test_get_sync_subtask_option(self, task_join_will_block): task_join_will_block.return_value = True tid = uuid() backend = _MockBackend() res_subtask_async = AsyncResult(tid, backend=backend) with pytest.raises(RuntimeError): res_subtask_async.get() res_subtask_async.get(disable_sync_subtasks=False) def test_without_id(self): with pytest.raises(ValueError): AsyncResult(None, app=self.app) def test_compat_properties(self): x = self.app.AsyncResult('1') assert x.task_id == x.id x.task_id = '2' assert x.id == '2' @pytest.mark.usefixtures('depends_on_current_app') def test_reduce_direct(self): x = AsyncResult('1', app=self.app) fun, args = x.__reduce__() assert fun(*args) == x def test_children(self): x = self.app.AsyncResult('1') children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] x._cache = {'children': children, 'status': states.SUCCESS} x.backend = Mock() assert x.children assert len(x.children) == 3 def test_propagates_for_parent(self): x = self.app.AsyncResult(uuid()) x.backend = Mock(name='backend') x.backend.get_task_meta.return_value = {} x.backend.wait_for_pending.return_value = 84 x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE) with pytest.raises(KeyError): x.get(propagate=True) x.backend.wait_for_pending.assert_not_called() x.parent = EagerResult(uuid(), 42, states.SUCCESS) assert x.get(propagate=True) == 84 x.backend.wait_for_pending.assert_called() def test_get_children(self): tid = uuid() x = self.app.AsyncResult(tid) child = [self.app.AsyncResult(uuid()).as_tuple() for i in range(10)] x._cache = {'children': child} assert x.children assert len(x.children) == 10 x._cache = {'status': states.SUCCESS} x.backend._cache[tid] = {'result': None} assert x.children is None def test_build_graph_get_leaf_collect(self): x = self.app.AsyncResult('1') x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None} c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] x.iterdeps = Mock() x.iterdeps.return_value = ( (None, x), (x, c[0]), (c[0], c[1]), (c[1], c[2]) ) x.backend.READY_STATES = states.READY_STATES assert x.graph assert x.get_leaf() is 2 it = x.collect() assert list(it) == [ (x, None), (c[0], 0), (c[1], 1), (c[2], 2), ] def test_iterdeps(self): x = self.app.AsyncResult('1') c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] x._cache = {'status': states.SUCCESS, 'result': None, 'children': c} for child in c: child.backend = Mock() child.backend.get_children.return_value = [] it = x.iterdeps() assert list(it) == [ (None, x), (x, c[0]), (x, c[1]), (x, c[2]), ] x._cache = None x.ready = Mock() x.ready.return_value = False with pytest.raises(IncompleteStream): list(x.iterdeps()) list(x.iterdeps(intermediate=True)) def test_eq_not_implemented(self): assert self.app.AsyncResult('1') != object() @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): a1 = self.app.AsyncResult('uuid') restored 
= pickle.loads(pickle.dumps(a1)) assert restored.id == 'uuid' a2 = self.app.AsyncResult('uuid') assert pickle.loads(pickle.dumps(a2)).id == 'uuid' def test_maybe_set_cache_empty(self): self.app.AsyncResult('uuid')._maybe_set_cache(None) def test_set_cache__children(self): r1 = self.app.AsyncResult('id1') r2 = self.app.AsyncResult('id2') r1._set_cache({'children': [r2.as_tuple()]}) assert r2 in r1.children def test_successful(self): ok_res = self.app.AsyncResult(self.task1['id']) nok_res = self.app.AsyncResult(self.task3['id']) nok_res2 = self.app.AsyncResult(self.task4['id']) assert ok_res.successful() assert not nok_res.successful() assert not nok_res2.successful() pending_res = self.app.AsyncResult(uuid()) assert not pending_res.successful() def test_raising(self): notb = self.app.AsyncResult(self.task3['id']) withtb = self.app.AsyncResult(self.task5['id']) with pytest.raises(KeyError): notb.get() try: withtb.get() except KeyError: tb = traceback.format_exc() assert ' File "foo.py", line 2, in foofunc' not in tb assert ' File "bar.py", line 3, in barfunc' not in tb assert 'KeyError:' in tb assert "'blue'" in tb else: raise AssertionError('Did not raise KeyError.') @skip.unless_module('tblib') def test_raising_remote_tracebacks(self): withtb = self.app.AsyncResult(self.task5['id']) self.app.conf.task_remote_tracebacks = True try: withtb.get() except KeyError: tb = traceback.format_exc() assert ' File "foo.py", line 2, in foofunc' in tb assert ' File "bar.py", line 3, in barfunc' in tb assert 'KeyError:' in tb assert "'blue'" in tb else: raise AssertionError('Did not raise KeyError.') def test_str(self): ok_res = self.app.AsyncResult(self.task1['id']) ok2_res = self.app.AsyncResult(self.task2['id']) nok_res = self.app.AsyncResult(self.task3['id']) assert str(ok_res) == self.task1['id'] assert str(ok2_res) == self.task2['id'] assert str(nok_res) == self.task3['id'] pending_id = uuid() pending_res = self.app.AsyncResult(pending_id) assert str(pending_res) == pending_id def test_repr(self): ok_res = self.app.AsyncResult(self.task1['id']) ok2_res = self.app.AsyncResult(self.task2['id']) nok_res = self.app.AsyncResult(self.task3['id']) assert repr(ok_res) == '<AsyncResult: %s>' % (self.task1['id'],) assert repr(ok2_res) == '<AsyncResult: %s>' % (self.task2['id'],) assert repr(nok_res) == '<AsyncResult: %s>' % (self.task3['id'],) pending_id = uuid() pending_res = self.app.AsyncResult(pending_id) assert repr(pending_res) == '<AsyncResult: %s>' % (pending_id,) def test_hash(self): assert (hash(self.app.AsyncResult('x0w991')) == hash(self.app.AsyncResult('x0w991'))) assert (hash(self.app.AsyncResult('x0w991')) != hash(self.app.AsyncResult('x1w991'))) def test_get_traceback(self): ok_res = self.app.AsyncResult(self.task1['id']) nok_res = self.app.AsyncResult(self.task3['id']) nok_res2 = self.app.AsyncResult(self.task4['id']) assert not ok_res.traceback assert nok_res.traceback assert nok_res2.traceback pending_res = self.app.AsyncResult(uuid()) assert not pending_res.traceback def test_get__backend_gives_None(self): res = self.app.AsyncResult(self.task1['id']) res.backend.wait_for = Mock(name='wait_for') res.backend.wait_for.return_value = None assert res.get() is None def test_get(self): ok_res = self.app.AsyncResult(self.task1['id']) ok2_res = self.app.AsyncResult(self.task2['id']) nok_res = self.app.AsyncResult(self.task3['id']) nok2_res = self.app.AsyncResult(self.task4['id']) callback = Mock(name='callback') assert ok_res.get(callback=callback) == 'the' callback.assert_called_with(ok_res.id, 'the') assert ok2_res.get() == 'quick' with pytest.raises(KeyError):
nok_res.get() assert nok_res.get(propagate=False) assert isinstance(nok2_res.result, KeyError) assert ok_res.info == 'the' def test_eq_ne(self): r1 = self.app.AsyncResult(self.task1['id']) r2 = self.app.AsyncResult(self.task1['id']) r3 = self.app.AsyncResult(self.task2['id']) assert r1 == r2 assert r1 != r3 assert r1 == r2.id assert r1 != r3.id @pytest.mark.usefixtures('depends_on_current_app') def test_reduce_restore(self): r1 = self.app.AsyncResult(self.task1['id']) fun, args = r1.__reduce__() assert fun(*args) == r1 def test_get_timeout(self): res = self.app.AsyncResult(self.task4['id']) # has RETRY state with pytest.raises(TimeoutError): res.get(timeout=0.001) pending_res = self.app.AsyncResult(uuid()) with patch('celery.result.time') as _time: with pytest.raises(TimeoutError): pending_res.get(timeout=0.001, interval=0.001) _time.sleep.assert_called_with(0.001) def test_get_timeout_longer(self): res = self.app.AsyncResult(self.task4['id']) # has RETRY state with patch('celery.result.time') as _time: with pytest.raises(TimeoutError): res.get(timeout=1, interval=1) _time.sleep.assert_called_with(1) def test_ready(self): oks = (self.app.AsyncResult(self.task1['id']), self.app.AsyncResult(self.task2['id']), self.app.AsyncResult(self.task3['id'])) assert all(result.ready() for result in oks) assert not self.app.AsyncResult(self.task4['id']).ready() assert not self.app.AsyncResult(uuid()).ready() class test_ResultSet: def test_resultset_repr(self): assert repr(self.app.ResultSet( [self.app.AsyncResult(t) for t in ['1', '2', '3']])) def test_eq_other(self): assert self.app.ResultSet([ self.app.AsyncResult(t) for t in [1, 3, 3]]) != 1 rs1 = self.app.ResultSet([self.app.AsyncResult(1)]) rs2 = self.app.ResultSet([self.app.AsyncResult(1)]) assert rs1 == rs2 def test_get(self): x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]]) b = x.results[0].backend = Mock() b.supports_native_join = False x.join_native = Mock() x.join = Mock() x.get() x.join.assert_called() b.supports_native_join = True x.get() x.join_native.assert_called() def test_eq_ne(self): g1 = self.app.ResultSet([ self.app.AsyncResult('id1'), self.app.AsyncResult('id2'), ]) g2 = self.app.ResultSet([ self.app.AsyncResult('id1'), self.app.AsyncResult('id2'), ]) g3 = self.app.ResultSet([ self.app.AsyncResult('id3'), self.app.AsyncResult('id1'), ]) assert g1 == g2 assert g1 != g3 assert g1 != object() def test_takes_app_from_first_task(self): x = ResultSet([self.app.AsyncResult('id1')]) assert x.app is x.results[0].app x.app = self.app assert x.app is self.app def test_get_empty(self): x = self.app.ResultSet([]) assert x.supports_native_join is None x.join = Mock(name='join') x.get() x.join.assert_called() def test_add(self): x = self.app.ResultSet([self.app.AsyncResult(1)]) x.add(self.app.AsyncResult(2)) assert len(x) == 2 x.add(self.app.AsyncResult(2)) assert len(x) == 2 @contextmanager def dummy_copy(self): with patch('celery.result.copy') as copy: def passt(arg): return arg copy.side_effect = passt yield def test_iterate_respects_subpolling_interval(self): r1 = self.app.AsyncResult(uuid()) r2 = self.app.AsyncResult(uuid()) backend = r1.backend = r2.backend = Mock() backend.subpolling_interval = 10 ready = r1.ready = r2.ready = Mock() def se(*args, **kwargs): ready.side_effect = KeyError() return False ready.return_value = False ready.side_effect = se x = self.app.ResultSet([r1, r2]) with self.dummy_copy(): with patch('celery.result.time') as _time: with pytest.warns(CPendingDeprecationWarning): with 
pytest.raises(KeyError): list(x.iterate()) _time.sleep.assert_called_with(10) backend.subpolling_interval = 0 with patch('celery.result.time') as _time: with pytest.warns(CPendingDeprecationWarning): with pytest.raises(KeyError): ready.return_value = False ready.side_effect = se list(x.iterate()) _time.sleep.assert_not_called() def test_times_out(self): r1 = self.app.AsyncResult(uuid) r1.ready = Mock() r1.ready.return_value = False x = self.app.ResultSet([r1]) with self.dummy_copy(): with patch('celery.result.time'): with pytest.warns(CPendingDeprecationWarning): with pytest.raises(TimeoutError): list(x.iterate(timeout=1)) def test_add_discard(self): x = self.app.ResultSet([]) x.add(self.app.AsyncResult('1')) assert self.app.AsyncResult('1') in x.results x.discard(self.app.AsyncResult('1')) x.discard(self.app.AsyncResult('1')) x.discard('1') assert self.app.AsyncResult('1') not in x.results x.update([self.app.AsyncResult('2')]) def test_clear(self): x = self.app.ResultSet([]) r = x.results x.clear() assert x.results is r class MockAsyncResultFailure(AsyncResult): @property def result(self): return KeyError('baz') @property def state(self): return states.FAILURE def get(self, propagate=True, **kwargs): if propagate: raise self.result return self.result class MockAsyncResultSuccess(AsyncResult): forgotten = False def forget(self): self.forgotten = True @property def result(self): return 42 @property def state(self): return states.SUCCESS def get(self, **kwargs): return self.result class SimpleBackend(SyncBackendMixin): ids = [] def __init__(self, ids=[]): self.ids = ids def _ensure_not_eager(self): pass def get_many(self, *args, **kwargs): return ((id, {'result': i, 'status': states.SUCCESS}) for i, id in enumerate(self.ids)) class test_GroupResult: def setup(self): self.size = 10 self.ts = self.app.GroupResult( uuid(), make_mock_group(self.app, self.size), ) @pytest.mark.usefixtures('depends_on_current_app') def test_is_pickleable(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) assert pickle.loads(pickle.dumps(ts)) == ts ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) assert pickle.loads(pickle.dumps(ts2)) == ts2 @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) fun, args = ts.__reduce__() ts2 = fun(*args) assert ts2.id == ts.id assert ts == ts2 def test_eq_ne(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) ts2 = self.app.GroupResult(ts.id, ts.results) ts3 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) ts4 = self.app.GroupResult(ts.id, [self.app.AsyncResult(uuid())]) assert ts == ts2 assert ts != ts3 assert ts != ts4 assert ts != object() def test_len(self): assert len(self.ts) == self.size def test_eq_other(self): assert self.ts != 1 @pytest.mark.usefixtures('depends_on_current_app') def test_pickleable(self): assert pickle.loads(pickle.dumps(self.ts)) def test_iterate_raises(self): ar = MockAsyncResultFailure(uuid(), app=self.app) ts = self.app.GroupResult(uuid(), [ar]) with pytest.warns(CPendingDeprecationWarning): it = ts.iterate() with pytest.raises(KeyError): next(it) def test_forget(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app), MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.forget() for sub in subs: assert sub.forgotten def test_getitem(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app), MockAsyncResultSuccess(uuid(), app=self.app)] ts = 
self.app.GroupResult(uuid(), subs) assert ts[0] is subs[0] def test_save_restore(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app), MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.save() with pytest.raises(AttributeError): ts.save(backend=object()) assert self.app.GroupResult.restore(ts.id).results == ts.results ts.delete() assert self.app.GroupResult.restore(ts.id) is None with pytest.raises(AttributeError): self.app.GroupResult.restore(ts.id, backend=object()) def test_restore_app(self): subs = [MockAsyncResultSuccess(uuid(), app=self.app)] ts = self.app.GroupResult(uuid(), subs) ts.save() restored = GroupResult.restore(ts.id, app=self.app) assert restored.id == ts.id def test_join_native(self): backend = SimpleBackend() results = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)] ts = self.app.GroupResult(uuid(), results) ts.app.backend = backend backend.ids = [result.id for result in results] res = ts.join_native() assert res == list(range(10)) callback = Mock(name='callback') assert not ts.join_native(callback=callback) callback.assert_has_calls([ call(r.id, i) for i, r in enumerate(ts.results) ]) def test_join_native_raises(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) ts.iter_native = Mock() ts.iter_native.return_value = iter([ (uuid(), {'status': states.FAILURE, 'result': KeyError()}) ]) with pytest.raises(KeyError): ts.join_native(propagate=True) def test_failed_join_report(self): res = Mock() ts = self.app.GroupResult(uuid(), [res]) res.state = states.FAILURE res.backend.is_cached.return_value = True assert next(ts._failed_join_report()) is res res.backend.is_cached.return_value = False with pytest.raises(StopIteration): next(ts._failed_join_report()) def test_repr(self): assert repr( self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])) def test_children_is_results(self): ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) assert ts.children is ts.results def test_iter_native(self): backend = SimpleBackend() results = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)] ts = self.app.GroupResult(uuid(), results) ts.app.backend = backend backend.ids = [result.id for result in results] assert len(list(ts.iter_native())) == 10 def test_iterate_yields(self): ar = MockAsyncResultSuccess(uuid(), app=self.app) ar2 = MockAsyncResultSuccess(uuid(), app=self.app) ts = self.app.GroupResult(uuid(), [ar, ar2]) with pytest.warns(CPendingDeprecationWarning): it = ts.iterate() assert next(it) == 42 assert next(it) == 42 def test_iterate_eager(self): ar1 = EagerResult(uuid(), 42, states.SUCCESS) ar2 = EagerResult(uuid(), 42, states.SUCCESS) ts = self.app.GroupResult(uuid(), [ar1, ar2]) with pytest.warns(CPendingDeprecationWarning): it = ts.iterate() assert next(it) == 42 assert next(it) == 42 def test_join_timeout(self): ar = MockAsyncResultSuccess(uuid(), app=self.app) ar2 = MockAsyncResultSuccess(uuid(), app=self.app) ar3 = self.app.AsyncResult(uuid()) ts = self.app.GroupResult(uuid(), [ar, ar2, ar3]) with pytest.raises(TimeoutError): ts.join(timeout=0.0000001) ar4 = self.app.AsyncResult(uuid()) ar4.get = Mock() ts2 = self.app.GroupResult(uuid(), [ar4]) assert ts2.join(timeout=0.1) callback = Mock(name='callback') assert not ts2.join(timeout=0.1, callback=callback) callback.assert_called_with(ar4.id, ar4.get()) def test_iter_native_when_empty_group(self): ts = self.app.GroupResult(uuid(), []) assert list(ts.iter_native()) == [] def test_iterate_simple(self): with 
pytest.warns(CPendingDeprecationWarning): it = self.ts.iterate() results = sorted(list(it)) assert results == list(range(self.size)) def test___iter__(self): assert list(iter(self.ts)) == self.ts.results def test_join(self): joined = self.ts.join() assert joined == list(range(self.size)) def test_successful(self): assert self.ts.successful() def test_failed(self): assert not self.ts.failed() def test_maybe_throw(self): self.ts.results = [Mock(name='r1')] self.ts.maybe_throw() self.ts.results[0].maybe_throw.assert_called_with( callback=None, propagate=True, ) def test_join__on_message(self): with pytest.raises(ImproperlyConfigured): self.ts.join(on_message=Mock()) def test_waiting(self): assert not self.ts.waiting() def test_ready(self): assert self.ts.ready() def test_completed_count(self): assert self.ts.completed_count() == len(self.ts) class test_pending_AsyncResult: def test_result(self, app): res = app.AsyncResult(uuid()) assert res.result is None class test_failed_AsyncResult: def setup(self): self.size = 11 self.app.conf.result_serializer = 'pickle' results = make_mock_group(self.app, 10) failed = mock_task('ts11', states.FAILURE, KeyError('Baz')) save_result(self.app, failed) failed_res = self.app.AsyncResult(failed['id']) self.ts = self.app.GroupResult(uuid(), results + [failed_res]) def test_completed_count(self): assert self.ts.completed_count() == len(self.ts) - 1 def test_iterate_simple(self): with pytest.warns(CPendingDeprecationWarning): it = self.ts.iterate() def consume(): return list(it) with pytest.raises(KeyError): consume() def test_join(self): with pytest.raises(KeyError): self.ts.join() def test_successful(self): assert not self.ts.successful() def test_failed(self): assert self.ts.failed() class test_pending_Group: def setup(self): self.ts = self.app.GroupResult( uuid(), [self.app.AsyncResult(uuid()), self.app.AsyncResult(uuid())]) def test_completed_count(self): assert self.ts.completed_count() == 0 def test_ready(self): assert not self.ts.ready() def test_waiting(self): assert self.ts.waiting() def test_join(self): with pytest.raises(TimeoutError): self.ts.join(timeout=0.001) def test_join_longer(self): with pytest.raises(TimeoutError): self.ts.join(timeout=1) class test_EagerResult: def setup(self): @self.app.task(shared=False) def raising(x, y): raise KeyError(x, y) self.raising = raising def test_wait_raises(self): res = self.raising.apply(args=[3, 3]) with pytest.raises(KeyError): res.wait() assert res.wait(propagate=False) def test_wait(self): res = EagerResult('x', 'x', states.RETRY) res.wait() assert res.state == states.RETRY assert res.status == states.RETRY def test_forget(self): res = EagerResult('x', 'x', states.RETRY) res.forget() def test_revoke(self): res = self.raising.apply(args=[3, 3]) assert not res.revoke() class test_tuples: def test_AsyncResult(self): x = self.app.AsyncResult(uuid()) assert x == result_from_tuple(x.as_tuple(), self.app) assert x == result_from_tuple(x, self.app) def test_with_parent(self): x = self.app.AsyncResult(uuid()) x.parent = self.app.AsyncResult(uuid()) y = result_from_tuple(x.as_tuple(), self.app) assert y == x assert y.parent == x.parent assert isinstance(y.parent, AsyncResult) def test_compat(self): uid = uuid() x = result_from_tuple([uid, []], app=self.app) assert x.id == uid def test_GroupResult(self): x = self.app.GroupResult( uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], ) assert x == result_from_tuple(x.as_tuple(), self.app) assert x == result_from_tuple(x, self.app)
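# Illustrative sketch (not part of the original test suite): the
# as_tuple()/result_from_tuple() round-trip exercised by test_tuples above
# is how a result handle is typically serialized across process boundaries.
# Assumes a configured Celery app is passed in as `app`; all other names
# are already imported at the top of this module.
def _example_result_tuple_roundtrip(app):
    res = app.AsyncResult(uuid())
    res.parent = app.AsyncResult(uuid())
    wire = res.as_tuple()  # nested tuples of ids; JSON/pickle friendly
    restored = result_from_tuple(wire, app=app)
    assert restored.id == res.id
    assert restored.parent.id == res.parent.id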
celery-4.1.0/t/unit/tasks/test_tasks.py0000644000175000017500000005075313130607475020052 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket from datetime import datetime, timedelta from case import ContextMock, MagicMock, Mock, patch from kombu import Queue from celery import Task, group, uuid from celery.app.task import _reprtask from celery.exceptions import Ignore, Retry from celery.five import items, range, string_t from celery.result import EagerResult from celery.utils.time import parse_iso8601 def return_True(*args, **kwargs): # Task run functions can't be closures/lambdas, as they're pickled. return True class MockApplyTask(Task): abstract = True applied = 0 def run(self, x, y): return x * y def apply_async(self, *args, **kwargs): self.applied += 1 class TasksCase: def setup(self): self.app.conf.task_protocol = 1 # XXX Still using proto1 self.mytask = self.app.task(shared=False)(return_True) @self.app.task(bind=True, count=0, shared=False) def increment_counter(self, increment_by=1): self.count += increment_by or 1 return self.count self.increment_counter = increment_counter @self.app.task(shared=False) def raising(): raise KeyError('foo') self.raising = raising @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True): self.iterations += 1 rmax = self.max_retries if max_retries is None else max_retries assert repr(self.request) retries = self.request.retries if care and retries >= rmax: return arg1 else: raise self.retry(countdown=0, max_retries=rmax) self.retry_task = retry_task @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_noargs(self, **kwargs): self.iterations += 1 if self.request.retries >= 3: return 42 else: raise self.retry(countdown=0) self.retry_task_noargs = retry_task_noargs @self.app.task(bind=True, max_retries=3, iterations=0, base=MockApplyTask, shared=False) def retry_task_mockapply(self, arg1, arg2, kwarg=1): self.iterations += 1 retries = self.request.retries if retries >= 3: return arg1 raise self.retry(countdown=0) self.retry_task_mockapply = retry_task_mockapply @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs): self.iterations += 1 retries = self.request.retries if retries >= 3: return arg1 + kwarg else: try: raise MyCustomException('Elaine Marie Benes') except MyCustomException as exc: kwargs.update(kwarg=kwarg) raise self.retry(countdown=0, exc=exc) self.retry_task_customexc = retry_task_customexc @self.app.task(bind=True, autoretry_for=(ZeroDivisionError,), shared=False) def autoretry_task_no_kwargs(self, a, b): self.iterations += 1 return a / b self.autoretry_task_no_kwargs = autoretry_task_no_kwargs @self.app.task(bind=True, autoretry_for=(ZeroDivisionError,), retry_kwargs={'max_retries': 5}, shared=False) def autoretry_task(self, a, b): self.iterations += 1 return a / b self.autoretry_task = autoretry_task @self.app.task(bind=True) def task_check_request_context(self): assert self.request.hostname == socket.gethostname() self.task_check_request_context = task_check_request_context # remove all messages from memory-transport from kombu.transport.memory import Channel Channel.queues.clear() class MyCustomException(Exception): """Random custom exception.""" class test_task_retries(TasksCase): def test_retry(self): self.retry_task.max_retries = 3 self.retry_task.iterations = 0
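# With max_retries=3 the task body runs once and is retried three times, so iterations == 4; the second apply() below passes max_retries=10 through the task's kwargs, giving 10 + 1 == 11.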
self.retry_task.apply([0xFF, 0xFFFF]) assert self.retry_task.iterations == 4 self.retry_task.max_retries = 3 self.retry_task.iterations = 0 self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) assert self.retry_task.iterations == 11 def test_retry_no_args(self): self.retry_task_noargs.max_retries = 3 self.retry_task_noargs.iterations = 0 self.retry_task_noargs.apply(propagate=True).get() assert self.retry_task_noargs.iterations == 4 def test_signature_from_request__passes_headers(self): self.retry_task.push_request() self.retry_task.request.headers = {'custom': 10.1} sig = self.retry_task.signature_from_request() assert sig.options['headers']['custom'] == 10.1 def test_signature_from_request__delivery_info(self): self.retry_task.push_request() self.retry_task.request.delivery_info = { 'exchange': 'testex', 'routing_key': 'testrk', } sig = self.retry_task.signature_from_request() assert sig.options['exchange'] == 'testex' assert sig.options['routing_key'] == 'testrk' def test_retry_kwargs_can_be_empty(self): self.retry_task_mockapply.push_request() try: with pytest.raises(Retry): import sys try: sys.exc_clear() except AttributeError: pass self.retry_task_mockapply.retry(args=[4, 4], kwargs=None) finally: self.retry_task_mockapply.pop_request() def test_retry_not_eager(self): self.retry_task_mockapply.push_request() try: self.retry_task_mockapply.request.called_directly = False exc = Exception('baz') try: self.retry_task_mockapply.retry( args=[4, 4], kwargs={'task_retries': 0}, exc=exc, throw=False, ) assert self.retry_task_mockapply.applied finally: self.retry_task_mockapply.applied = 0 try: with pytest.raises(Retry): self.retry_task_mockapply.retry( args=[4, 4], kwargs={'task_retries': 0}, exc=exc, throw=True) assert self.retry_task_mockapply.applied finally: self.retry_task_mockapply.applied = 0 finally: self.retry_task_mockapply.pop_request() def test_retry_with_kwargs(self): self.retry_task_customexc.max_retries = 3 self.retry_task_customexc.iterations = 0 self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) assert self.retry_task_customexc.iterations == 4 def test_retry_with_custom_exception(self): self.retry_task_customexc.max_retries = 2 self.retry_task_customexc.iterations = 0 result = self.retry_task_customexc.apply( [0xFF, 0xFFFF], {'kwarg': 0xF}, ) with pytest.raises(MyCustomException): result.get() assert self.retry_task_customexc.iterations == 3 def test_max_retries_exceeded(self): self.retry_task.max_retries = 2 self.retry_task.iterations = 0 result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) with pytest.raises(self.retry_task.MaxRetriesExceededError): result.get() assert self.retry_task.iterations == 3 self.retry_task.max_retries = 1 self.retry_task.iterations = 0 result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) with pytest.raises(self.retry_task.MaxRetriesExceededError): result.get() assert self.retry_task.iterations == 2 def test_autoretry_no_kwargs(self): self.autoretry_task_no_kwargs.max_retries = 3 self.autoretry_task_no_kwargs.iterations = 0 self.autoretry_task_no_kwargs.apply((1, 0)) assert self.autoretry_task_no_kwargs.iterations == 4 def test_autoretry(self): self.autoretry_task.max_retries = 3 self.autoretry_task.iterations = 0 self.autoretry_task.apply((1, 0)) assert self.autoretry_task.iterations == 6 def test_retry_wrong_eta_when_not_enable_utc(self): """Issue #3753""" self.app.conf.enable_utc = False self.app.conf.timezone = 'US/Eastern' self.autoretry_task.iterations = 0 self.autoretry_task.default_retry_delay = 2 
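# autoretry_task is declared with retry_kwargs={'max_retries': 5}, so a ZeroDivisionError gives 1 original run + 5 retries == 6 iterations; the point of this test is that the count still holds with enable_utc off and a non-UTC timezone, where the retry ETA used to be computed wrongly (issue #3753).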
self.autoretry_task.apply((1, 0)) assert self.autoretry_task.iterations == 6 class test_canvas_utils(TasksCase): def test_si(self): assert self.retry_task.si() assert self.retry_task.si().immutable def test_chunks(self): assert self.retry_task.chunks(range(100), 10) def test_map(self): assert self.retry_task.map(range(100)) def test_starmap(self): assert self.retry_task.starmap(range(100)) def test_on_success(self): self.retry_task.on_success(1, 1, (), {}) class test_tasks(TasksCase): def now(self): return self.app.now() def test_typing(self): @self.app.task() def add(x, y, kw=1): pass with pytest.raises(TypeError): add.delay(1) with pytest.raises(TypeError): add.delay(1, kw=2) with pytest.raises(TypeError): add.delay(1, 2, foobar=3) add.delay(2, 2) def test_typing__disabled(self): @self.app.task(typing=False) def add(x, y, kw=1): pass add.delay(1) add.delay(1, kw=2) add.delay(1, 2, foobar=3) def test_typing__disabled_by_app(self): with self.Celery(set_as_current=False, strict_typing=False) as app: @app.task() def add(x, y, kw=1): pass assert not add.typing add.delay(1) add.delay(1, kw=2) add.delay(1, 2, foobar=3) @pytest.mark.usefixtures('depends_on_current_app') def test_unpickle_task(self): import pickle @self.app.task(shared=True) def xxx(): pass assert pickle.loads(pickle.dumps(xxx)) is xxx.app.tasks[xxx.name] @patch('celery.app.task.current_app') @pytest.mark.usefixtures('depends_on_current_app') def test_bind__no_app(self, current_app): class XTask(Task): _app = None XTask._app = None XTask.__bound__ = False XTask.bind = Mock(name='bind') assert XTask.app is current_app XTask.bind.assert_called_with(current_app) def test_reprtask__no_fmt(self): assert _reprtask(self.mytask) def test_AsyncResult(self): task_id = uuid() result = self.retry_task.AsyncResult(task_id) assert result.backend == self.retry_task.backend assert result.id == task_id def assert_next_task_data_equal(self, consumer, presult, task_name, test_eta=False, test_expires=False, **kwargs): next_task = consumer.queues[0].get(accept=['pickle', 'json']) task_data = next_task.decode() assert task_data['id'] == presult.id assert task_data['task'] == task_name task_kwargs = task_data.get('kwargs', {}) if test_eta: assert isinstance(task_data.get('eta'), string_t) to_datetime = parse_iso8601(task_data.get('eta')) assert isinstance(to_datetime, datetime) if test_expires: assert isinstance(task_data.get('expires'), string_t) to_datetime = parse_iso8601(task_data.get('expires')) assert isinstance(to_datetime, datetime) for arg_name, arg_value in items(kwargs): assert task_kwargs.get(arg_name) == arg_value def test_incomplete_task_cls(self): class IncompleteTask(Task): app = self.app name = 'c.unittest.t.itask' with pytest.raises(NotImplementedError): IncompleteTask().run() def test_task_kwargs_must_be_dictionary(self): with pytest.raises(TypeError): self.increment_counter.apply_async([], 'str') def test_task_args_must_be_list(self): with pytest.raises(TypeError): self.increment_counter.apply_async('s', {}) def test_regular_task(self): assert isinstance(self.mytask, Task) assert self.mytask.run() assert callable(self.mytask) assert self.mytask(), 'Task class runs run() when called' with self.app.connection_or_acquire() as conn: consumer = self.app.amqp.TaskConsumer(conn) with pytest.raises(NotImplementedError): consumer.receive('foo', 'foo') consumer.purge() assert consumer.queues[0].get() is None self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')]) # Without arguments. 
presult = self.mytask.delay() self.assert_next_task_data_equal( consumer, presult, self.mytask.name) # With arguments. presult2 = self.mytask.apply_async( kwargs=dict(name='George Costanza'), ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', ) # send_task sresult = self.app.send_task(self.mytask.name, kwargs=dict(name='Elaine M. Benes')) self.assert_next_task_data_equal( consumer, sresult, self.mytask.name, name='Elaine M. Benes', ) # With ETA. presult2 = self.mytask.apply_async( kwargs=dict(name='George Costanza'), eta=self.now() + timedelta(days=1), expires=self.now() + timedelta(days=2), ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', test_eta=True, test_expires=True, ) # With countdown. presult2 = self.mytask.apply_async( kwargs=dict(name='George Costanza'), countdown=10, expires=12, ) self.assert_next_task_data_equal( consumer, presult2, self.mytask.name, name='George Costanza', test_eta=True, test_expires=True, ) # Discarding all tasks. consumer.purge() self.mytask.apply_async() assert consumer.purge() == 1 assert consumer.queues[0].get() is None assert not presult.successful() self.mytask.backend.mark_as_done(presult.id, result=None) assert presult.successful() def test_send_event(self): mytask = self.mytask._get_current_object() mytask.app.events = Mock(name='events') mytask.app.events.attach_mock(ContextMock(), 'default_dispatcher') mytask.request.id = 'fb' mytask.send_event('task-foo', id=3122) mytask.app.events.default_dispatcher().send.assert_called_with( 'task-foo', uuid='fb', id=3122, retry=True, retry_policy=self.app.conf.task_publish_retry_policy) def test_replace(self): sig1 = Mock(name='sig1') sig1.options = {} with pytest.raises(Ignore): self.mytask.replace(sig1) @pytest.mark.usefixtures('depends_on_current_app') def test_replace_callback(self): c = group([self.mytask.s()], app=self.app) c.freeze = Mock(name='freeze') c.delay = Mock(name='delay') self.mytask.request.id = 'id' self.mytask.request.group = 'group' self.mytask.request.root_id = 'root_id' self.mytask.request.callbacks = 'callbacks' self.mytask.request.errbacks = 'errbacks' class JsonMagicMock(MagicMock): parent = None def __json__(self): return 'whatever' def reprcall(self, *args, **kwargs): return 'whatever2' mocked_signature = JsonMagicMock(name='s') accumulate_mock = JsonMagicMock(name='accumulate', s=mocked_signature) self.mytask.app.tasks['celery.accumulate'] = accumulate_mock try: self.mytask.replace(c) except Ignore: mocked_signature.return_value.set.assert_called_with( chord=None, link='callbacks', link_error='errbacks', ) def test_replace_group(self): c = group([self.mytask.s()], app=self.app) c.freeze = Mock(name='freeze') c.delay = Mock(name='delay') self.mytask.request.id = 'id' self.mytask.request.group = 'group' self.mytask.request.root_id = 'root_id', with pytest.raises(Ignore): self.mytask.replace(c) def test_add_trail__no_trail(self): mytask = self.increment_counter._get_current_object() mytask.trail = False mytask.add_trail('foo') def test_repr_v2_compat(self): self.mytask.__v2_compat__ = True assert 'v2 compatible' in repr(self.mytask) def test_apply_with_self(self): @self.app.task(__self__=42, shared=False) def tawself(self): return self assert tawself.apply().get() == 42 assert tawself() == 42 def test_context_get(self): self.mytask.push_request() try: request = self.mytask.request request.foo = 32 assert request.get('foo') == 32 assert request.get('bar', 36) == 36 request.clear() finally: 
self.mytask.pop_request() def test_annotate(self): with patch('celery.app.task.resolve_all_annotations') as anno: anno.return_value = [{'FOO': 'BAR'}] @self.app.task(shared=False) def task(): pass task.annotate() assert task.FOO == 'BAR' def test_after_return(self): self.mytask.push_request() try: self.mytask.request.chord = self.mytask.s() self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) self.mytask.request.clear() finally: self.mytask.pop_request() def test_update_state(self): @self.app.task(shared=False) def yyy(): pass yyy.push_request() try: tid = uuid() yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'}) assert yyy.AsyncResult(tid).status == 'FROBULATING' assert yyy.AsyncResult(tid).result == {'fooz': 'baaz'} yyy.request.id = tid yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'}) assert yyy.AsyncResult(tid).status == 'FROBUZATING' assert yyy.AsyncResult(tid).result == {'fooz': 'baaz'} finally: yyy.pop_request() def test_repr(self): @self.app.task(shared=False) def task_test_repr(): pass assert 'task_test_repr' in repr(task_test_repr) def test_has___name__(self): @self.app.task(shared=False) def yyy2(): pass assert yyy2.__name__ class test_apply_task(TasksCase): def test_apply_throw(self): with pytest.raises(KeyError): self.raising.apply(throw=True) def test_apply_with_task_eager_propagates(self): self.app.conf.task_eager_propagates = True with pytest.raises(KeyError): self.raising.apply() def test_apply_request_context_is_ok(self): self.app.conf.task_eager_propagates = True self.task_check_request_context.apply() def test_apply(self): self.increment_counter.count = 0 e = self.increment_counter.apply() assert isinstance(e, EagerResult) assert e.get() == 1 e = self.increment_counter.apply(args=[1]) assert e.get() == 2 e = self.increment_counter.apply(kwargs={'increment_by': 4}) assert e.get() == 6 assert e.successful() assert e.ready() assert repr(e).startswith('= next_run_at assert next_entry.total_run_count == 1 def test_is_due(self): entry = self.create_entry(schedule=timedelta(seconds=10)) assert entry.app is self.app assert entry.schedule.app is self.app due1, next_time_to_run1 = entry.is_due() assert not due1 assert next_time_to_run1 > 9 next_run_at = entry.last_run_at - timedelta(seconds=10) next_entry = entry.next(next_run_at) due2, next_time_to_run2 = next_entry.is_due() assert due2 assert next_time_to_run2 > 9 def test_repr(self): entry = self.create_entry() assert ' 1: return s.sh raise OSError() opens.side_effect = effect s.setup_schedule() s._remove_db.assert_called_with() s._store = {str('__version__'): 1} s.setup_schedule() s._store.clear = Mock() op = s.persistence.open = Mock() op.return_value = s._store s._store[str('tz')] = 'FUNKY' s.setup_schedule() op.assert_called_with(s.schedule_filename, writeback=True) s._store.clear.assert_called_with() s._store[str('utc_enabled')] = False s._store.clear = Mock() s.setup_schedule() s._store.clear.assert_called_with() def test_get_schedule(self): s = create_persistent_scheduler()[0]( schedule_filename='schedule', app=self.app, ) s._store = {str('entries'): {}} s.schedule = {'foo': 'bar'} assert s.schedule == {'foo': 'bar'} assert s._store[str('entries')] == s.schedule class test_Service: def get_service(self): Scheduler, mock_shelve = create_persistent_scheduler() return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve def test_pickleable(self): s = beat.Service(app=self.app, scheduler_cls=Mock) assert loads(dumps(s)) def test_start(self): s, sh = self.get_service() schedule = 
s.scheduler.schedule assert isinstance(schedule, dict) assert isinstance(s.scheduler, beat.Scheduler) scheduled = list(schedule.keys()) for task_name in keys(sh[str('entries')]): assert task_name in scheduled s.sync() assert sh.closed assert sh.synced assert s._is_stopped.isSet() s.sync() s.stop(wait=False) assert s._is_shutdown.isSet() s.stop(wait=True) assert s._is_shutdown.isSet() p = s.scheduler._store s.scheduler._store = None try: s.scheduler.sync() finally: s.scheduler._store = p def test_start_embedded_process(self): s, sh = self.get_service() s._is_shutdown.set() s.start(embedded_process=True) def test_start_thread(self): s, sh = self.get_service() s._is_shutdown.set() s.start(embedded_process=False) def test_start_tick_raises_exit_error(self): s, sh = self.get_service() s.scheduler.tick_raises_exit = True s.start() assert s._is_shutdown.isSet() def test_start_manages_one_tick_before_shutdown(self): s, sh = self.get_service() s.scheduler.shutdown_service = s s.start() assert s._is_shutdown.isSet() class test_EmbeddedService: @skip.unless_module('_multiprocessing', name='multiprocessing') def xxx_start_stop_process(self): from billiard.process import Process s = beat.EmbeddedService(self.app) assert isinstance(s, Process) assert isinstance(s.service, beat.Service) s.service = MockService() class _Popen(object): terminated = False def terminate(self): self.terminated = True with patch('celery.platforms.close_open_fds'): s.run() assert s.service.started s._popen = _Popen() s.stop() assert s.service.stopped assert s._popen.terminated def test_start_stop_threaded(self): s = beat.EmbeddedService(self.app, thread=True) from threading import Thread assert isinstance(s, Thread) assert isinstance(s.service, beat.Service) s.service = MockService() s.run() assert s.service.started s.stop() assert s.service.stopped class test_schedule: def test_maybe_make_aware(self): x = schedule(10, app=self.app) x.utc_enabled = True d = x.maybe_make_aware(datetime.utcnow()) assert d.tzinfo x.utc_enabled = False d2 = x.maybe_make_aware(datetime.utcnow()) assert d2.tzinfo def test_to_local(self): x = schedule(10, app=self.app) x.utc_enabled = True d = x.to_local(datetime.utcnow()) assert d.tzinfo is None x.utc_enabled = False d = x.to_local(datetime.utcnow()) assert d.tzinfo celery-4.1.0/t/unit/app/test_defaults.py0000644000175000017500000000372313130607475020162 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import sys from importlib import import_module from case import mock from celery.app.defaults import ( _OLD_DEFAULTS, _OLD_SETTING_KEYS, _TO_NEW_KEY, _TO_OLD_KEY, DEFAULTS, NAMESPACES, SETTING_KEYS ) from celery.five import values class test_defaults: def setup(self): self._prev = sys.modules.pop('celery.app.defaults', None) def teardown(self): if self._prev: sys.modules['celery.app.defaults'] = self._prev def test_option_repr(self): assert repr(NAMESPACES['broker']['url']) def test_any(self): val = object() assert self.defaults.Option.typemap['any'](val) is val @mock.sys_platform('darwin') @mock.pypy_version((1, 4, 0)) def test_default_pool_pypy_14(self): assert self.defaults.DEFAULT_POOL == 'solo' @mock.sys_platform('darwin') @mock.pypy_version((1, 5, 0)) def test_default_pool_pypy_15(self): assert self.defaults.DEFAULT_POOL == 'prefork' def test_compat_indices(self): assert not any(key.isupper() for key in DEFAULTS) assert not any(key.islower() for key in _OLD_DEFAULTS) assert not any(key.isupper() for key in _TO_OLD_KEY) assert not any(key.islower() for key in 
_TO_NEW_KEY) assert not any(key.isupper() for key in SETTING_KEYS) assert not any(key.islower() for key in _OLD_SETTING_KEYS) assert not any(value.isupper() for value in values(_TO_NEW_KEY)) assert not any(value.islower() for value in values(_TO_OLD_KEY)) for key in _TO_NEW_KEY: assert key in _OLD_SETTING_KEYS for key in _TO_OLD_KEY: assert key in SETTING_KEYS def test_find(self): find = self.defaults.find assert find('default_queue')[2].default == 'celery' assert find('task_default_exchange')[2] is None @property def defaults(self): return import_module('celery.app.defaults') celery-4.1.0/t/unit/app/test_schedules.py0000644000175000017500000006746013130607475020342 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import time from contextlib import contextmanager from datetime import datetime, timedelta from pickle import dumps, loads from case import Case, Mock, skip from celery.five import items from celery.schedules import ( ParseException, crontab, crontab_parser, schedule, solar, ) assertions = Case('__init__') @contextmanager def patch_crontab_nowfun(cls, retval): prev_nowfun = cls.nowfun cls.nowfun = lambda: retval try: yield finally: cls.nowfun = prev_nowfun @skip.unless_module('ephem') class test_solar: def setup(self): self.s = solar('sunrise', 60, 30, app=self.app) def test_reduce(self): fun, args = self.s.__reduce__() assert fun(*args) == self.s def test_eq(self): assert self.s == solar('sunrise', 60, 30, app=self.app) assert self.s != solar('sunset', 60, 30, app=self.app) assert self.s != schedule(10) def test_repr(self): assert repr(self.s) def test_is_due(self): self.s.remaining_estimate = Mock(name='rem') self.s.remaining_estimate.return_value = timedelta(seconds=0) assert self.s.is_due(datetime.utcnow()).is_due def test_is_due__not_due(self): self.s.remaining_estimate = Mock(name='rem') self.s.remaining_estimate.return_value = timedelta(hours=10) assert not self.s.is_due(datetime.utcnow()).is_due def test_remaining_estimate(self): self.s.cal = Mock(name='cal') self.s.cal.next_rising().datetime.return_value = datetime.utcnow() self.s.remaining_estimate(datetime.utcnow()) def test_coordinates(self): with pytest.raises(ValueError): solar('sunrise', -120, 60, app=self.app) with pytest.raises(ValueError): solar('sunrise', 120, 60, app=self.app) with pytest.raises(ValueError): solar('sunrise', 60, -200, app=self.app) with pytest.raises(ValueError): solar('sunrise', 60, 200, app=self.app) def test_invalid_event(self): with pytest.raises(ValueError): solar('asdqwewqew', 60, 60, app=self.app) class test_schedule: def test_ne(self): s1 = schedule(10, app=self.app) s2 = schedule(12, app=self.app) s3 = schedule(10, app=self.app) assert s1 == s3 assert s1 != s2 def test_pickle(self): s1 = schedule(10, app=self.app) fun, args = s1.__reduce__() s2 = fun(*args) assert s1 == s2 # This is needed for test_crontab_parser because datetime.utcnow doesn't pickle # in python 2 def utcnow(): return datetime.utcnow() class test_crontab_parser: def crontab(self, *args, **kwargs): return crontab(*args, **dict(kwargs, app=self.app)) def test_crontab_reduce(self): c = self.crontab('*') assert c == loads(dumps(c)) c = self.crontab( minute='1', hour='2', day_of_week='3', day_of_month='4', month_of_year='5', nowfun=utcnow) assert c == loads(dumps(c)) def test_range_steps_not_enough(self): with pytest.raises(crontab_parser.ParseException): crontab_parser(24)._range_steps([1]) def test_parse_star(self): assert crontab_parser(24).parse('*') == set(range(24)) 
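# Quick reference for the semantics exercised in this class, consistent
# with the surrounding assertions (not new test cases): crontab_parser
# parses one cron field into a set of integers in
# [min_, min_ + max_ - 1], for example:
#
#     crontab_parser(60).parse('*/15')  # -> {0, 15, 30, 45}
#     crontab_parser(31, 1).parse('*')  # -> {1, 2, ..., 31} (month days)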
assert crontab_parser(60).parse('*') == set(range(60)) assert crontab_parser(7).parse('*') == set(range(7)) assert crontab_parser(31, 1).parse('*') == set(range(1, 31 + 1)) assert crontab_parser(12, 1).parse('*') == set(range(1, 12 + 1)) def test_parse_range(self): assert crontab_parser(60).parse('1-10') == set(range(1, 10 + 1)) assert crontab_parser(24).parse('0-20') == set(range(0, 20 + 1)) assert crontab_parser().parse('2-10') == set(range(2, 10 + 1)) assert crontab_parser(60, 1).parse('1-10') == set(range(1, 10 + 1)) def test_parse_range_wraps(self): assert crontab_parser(12).parse('11-1') == {11, 0, 1} assert crontab_parser(60, 1).parse('2-1') == set(range(1, 60 + 1)) def test_parse_groups(self): assert crontab_parser().parse('1,2,3,4') == {1, 2, 3, 4} assert crontab_parser().parse('0,15,30,45') == {0, 15, 30, 45} assert crontab_parser(min_=1).parse('1,2,3,4') == {1, 2, 3, 4} def test_parse_steps(self): assert crontab_parser(8).parse('*/2') == {0, 2, 4, 6} assert crontab_parser().parse('*/2') == {i * 2 for i in range(30)} assert crontab_parser().parse('*/3') == {i * 3 for i in range(20)} assert crontab_parser(8, 1).parse('*/2') == {1, 3, 5, 7} assert crontab_parser(min_=1).parse('*/2') == { i * 2 + 1 for i in range(30) } assert crontab_parser(min_=1).parse('*/3') == { i * 3 + 1 for i in range(20) } def test_parse_composite(self): assert crontab_parser(8).parse('*/2') == {0, 2, 4, 6} assert crontab_parser().parse('2-9/5') == {2, 7} assert crontab_parser().parse('2-10/5') == {2, 7} assert crontab_parser(min_=1).parse('55-5/3') == {55, 58, 1, 4} assert crontab_parser().parse('2-11/5,3') == {2, 3, 7} assert crontab_parser().parse('2-4/3,*/5,0-21/4') == { 0, 2, 4, 5, 8, 10, 12, 15, 16, 20, 25, 30, 35, 40, 45, 50, 55, } assert crontab_parser().parse('1-9/2') == {1, 3, 5, 7, 9} assert crontab_parser(8, 1).parse('*/2') == {1, 3, 5, 7} assert crontab_parser(min_=1).parse('2-9/5') == {2, 7} assert crontab_parser(min_=1).parse('2-10/5') == {2, 7} assert crontab_parser(min_=1).parse('2-11/5,3') == {2, 3, 7} assert crontab_parser(min_=1).parse('2-4/3,*/5,1-21/4') == { 1, 2, 5, 6, 9, 11, 13, 16, 17, 21, 26, 31, 36, 41, 46, 51, 56, } assert crontab_parser(min_=1).parse('1-9/2') == {1, 3, 5, 7, 9} def test_parse_errors_on_empty_string(self): with pytest.raises(ParseException): crontab_parser(60).parse('') def test_parse_errors_on_empty_group(self): with pytest.raises(ParseException): crontab_parser(60).parse('1,,2') def test_parse_errors_on_empty_steps(self): with pytest.raises(ParseException): crontab_parser(60).parse('*/') def test_parse_errors_on_negative_number(self): with pytest.raises(ParseException): crontab_parser(60).parse('-20') def test_parse_errors_on_lt_min(self): crontab_parser(min_=1).parse('1') with pytest.raises(ValueError): crontab_parser(12, 1).parse('0') with pytest.raises(ValueError): crontab_parser(24, 1).parse('12-0') def test_parse_errors_on_gt_max(self): crontab_parser(1).parse('0') with pytest.raises(ValueError): crontab_parser(1).parse('1') with pytest.raises(ValueError): crontab_parser(60).parse('61-0') def test_expand_cronspec_eats_iterables(self): assert crontab._expand_cronspec(iter([1, 2, 3]), 100) == {1, 2, 3} assert crontab._expand_cronspec(iter([1, 2, 3]), 100, 1) == {1, 2, 3} def test_expand_cronspec_invalid_type(self): with pytest.raises(TypeError): crontab._expand_cronspec(object(), 100) def test_repr(self): assert '*' in repr(self.crontab('*')) def test_eq(self): assert (self.crontab(day_of_week='1, 2') == self.crontab(day_of_week='1-2')) assert 
(self.crontab(day_of_month='1, 16, 31') == self.crontab(day_of_month='*/15')) assert ( self.crontab( minute='1', hour='2', day_of_week='5', day_of_month='10', month_of_year='5') == self.crontab( minute='1', hour='2', day_of_week='5', day_of_month='10', month_of_year='5')) assert crontab(minute='1') != crontab(minute='2') assert (self.crontab(month_of_year='1') != self.crontab(month_of_year='2')) assert object() != self.crontab(minute='1') assert self.crontab(minute='1') != object() assert crontab(month_of_year='1') != schedule(10) class test_crontab_remaining_estimate: def crontab(self, *args, **kwargs): return crontab(*args, **dict(kwargs, app=self.app)) def next_ocurrance(self, crontab, now): crontab.nowfun = lambda: now return now + crontab.remaining_estimate(now) def test_next_minute(self): next = self.next_ocurrance( self.crontab(), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 11, 14, 31) def test_not_next_minute(self): next = self.next_ocurrance( self.crontab(), datetime(2010, 9, 11, 14, 59, 15), ) assert next == datetime(2010, 9, 11, 15, 0) def test_this_hour(self): next = self.next_ocurrance( self.crontab(minute=[5, 42]), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 11, 14, 42) def test_not_this_hour(self): next = self.next_ocurrance( self.crontab(minute=[5, 10, 15]), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 11, 15, 5) def test_today(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], hour=[12, 17]), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 11, 17, 5) def test_not_today(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], hour=[12]), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 12, 12, 5) def test_weekday(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_week='sat'), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 18, 14, 30) def test_not_weekday(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='mon-fri'), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 13, 0, 5) def test_monthday(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_month=18), datetime(2010, 9, 11, 14, 30, 15), ) assert next == datetime(2010, 9, 18, 14, 30) def test_not_monthday(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_month=29), datetime(2010, 1, 22, 14, 30, 15), ) assert next == datetime(2010, 1, 29, 0, 5) def test_weekday_monthday(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_week='mon', day_of_month=18), datetime(2010, 1, 18, 14, 30, 15), ) assert next == datetime(2010, 10, 18, 14, 30) def test_monthday_not_weekday(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='sat', day_of_month=29), datetime(2010, 1, 29, 0, 5, 15), ) assert next == datetime(2010, 5, 29, 0, 5) def test_weekday_not_monthday(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=18), datetime(2010, 1, 11, 0, 5, 15), ) assert next == datetime(2010, 1, 18, 0, 5) def test_not_weekday_not_monthday(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=18), datetime(2010, 1, 10, 0, 5, 15), ) assert next == datetime(2010, 1, 18, 0, 5) def test_leapday(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_month=29), datetime(2012, 1, 29, 14, 30, 15), ) assert next == datetime(2012, 2, 29, 14, 
30) def test_not_leapday(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_month=29), datetime(2010, 1, 29, 14, 30, 15), ) assert next == datetime(2010, 3, 29, 14, 30) def test_weekmonthdayyear(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_week='fri', day_of_month=29, month_of_year=1), datetime(2010, 1, 22, 14, 30, 15), ) assert next == datetime(2010, 1, 29, 14, 30) def test_monthdayyear_not_week(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='wed,thu', day_of_month=29, month_of_year='1,4,7'), datetime(2010, 1, 29, 14, 30, 15), ) assert next == datetime(2010, 4, 29, 0, 5) def test_weekdaymonthyear_not_monthday(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_week='fri', day_of_month=29, month_of_year='1-10'), datetime(2010, 1, 29, 14, 30, 15), ) assert next == datetime(2010, 10, 29, 14, 30) def test_weekmonthday_not_monthyear(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='fri', day_of_month=29, month_of_year='2-10'), datetime(2010, 1, 29, 14, 30, 15), ) assert next == datetime(2010, 10, 29, 0, 5) def test_weekday_not_monthdayyear(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=18, month_of_year='2-10'), datetime(2010, 1, 11, 0, 5, 15), ) assert next == datetime(2010, 10, 18, 0, 5) def test_monthday_not_weekdaymonthyear(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=29, month_of_year='2-4'), datetime(2010, 1, 29, 0, 5, 15), ) assert next == datetime(2010, 3, 29, 0, 5) def test_monthyear_not_weekmonthday(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=29, month_of_year='2-4'), datetime(2010, 2, 28, 0, 5, 15), ) assert next == datetime(2010, 3, 29, 0, 5) def test_not_weekmonthdayyear(self): next = self.next_ocurrance( self.crontab(minute=[5, 42], day_of_week='fri,sat', day_of_month=29, month_of_year='2-10'), datetime(2010, 1, 28, 14, 30, 15), ) assert next == datetime(2010, 5, 29, 0, 5) def test_invalid_specification(self): # *** WARNING *** # This test triggers an infinite loop in case of a regression with pytest.raises(RuntimeError): self.next_ocurrance( self.crontab(day_of_month=31, month_of_year=4), datetime(2010, 1, 28, 14, 30, 15), ) def test_leapyear(self): next = self.next_ocurrance( self.crontab(minute=30, hour=14, day_of_month=29, month_of_year=2), datetime(2012, 2, 29, 14, 30), ) assert next == datetime(2016, 2, 29, 14, 30) class test_crontab_is_due: def setup(self): self.now = self.app.now() self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond self.every_minute = self.crontab() self.quarterly = self.crontab(minute='*/15') self.hourly = self.crontab(minute=30) self.daily = self.crontab(hour=7, minute=30) self.weekly = self.crontab(hour=7, minute=30, day_of_week='thursday') self.monthly = self.crontab( hour=7, minute=30, day_of_week='thursday', day_of_month='8-14', ) self.monthly_moy = self.crontab( hour=22, day_of_week='*', month_of_year='2', day_of_month='26,27,28', ) self.yearly = self.crontab( hour=7, minute=30, day_of_week='thursday', day_of_month='8-14', month_of_year=3, ) def crontab(self, *args, **kwargs): return crontab(*args, app=self.app, **kwargs) def test_default_crontab_spec(self): c = self.crontab() assert c.minute == set(range(60)) assert c.hour == set(range(24)) assert c.day_of_week == set(range(7)) assert c.day_of_month == set(range(1, 32)) assert c.month_of_year == 
set(range(1, 13)) def test_simple_crontab_spec(self): c = self.crontab(minute=30) assert c.minute == {30} assert c.hour == set(range(24)) assert c.day_of_week == set(range(7)) assert c.day_of_month == set(range(1, 32)) assert c.month_of_year == set(range(1, 13)) @pytest.mark.parametrize('minute,expected', [ (30, {30}), ('30', {30}), ((30, 40, 50), {30, 40, 50}), ((30, 40, 50, 51), {30, 40, 50, 51}) ]) def test_crontab_spec_minute_formats(self, minute, expected): c = self.crontab(minute=minute) assert c.minute == expected @pytest.mark.parametrize('minute', [60, '0-100']) def test_crontab_spec_invalid_minute(self, minute): with pytest.raises(ValueError): self.crontab(minute=minute) @pytest.mark.parametrize('hour,expected', [ (6, {6}), ('5', {5}), ((4, 8, 12), {4, 8, 12}), ]) def test_crontab_spec_hour_formats(self, hour, expected): c = self.crontab(hour=hour) assert c.hour == expected @pytest.mark.parametrize('hour', [24, '0-30']) def test_crontab_spec_invalid_hour(self, hour): with pytest.raises(ValueError): self.crontab(hour=hour) @pytest.mark.parametrize('day_of_week,expected', [ (5, {5}), ('5', {5}), ('fri', {5}), ('tuesday,sunday,fri', {0, 2, 5}), ('mon-fri', {1, 2, 3, 4, 5}), ('*/2', {0, 2, 4, 6}), ]) def test_crontab_spec_dow_formats(self, day_of_week, expected): c = self.crontab(day_of_week=day_of_week) assert c.day_of_week == expected @pytest.mark.parametrize('day_of_week', [ 'fooday-barday', '1,4,foo', '7', '12', ]) def test_crontab_spec_invalid_dow(self, day_of_week): with pytest.raises(ValueError): self.crontab(day_of_week=day_of_week) @pytest.mark.parametrize('day_of_month,expected', [ (5, {5}), ('5', {5}), ('2,4,6', {2, 4, 6}), ('*/5', {1, 6, 11, 16, 21, 26, 31}), ]) def test_crontab_spec_dom_formats(self, day_of_month, expected): c = self.crontab(day_of_month=day_of_month) assert c.day_of_month == expected @pytest.mark.parametrize('day_of_month', [0, '0-10', 32, '31,32']) def test_crontab_spec_invalid_dom(self, day_of_month): with pytest.raises(ValueError): self.crontab(day_of_month=day_of_month) @pytest.mark.parametrize('month_of_year,expected', [ (1, {1}), ('1', {1}), ('2,4,6', {2, 4, 6}), ('*/2', {1, 3, 5, 7, 9, 11}), ('2-12/2', {2, 4, 6, 8, 10, 12}), ]) def test_crontab_spec_moy_formats(self, month_of_year, expected): c = self.crontab(month_of_year=month_of_year) assert c.month_of_year == expected @pytest.mark.parametrize('month_of_year', [0, '0-5', 13, '12,13']) def test_crontab_spec_invalid_moy(self, month_of_year): with pytest.raises(ValueError): self.crontab(month_of_year=month_of_year) def seconds_almost_equal(self, a, b, precision): for index, skew in enumerate((+1, -1, 0)): try: assertions.assertAlmostEqual(a, b + skew, precision) except Exception as exc: # AssertionError != builtins.AssertionError in py.test if 'AssertionError' in str(exc): if index + 1 >= 3: raise else: break def test_every_minute_execution_is_due(self): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) self.assert_relativedelta(self.every_minute, last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def assert_relativedelta(self, due, last_ran): try: from dateutil.relativedelta import relativedelta except ImportError: return l1, d1, n1 = due.remaining_delta(last_ran) l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta) if not isinstance(d1, relativedelta): assert l1 == l2 for field, value in items(d1._fields()): assert getattr(d1, field) == value assert not d2.years assert not d2.months assert not d2.days assert not 
d2.leapdays assert not d2.hours assert not d2.minutes assert not d2.seconds assert not d2.microseconds def test_every_minute_execution_is_not_due(self): last_ran = self.now - timedelta(seconds=self.now.second) due, remaining = self.every_minute.is_due(last_ran) assert not due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_execution_is_due_on_saturday(self): # 29th of May 2010 is a saturday with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_execution_is_due_on_sunday(self): # 30th of May 2010 is a sunday with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_execution_is_due_on_monday(self): # 31st of May 2010 is a monday with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)): last_ran = self.now - timedelta(seconds=61) due, remaining = self.every_minute.is_due(last_ran) assert due self.seconds_almost_equal(remaining, self.next_minute, 1) def test_every_hour_execution_is_due(self): with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)): due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30)) assert due assert remaining == 60 * 60 def test_every_hour_execution_is_not_due(self): with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)): due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30)) assert not due assert remaining == 60 def test_first_quarter_execution_is_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 15)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 6, 30), ) assert due assert remaining == 15 * 60 def test_second_quarter_execution_is_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 30)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 6, 30), ) assert due assert remaining == 15 * 60 def test_first_quarter_execution_is_not_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 14)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 10, 0), ) assert not due assert remaining == 60 def test_second_quarter_execution_is_not_due(self): with patch_crontab_nowfun( self.quarterly, datetime(2010, 5, 10, 10, 29)): due, remaining = self.quarterly.is_due( datetime(2010, 5, 10, 10, 15), ) assert not due assert remaining == 60 def test_daily_execution_is_due(self): with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)): due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30)) assert due assert remaining == 24 * 60 * 60 def test_daily_execution_is_not_due(self): with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)): due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30)) assert not due assert remaining == 21 * 60 * 60 def test_weekly_execution_is_due(self): with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)): due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30)) assert due assert remaining == 7 * 24 * 60 * 60 def test_weekly_execution_is_not_due(self): with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)): due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30)) assert not due assert remaining == 6 * 24 * 60 * 60 - 3 * 
60 * 60 def test_monthly_execution_is_due(self): with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)): due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) assert due assert remaining == 28 * 24 * 60 * 60 def test_monthly_execution_is_not_due(self): with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)): due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) assert not due assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60 def test_monthly_moy_execution_is_due(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2014, 2, 26, 22, 0)): due, remaining = self.monthly_moy.is_due( datetime(2013, 7, 4, 10, 0), ) assert due assert remaining == 60.0 @skip.todo('unstable test') def test_monthly_moy_execution_is_not_due(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2013, 6, 28, 14, 30)): due, remaining = self.monthly_moy.is_due( datetime(2013, 6, 28, 22, 14), ) assert not due attempt = ( time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - 60 * 60 ) assert remaining == attempt def test_monthly_moy_execution_is_due2(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2014, 2, 26, 22, 0)): due, remaining = self.monthly_moy.is_due( datetime(2013, 2, 28, 10, 0), ) assert due assert remaining == 60.0 def test_monthly_moy_execution_is_not_due2(self): with patch_crontab_nowfun( self.monthly_moy, datetime(2014, 2, 26, 21, 0)): due, remaining = self.monthly_moy.is_due( datetime(2013, 6, 28, 22, 14), ) assert not due attempt = 60 * 60 assert remaining == attempt def test_yearly_execution_is_due(self): with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)): due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) assert due assert remaining == 364 * 24 * 60 * 60 def test_yearly_execution_is_not_due(self): with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)): due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) assert not due assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60 celery-4.1.0/t/unit/app/test_exceptions.py0000644000175000017500000000135413130607475020532 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle from datetime import datetime from celery.exceptions import Reject, Retry class test_Retry: def test_when_datetime(self): x = Retry('foo', KeyError(), when=datetime.utcnow()) assert x.humanize() def test_pickleable(self): x = Retry('foo', KeyError(), when=datetime.utcnow()) assert pickle.loads(pickle.dumps(x)) class test_Reject: def test_attrs(self): x = Reject('foo', requeue=True) assert x.reason == 'foo' assert x.requeue def test_repr(self): assert repr(Reject('foo', True)) def test_pickleable(self): x = Retry('foo', True) assert pickle.loads(pickle.dumps(x)) celery-4.1.0/t/unit/app/__init__.py0000644000175000017500000000000013130607475017034 0ustar omeromer00000000000000celery-4.1.0/t/unit/app/test_control.py0000644000175000017500000003733713130607475020043 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from celery import uuid from celery.app import control from celery.exceptions import DuplicateNodenameWarning from celery.five import items from celery.utils.collections import LimitedSet def _info_for_commandclass(type_): from celery.worker.control import Panel return [ (name, info) for name, info in items(Panel.meta) if info.type == type_ ] def 
test_client_implements_all_commands(app): commands = _info_for_commandclass('control') assert commands for name, info in commands: assert getattr(app.control, name) def test_inspect_implements_all_commands(app): inspect = app.control.inspect() commands = _info_for_commandclass('inspect') assert commands for name, info in commands: if info.type == 'inspect': assert getattr(inspect, name) class test_flatten_reply: def test_flatten_reply(self): reply = [ {'foo@example.com': {'hello': 10}}, {'foo@example.com': {'hello': 20}}, {'bar@example.com': {'hello': 30}} ] with pytest.warns(DuplicateNodenameWarning) as w: nodes = control.flatten_reply(reply) assert 'Received multiple replies from node name: {0}.'.format( next(iter(reply[0]))) in str(w[0].message.args[0]) assert 'foo@example.com' in nodes assert 'bar@example.com' in nodes class test_inspect: def setup(self): self.app.control.broadcast = Mock(name='broadcast') self.app.control.broadcast.return_value = {} self.inspect = self.app.control.inspect() def test_prepare_reply(self): reply = self.inspect._prepare([ {'w1': {'ok': 1}}, {'w2': {'ok': 1}}, ]) assert reply == { 'w1': {'ok': 1}, 'w2': {'ok': 1}, } i = self.app.control.inspect(destination='w1') assert i._prepare([{'w1': {'ok': 1}}]) == {'ok': 1} def assert_broadcast_called(self, command, destination=None, callback=None, connection=None, limit=None, timeout=None, reply=True, **arguments): self.app.control.broadcast.assert_called_with( command, arguments=arguments, destination=destination or self.inspect.destination, callback=callback or self.inspect.callback, connection=connection or self.inspect.connection, limit=limit if limit is not None else self.inspect.limit, timeout=timeout if timeout is not None else self.inspect.timeout, reply=reply, ) def test_active(self): self.inspect.active() self.assert_broadcast_called('active') def test_clock(self): self.inspect.clock() self.assert_broadcast_called('clock') def test_conf(self): self.inspect.conf() self.assert_broadcast_called('conf', with_defaults=False) def test_conf__with_defaults(self): self.inspect.conf(with_defaults=True) self.assert_broadcast_called('conf', with_defaults=True) def test_hello(self): self.inspect.hello('george@vandelay.com') self.assert_broadcast_called( 'hello', from_node='george@vandelay.com', revoked=None) def test_hello__with_revoked(self): revoked = LimitedSet(100) for i in range(100): revoked.add('id{0}'.format(i)) self.inspect.hello('george@vandelay.com', revoked=revoked._data) self.assert_broadcast_called( 'hello', from_node='george@vandelay.com', revoked=revoked._data) def test_memsample(self): self.inspect.memsample() self.assert_broadcast_called('memsample') def test_memdump(self): self.inspect.memdump() self.assert_broadcast_called('memdump', samples=10) def test_memdump__samples_specified(self): self.inspect.memdump(samples=303) self.assert_broadcast_called('memdump', samples=303) def test_objgraph(self): self.inspect.objgraph() self.assert_broadcast_called( 'objgraph', num=200, type='Request', max_depth=10) def test_scheduled(self): self.inspect.scheduled() self.assert_broadcast_called('scheduled') def test_reserved(self): self.inspect.reserved() self.assert_broadcast_called('reserved') def test_stats(self): self.inspect.stats() self.assert_broadcast_called('stats') def test_revoked(self): self.inspect.revoked() self.assert_broadcast_called('revoked') def test_registered(self): self.inspect.registered() self.assert_broadcast_called('registered', taskinfoitems=()) def test_registered__taskinfoitems(self): 
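# registered() forwards any positional names to the broadcast as
# ``taskinfoitems`` -- per-task attributes to include in the reply. A
# rough sketch of the user-facing call (worker name and reply shape are
# illustrative):
#
#     app.control.inspect().registered('rate_limit')
#     # -> {'worker1@example.com': ['tasks.add [rate_limit=100/m]', ...]}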
self.inspect.registered('rate_limit', 'time_limit') self.assert_broadcast_called( 'registered', taskinfoitems=('rate_limit', 'time_limit'), ) def test_ping(self): self.inspect.ping() self.assert_broadcast_called('ping') def test_active_queues(self): self.inspect.active_queues() self.assert_broadcast_called('active_queues') def test_query_task(self): self.inspect.query_task('foo', 'bar') self.assert_broadcast_called('query_task', ids=('foo', 'bar')) def test_query_task__compat_single_list_argument(self): self.inspect.query_task(['foo', 'bar']) self.assert_broadcast_called('query_task', ids=['foo', 'bar']) def test_query_task__scalar(self): self.inspect.query_task('foo') self.assert_broadcast_called('query_task', ids=('foo',)) def test_report(self): self.inspect.report() self.assert_broadcast_called('report') class test_Control_broadcast: def setup(self): self.app.control.mailbox = Mock(name='mailbox') def test_broadcast(self): self.app.control.broadcast('foobarbaz', arguments={'foo': 2}) self.app.control.mailbox.assert_called() self.app.control.mailbox()._broadcast.assert_called_with( 'foobarbaz', {'foo': 2}, None, False, 1.0, None, None, channel=None, ) def test_broadcast_limit(self): self.app.control.broadcast( 'foobarbaz1', arguments=None, limit=None, destination=[1, 2, 3], ) self.app.control.mailbox.assert_called() self.app.control.mailbox()._broadcast.assert_called_with( 'foobarbaz1', {}, [1, 2, 3], False, 1.0, None, None, channel=None, ) class test_Control: def setup(self): self.app.control.broadcast = Mock(name='broadcast') self.app.control.broadcast.return_value = {} @self.app.task(shared=False) def mytask(): pass self.mytask = mytask def assert_control_called_with_args(self, name, destination=None, _options=None, **args): self.app.control.broadcast.assert_called_with( name, destination=destination, arguments=args, **_options or {}) def test_purge(self): self.app.amqp.TaskConsumer = Mock(name='TaskConsumer') self.app.control.purge() self.app.amqp.TaskConsumer().purge.assert_called_with() def test_rate_limit(self): self.app.control.rate_limit(self.mytask.name, '100/m') self.assert_control_called_with_args( 'rate_limit', destination=None, task_name=self.mytask.name, rate_limit='100/m', ) def test_rate_limit__with_destination(self): self.app.control.rate_limit( self.mytask.name, '100/m', 'a@w.com', limit=100) self.assert_control_called_with_args( 'rate_limit', destination='a@w.com', task_name=self.mytask.name, rate_limit='100/m', _options={'limit': 100}, ) def test_time_limit(self): self.app.control.time_limit(self.mytask.name, soft=10, hard=20) self.assert_control_called_with_args( 'time_limit', destination=None, task_name=self.mytask.name, soft=10, hard=20, ) def test_time_limit__with_destination(self): self.app.control.time_limit( self.mytask.name, soft=10, hard=20, destination='a@q.com', limit=99, ) self.assert_control_called_with_args( 'time_limit', destination='a@q.com', task_name=self.mytask.name, soft=10, hard=20, _options={'limit': 99}, ) def test_add_consumer(self): self.app.control.add_consumer('foo') self.assert_control_called_with_args( 'add_consumer', destination=None, queue='foo', exchange=None, exchange_type='direct', routing_key=None, ) def test_add_consumer__with_options_and_dest(self): self.app.control.add_consumer( 'foo', 'ex', 'topic', 'rkey', destination='a@q.com', limit=78) self.assert_control_called_with_args( 'add_consumer', destination='a@q.com', queue='foo', exchange='ex', exchange_type='topic', routing_key='rkey', _options={'limit': 78}, ) def 
test_cancel_consumer(self): self.app.control.cancel_consumer('foo') self.assert_control_called_with_args( 'cancel_consumer', destination=None, queue='foo', ) def test_cancel_consumer__with_destination(self): self.app.control.cancel_consumer( 'foo', destination='w1@q.com', limit=3) self.assert_control_called_with_args( 'cancel_consumer', destination='w1@q.com', queue='foo', _options={'limit': 3}, ) def test_shutdown(self): self.app.control.shutdown() self.assert_control_called_with_args('shutdown', destination=None) def test_shutdown__with_destination(self): self.app.control.shutdown(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'shutdown', destination='a@q.com', _options={'limit': 3}) def test_heartbeat(self): self.app.control.heartbeat() self.assert_control_called_with_args('heartbeat', destination=None) def test_heartbeat__with_destination(self): self.app.control.heartbeat(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'heartbeat', destination='a@q.com', _options={'limit': 3}) def test_pool_restart(self): self.app.control.pool_restart() self.assert_control_called_with_args( 'pool_restart', destination=None, modules=None, reload=False, reloader=None) def test_terminate(self): self.app.control.revoke = Mock(name='revoke') self.app.control.terminate('124') self.app.control.revoke.assert_called_with( '124', destination=None, terminate=True, signal=control.TERM_SIGNAME, ) def test_enable_events(self): self.app.control.enable_events() self.assert_control_called_with_args('enable_events', destination=None) def test_enable_events_with_destination(self): self.app.control.enable_events(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'enable_events', destination='a@q.com', _options={'limit': 3}) def test_disable_events(self): self.app.control.disable_events() self.assert_control_called_with_args( 'disable_events', destination=None) def test_disable_events_with_destination(self): self.app.control.disable_events(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'disable_events', destination='a@q.com', _options={'limit': 3}) def test_ping(self): self.app.control.ping() self.assert_control_called_with_args( 'ping', destination=None, _options={'timeout': 1.0, 'reply': True}) def test_ping_with_destination(self): self.app.control.ping(destination='a@q.com', limit=3) self.assert_control_called_with_args( 'ping', destination='a@q.com', _options={ 'limit': 3, 'timeout': 1.0, 'reply': True, }) def test_revoke(self): self.app.control.revoke('foozbaaz') self.assert_control_called_with_args( 'revoke', destination=None, task_id='foozbaaz', signal=control.TERM_SIGNAME, terminate=False, ) def test_revoke__with_options(self): self.app.control.revoke( 'foozbaaz', destination='a@q.com', terminate=True, signal='KILL', limit=404, ) self.assert_control_called_with_args( 'revoke', destination='a@q.com', task_id='foozbaaz', signal='KILL', terminate=True, _options={'limit': 404}, ) def test_election(self): self.app.control.election('some_id', 'topic', 'action') self.assert_control_called_with_args( 'election', destination=None, topic='topic', action='action', id='some_id', _options={'connection': None}, ) def test_autoscale(self): self.app.control.autoscale(300, 10) self.assert_control_called_with_args( 'autoscale', max=300, min=10, destination=None) def test_autoscale__with_options(self): self.app.control.autoscale(300, 10, destination='a@q.com', limit=39) self.assert_control_called_with_args( 'autoscale', max=300, min=10, 
destination='a@q.com', _options={'limit': 39} ) def test_pool_grow(self): self.app.control.pool_grow(2) self.assert_control_called_with_args( 'pool_grow', n=2, destination=None) def test_pool_grow__with_options(self): self.app.control.pool_grow(2, destination='a@q.com', limit=39) self.assert_control_called_with_args( 'pool_grow', n=2, destination='a@q.com', _options={'limit': 39} ) def test_pool_shrink(self): self.app.control.pool_shrink(2) self.assert_control_called_with_args( 'pool_shrink', n=2, destination=None) def test_pool_shrink__with_options(self): self.app.control.pool_shrink(2, destination='a@q.com', limit=39) self.assert_control_called_with_args( 'pool_shrink', n=2, destination='a@q.com', _options={'limit': 39} ) def test_revoke_from_result(self): self.app.control.revoke = Mock(name='revoke') self.app.AsyncResult('foozbazzbar').revoke() self.app.control.revoke.assert_called_with( 'foozbazzbar', connection=None, reply=False, signal=None, terminate=False, timeout=None) def test_revoke_from_resultset(self): self.app.control.revoke = Mock(name='revoke') uuids = [uuid() for _ in range(10)] r = self.app.GroupResult( uuid(), [self.app.AsyncResult(x) for x in uuids]) r.revoke() self.app.control.revoke.assert_called_with( uuids, connection=None, reply=False, signal=None, terminate=False, timeout=None) celery-4.1.0/t/unit/app/test_celery.py0000644000175000017500000000063413130607475017634 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import celery import pytest def test_version(): assert celery.VERSION assert len(celery.VERSION) >= 3 celery.VERSION = (0, 3, 0) assert celery.__version__.count('.') >= 2 @pytest.mark.parametrize('attr', [ '__author__', '__contact__', '__homepage__', '__docformat__', ]) def test_meta(attr): assert getattr(celery, attr, None) celery-4.1.0/t/unit/app/test_log.py0000644000175000017500000002637713130607475017146 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import logging import pytest import sys from collections import defaultdict from io import StringIO from tempfile import mktemp from case import Mock, mock, patch, skip from case.utils import get_logger_handlers from celery import signals from celery import uuid from celery.app.log import TaskFormatter from celery.five import python_2_unicode_compatible from celery.utils.log import LoggingProxy from celery.utils.log import ( get_logger, ColorFormatter, logger as base_logger, get_task_logger, task_logger, in_sighandler, logger_isa, ) class test_TaskFormatter: def test_no_task(self): class Record(object): msg = 'hello world' levelname = 'info' exc_text = exc_info = None stack_info = None def getMessage(self): return self.msg record = Record() x = TaskFormatter() x.format(record) assert record.task_name == '???' assert record.task_id == '???' 
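# For context, a minimal wiring sketch for TaskFormatter (handler choice
# and format string are illustrative, not taken from this module). The
# formatter copies ``task_name`` and ``task_id`` from the currently
# executing task into each log record, falling back to '???' outside of a
# task -- the behaviour test_no_task asserts above:
#
#     import logging
#     from celery.app.log import TaskFormatter
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(TaskFormatter(
#         '[%(asctime)s: %(levelname)s/%(task_name)s(%(task_id)s)] '
#         '%(message)s'))
#     logging.getLogger('celery.task').addHandler(handler)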
class test_logger_isa: def test_isa(self): x = get_task_logger('Z1george') assert logger_isa(x, task_logger) prev_x, x.parent = x.parent, None try: assert not logger_isa(x, task_logger) finally: x.parent = prev_x y = get_task_logger('Z1elaine') y.parent = x assert logger_isa(y, task_logger) assert logger_isa(y, x) assert logger_isa(y, y) z = get_task_logger('Z1jerry') z.parent = y assert logger_isa(z, task_logger) assert logger_isa(z, y) assert logger_isa(z, x) assert logger_isa(z, z) def test_recursive(self): x = get_task_logger('X1foo') prev, x.parent = x.parent, x try: with pytest.raises(RuntimeError): logger_isa(x, task_logger) finally: x.parent = prev y = get_task_logger('X2foo') z = get_task_logger('X2foo') prev_y, y.parent = y.parent, z try: prev_z, z.parent = z.parent, y try: with pytest.raises(RuntimeError): logger_isa(y, task_logger) finally: z.parent = prev_z finally: y.parent = prev_y class test_ColorFormatter: @patch('celery.utils.log.safe_str') @patch('logging.Formatter.formatException') def test_formatException_not_string(self, fe, safe_str): x = ColorFormatter() value = KeyError() fe.return_value = value assert x.formatException(value) is value fe.assert_called() safe_str.assert_not_called() @patch('logging.Formatter.formatException') @patch('celery.utils.log.safe_str') def test_formatException_bytes(self, safe_str, fe): x = ColorFormatter() fe.return_value = b'HELLO' try: raise Exception() except Exception: assert x.formatException(sys.exc_info()) if sys.version_info[0] == 2: safe_str.assert_called() @patch('logging.Formatter.format') def test_format_object(self, _format): x = ColorFormatter() x.use_color = True record = Mock() record.levelname = 'ERROR' record.msg = object() assert x.format(record) @patch('celery.utils.log.safe_str') def test_format_raises(self, safe_str): x = ColorFormatter() def on_safe_str(s): try: raise ValueError('foo') finally: safe_str.side_effect = None safe_str.side_effect = on_safe_str @python_2_unicode_compatible class Record(object): levelname = 'ERROR' msg = 'HELLO' exc_info = 1 exc_text = 'error text' stack_info = None def __str__(self): return on_safe_str('') def getMessage(self): return self.msg record = Record() safe_str.return_value = record msg = x.format(record) assert ' 3 CASE_LOG_REDIRECT_EFFECT = 'Test {0} didn\'t disable LoggingProxy for {1}' CASE_LOG_LEVEL_EFFECT = 'Test {0} modified the level of the root logger' CASE_LOG_HANDLER_EFFECT = 'Test {0} modified handlers for the root logger' @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': 'memory://', 'broker_transport_options': { 'polling_interval': 0.1 }, 'result_backend': 'cache+memory://', 'task_default_queue': 'testcelery', 'task_default_exchange': 'testcelery', 'task_default_routing_key': 'testcelery', 'task_queues': ( Queue('testcelery', routing_key='testcelery'), ), 'accept_content': ('json', 'pickle'), # Mongo results tests (only executed if installed and running) 'mongodb_backend_settings': { 'host': os.environ.get('MONGO_HOST') or 'localhost', 'port': os.environ.get('MONGO_PORT') or 27017, 'database': os.environ.get('MONGO_DB') or 'celery_unittests', 'taskmeta_collection': ( os.environ.get('MONGO_TASKMETA_COLLECTION') or 'taskmeta_collection' ), 'user': os.environ.get('MONGO_USER'), 'password': os.environ.get('MONGO_PASSWORD'), } } @pytest.fixture(scope='session') def use_celery_app_trap(): return True @pytest.fixture(autouse=True) def reset_cache_backend_state(celery_app): """Fixture that resets the internal state of the cache result backend.""" 
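# Everything after the ``yield`` below runs as teardown: pytest resumes
# the generator once the test finishes, so the dummy cache is cleared only
# when the test actually instantiated a CacheBackend. The general shape of
# such a fixture (name illustrative):
#
#     @pytest.fixture(autouse=True)
#     def my_fixture():
#         ...        # setup
#         yield      # the test runs here
#         ...        # teardown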
yield backend = celery_app.__dict__.get('backend') if backend is not None: if isinstance(backend, CacheBackend): if isinstance(backend.client, DummyClient): backend.client.cache.clear() backend._cache.clear() @decorator def assert_signal_called(signal, **expected): """Context that verifes signal is called before exiting.""" handler = Mock() def on_call(**kwargs): return handler(**kwargs) signal.connect(on_call) try: yield handler finally: signal.disconnect(on_call) handler.assert_called_with(signal=signal, **expected) @pytest.fixture def app(celery_app): yield celery_app @pytest.fixture(autouse=True, scope='session') def AAA_disable_multiprocessing(): # pytest-cov breaks if a multiprocessing.Process is started, # so disable them completely to make sure it doesn't happen. from case import patch stuff = [ 'multiprocessing.Process', 'billiard.Process', 'billiard.context.Process', 'billiard.process.Process', 'billiard.process.BaseProcess', 'multiprocessing.Process', ] ctxs = [patch(s) for s in stuff] [ctx.__enter__() for ctx in ctxs] yield [ctx.__exit__(*sys.exc_info()) for ctx in ctxs] def alive_threads(): return [thread for thread in threading.enumerate() if thread.is_alive()] @pytest.fixture(autouse=True) def task_join_will_not_block(): from celery import _state from celery import result prev_res_join_block = result.task_join_will_block _state.orig_task_join_will_block = _state.task_join_will_block prev_state_join_block = _state.task_join_will_block result.task_join_will_block = \ _state.task_join_will_block = lambda: False _state._set_task_join_will_block(False) yield result.task_join_will_block = prev_res_join_block _state.task_join_will_block = prev_state_join_block _state._set_task_join_will_block(False) @pytest.fixture(scope='session', autouse=True) def record_threads_at_startup(request): try: request.session._threads_at_startup except AttributeError: request.session._threads_at_startup = alive_threads() @pytest.fixture(autouse=True) def threads_not_lingering(request): yield assert request.session._threads_at_startup == alive_threads() @pytest.fixture(autouse=True) def AAA_reset_CELERY_LOADER_env(): yield assert not os.environ.get('CELERY_LOADER') @pytest.fixture(autouse=True) def test_cases_shortcuts(request, app, patching, celery_config): if request.instance: @app.task def add(x, y): return x + y # IMPORTANT: We set an .app attribute for every test case class. request.instance.app = app request.instance.Celery = TestApp request.instance.assert_signal_called = assert_signal_called request.instance.task_message_from_sig = task_message_from_sig request.instance.TaskMessage = TaskMessage request.instance.TaskMessage1 = TaskMessage1 request.instance.CELERY_TEST_CONFIG = celery_config request.instance.add = add request.instance.patching = patching yield if request.instance: request.instance.app = None @pytest.fixture(autouse=True) def sanity_no_shutdown_flags_set(): yield # Make sure no test left the shutdown flags enabled. 
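# Note: these flags are not plain booleans -- the worker can also store an
# exit code in them (e.g. EX_OK == 0), which is why the identity checks
# below are paired with separate truthiness checks.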
from celery.worker import state as worker_state # check for EX_OK assert worker_state.should_stop is not False assert worker_state.should_terminate is not False # check for other true values assert not worker_state.should_stop assert not worker_state.should_terminate @pytest.fixture(autouse=True) def sanity_stdouts(request): yield from celery.utils.log import LoggingProxy assert sys.stdout assert sys.stderr assert sys.__stdout__ assert sys.__stderr__ this = request.node.name if isinstance(sys.stdout, (LoggingProxy, Mock)) or \ isinstance(sys.__stdout__, (LoggingProxy, Mock)): raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout')) if isinstance(sys.stderr, (LoggingProxy, Mock)) or \ isinstance(sys.__stderr__, (LoggingProxy, Mock)): raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr')) @pytest.fixture(autouse=True) def sanity_logging_side_effects(request): root = logging.getLogger() rootlevel = root.level roothandlers = root.handlers yield this = request.node.name root_now = logging.getLogger() if root_now.level != rootlevel: raise RuntimeError(CASE_LOG_LEVEL_EFFECT.format(this)) if root_now.handlers != roothandlers: raise RuntimeError(CASE_LOG_HANDLER_EFFECT.format(this)) def setup_session(scope='session'): using_coverage = ( os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv ) os.environ.update( # warn if config module not found C_WNOCONF='yes', KOMBU_DISABLE_LIMIT_PROTECTION='yes', ) if using_coverage and not PYPY3: from warnings import catch_warnings with catch_warnings(record=True): import_all_modules() warnings.resetwarnings() from celery._state import set_default_app set_default_app(Trap()) def teardown(): # Don't want SUBDEBUG log messages at finalization. try: from multiprocessing.util import get_logger except ImportError: pass else: get_logger().setLevel(logging.WARNING) # Make sure test database is removed. import os if os.path.exists('test.db'): try: os.remove('test.db') except WindowsError: pass # Make sure there are no remaining threads at shutdown. 
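# Note: non-daemon threads left behind keep the interpreter alive at exit
# and usually mean a test started a timer or worker thread without
# stopping it, hence the explicit stderr warning below rather than a
# silent pass.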
import threading remaining_threads = [thread for thread in threading.enumerate() if thread.getName() != 'MainThread'] if remaining_threads: sys.stderr.write( '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % ( remaining_threads)) def find_distribution_modules(name=__name__, file=__file__): current_dist_depth = len(name.split('.')) - 1 current_dist = os.path.join(os.path.dirname(file), *([os.pardir] * current_dist_depth)) abs = os.path.abspath(current_dist) dist_name = os.path.basename(abs) for dirpath, dirnames, filenames in os.walk(abs): package = (dist_name + dirpath[len(abs):]).replace('/', '.') if '__init__.py' in filenames: yield package for filename in filenames: if filename.endswith('.py') and filename != '__init__.py': yield '.'.join([package, filename])[:-3] def import_all_modules(name=__name__, file=__file__, skip=('celery.decorators', 'celery.task')): for module in find_distribution_modules(name, file): if not module.startswith(skip): try: import_module(module) except ImportError: pass except OSError as exc: warnings.warn(UserWarning( 'Ignored error importing module {0}: {1!r}'.format( module, exc, ))) celery-4.1.0/t/unit/bin/0000755000175000017500000000000013135426347014727 5ustar omeromer00000000000000celery-4.1.0/t/unit/bin/test_beat.py0000644000175000017500000001113613130607475017253 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import logging import pytest import sys from case import Mock, mock, patch from celery import beat from celery import platforms from celery.bin import beat as beat_bin from celery.apps import beat as beatapp def MockBeat(*args, **kwargs): class _Beat(beatapp.Beat): Service = Mock( name='MockBeat.Service', return_value=Mock(name='MockBeat()', max_interval=3.3), ) b = _Beat(*args, **kwargs) sched = b.Service.return_value.get_scheduler = Mock() sched.return_value.max_interval = 3.3 return b class test_Beat: def test_loglevel_string(self): b = beatapp.Beat(app=self.app, loglevel='DEBUG', redirect_stdouts=False) assert b.loglevel == logging.DEBUG b2 = beatapp.Beat(app=self.app, loglevel=logging.DEBUG, redirect_stdouts=False) assert b2.loglevel == logging.DEBUG def test_colorize(self): self.app.log.setup = Mock() b = beatapp.Beat(app=self.app, no_color=True, redirect_stdouts=False) b.setup_logging() self.app.log.setup.assert_called() assert not self.app.log.setup.call_args[1]['colorize'] def test_init_loader(self): b = beatapp.Beat(app=self.app, redirect_stdouts=False) b.init_loader() def test_process_title(self): b = beatapp.Beat(app=self.app, redirect_stdouts=False) b.set_process_title() def test_run(self): b = MockBeat(app=self.app, redirect_stdouts=False) b.install_sync_handler = Mock(name='beat.install_sync_handler') b.Service.return_value.max_interval = 3.0 b.run() b.Service().start.assert_called_with() def psig(self, fun, *args, **kwargs): handlers = {} class Signals(platforms.Signals): def __setitem__(self, sig, handler): handlers[sig] = handler p, platforms.signals = platforms.signals, Signals() try: fun(*args, **kwargs) return handlers finally: platforms.signals = p def test_install_sync_handler(self): b = beatapp.Beat(app=self.app, redirect_stdouts=False) clock = beat.Service(app=self.app) clock.start = Mock(name='beat.Service().start') clock.sync = Mock(name='beat.Service().sync') handlers = self.psig(b.install_sync_handler, clock) with pytest.raises(SystemExit): handlers['SIGINT']('SIGINT', object()) clock.sync.assert_called_with() @mock.restore_logging() def test_setup_logging(self): try: # 
py3k delattr(sys.stdout, 'logger') except AttributeError: pass b = beatapp.Beat(app=self.app, redirect_stdouts=False) b.redirect_stdouts = False b.app.log.already_setup = False b.setup_logging() with pytest.raises(AttributeError): sys.stdout.logger import sys orig_stdout = sys.__stdout__ @patch('celery.apps.beat.logger') def test_logs_errors(self, logger): b = MockBeat( app=self.app, redirect_stdouts=False, socket_timeout=None, ) b.install_sync_handler = Mock('beat.install_sync_handler') b.install_sync_handler.side_effect = RuntimeError('xxx') with mock.restore_logging(): with pytest.raises(RuntimeError): b.start_scheduler() logger.critical.assert_called() @patch('celery.platforms.create_pidlock') def test_using_pidfile(self, create_pidlock): b = MockBeat(app=self.app, pidfile='pidfilelockfilepid', socket_timeout=None, redirect_stdouts=False) b.install_sync_handler = Mock(name='beat.install_sync_handler') with mock.stdouts(): b.start_scheduler() create_pidlock.assert_called() class test_div: def setup(self): self.Beat = self.app.Beat = self.patching('celery.apps.beat.Beat') self.detached = self.patching('celery.bin.beat.detached') self.Beat.__name__ = 'Beat' def test_main(self): sys.argv = [sys.argv[0], '-s', 'foo'] beat_bin.main(app=self.app) self.Beat().run.assert_called_with() def test_detach(self): cmd = beat_bin.beat() cmd.app = self.app cmd.run(detach=True) self.detached.assert_called() def test_parse_options(self): cmd = beat_bin.beat() cmd.app = self.app options, args = cmd.parse_options('celery beat', ['-s', 'foo']) assert options['schedule'] == 'foo' celery-4.1.0/t/unit/bin/__init__.py0000644000175000017500000000000013130607475017024 0ustar omeromer00000000000000celery-4.1.0/t/unit/bin/test_control.py0000644000175000017500000000715513130607475020026 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from celery.five import WhateverIO from celery.bin.base import Error from celery.bin.control import _RemoteControl, inspect, control, status class test_RemoteControl: def test_call_interface(self): with pytest.raises(NotImplementedError): _RemoteControl(app=self.app).call() class test_inspect: def test_usage(self): assert inspect(app=self.app).usage('foo') def test_command_info(self): i = inspect(app=self.app) assert i.get_command_info( 'ping', help=True, color=i.colored.red, app=self.app, ) def test_list_commands_color(self): i = inspect(app=self.app) assert i.list_commands(help=True, color=i.colored.red, app=self.app) assert i.list_commands(help=False, color=None, app=self.app) def test_epilog(self): assert inspect(app=self.app).epilog def test_do_call_method_sql_transport_type(self): self.app.connection = Mock() conn = self.app.connection.return_value = Mock(name='Connection') conn.transport.driver_type = 'sql' i = inspect(app=self.app) with pytest.raises(i.Error): i.do_call_method(['ping']) def test_say_directions(self): i = inspect(self.app) i.out = Mock() i.quiet = True i.say_chat('<-', 'hello out') i.out.assert_not_called() i.say_chat('->', 'hello in') i.out.assert_called() i.quiet = False i.out.reset_mock() i.say_chat('<-', 'hello out', 'body') i.out.assert_called() @patch('celery.app.control.Control.inspect') def test_run(self, real): out = WhateverIO() i = inspect(app=self.app, stdout=out) with pytest.raises(Error): i.run() with pytest.raises(Error): i.run('help') with pytest.raises(Error): i.run('xyzzybaz') i.run('ping') real.assert_called() i.run('ping', destination='foo,bar') assert 
real.call_args[1]['destination'] == ['foo', 'bar'] assert real.call_args[1]['timeout'] == 0.2 callback = real.call_args[1]['callback'] callback({'foo': {'ok': 'pong'}}) assert 'OK' in out.getvalue() with patch('celery.bin.control.dumps') as dumps: i.run('ping', json=True) dumps.assert_called() instance = real.return_value = Mock() instance._request.return_value = None with pytest.raises(Error): i.run('ping') out.seek(0) out.truncate() i.quiet = True i.say_chat('<-', 'hello') assert not out.getvalue() class test_control: def control(self, patch_call, *args, **kwargs): kwargs.setdefault('app', Mock(name='app')) c = control(*args, **kwargs) if patch_call: c.call = Mock(name='control.call') return c def test_call(self): i = self.control(False) i.call('foo', arguments={'kw': 2}) i.app.control.broadcast.assert_called_with( 'foo', arguments={'kw': 2}, reply=True) class test_status: @patch('celery.bin.control.inspect') def test_run(self, inspect_): out, err = WhateverIO(), WhateverIO() ins = inspect_.return_value = Mock() ins.run.return_value = [] s = status(self.app, stdout=out, stderr=err) with pytest.raises(Error): s.run() ins.run.return_value = ['a', 'b', 'c'] s.run() assert '3 nodes online' in out.getvalue() s.run(quiet=True) celery-4.1.0/t/unit/bin/proj/0000755000175000017500000000000013135426347015701 5ustar omeromer00000000000000celery-4.1.0/t/unit/bin/proj/__init__.py0000644000175000017500000000017213130607475020010 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery import Celery hello = Celery(set_as_current=False) celery-4.1.0/t/unit/bin/proj/app.py0000644000175000017500000000017013130607475017027 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery import Celery app = Celery(set_as_current=False) celery-4.1.0/t/unit/bin/test_events.py0000644000175000017500000000522613130607475017647 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import importlib from functools import wraps from case import patch, skip from celery.bin import events def _old_patch(module, name, mocked): module = importlib.import_module(module) def _patch(fun): @wraps(fun) def __patched(*args, **kwargs): prev = getattr(module, name) setattr(module, name, mocked) try: return fun(*args, **kwargs) finally: setattr(module, name, prev) return __patched return _patch class MockCommand(object): executed = [] def execute_from_commandline(self, **kwargs): self.executed.append(True) def proctitle(prog, info=None): proctitle.last = (prog, info) proctitle.last = () # noqa: E305 class test_events: def setup(self): self.ev = events.events(app=self.app) @_old_patch('celery.events.dumper', 'evdump', lambda **kw: 'me dumper, you?') @_old_patch('celery.bin.events', 'set_process_title', proctitle) def test_run_dump(self): assert self.ev.run(dump=True) == 'me dumper, you?' assert 'celery events:dump' in proctitle.last[0] @skip.unless_module('curses', import_errors=(ImportError, OSError)) def test_run_top(self): @_old_patch('celery.events.cursesmon', 'evtop', lambda **kw: 'me top, you?') @_old_patch('celery.bin.events', 'set_process_title', proctitle) def _inner(): assert self.ev.run() == 'me top, you?' 
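# the patched set_process_title above records its arguments in proctitle.last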
assert 'celery events:top' in proctitle.last[0] return _inner() @_old_patch('celery.events.snapshot', 'evcam', lambda *a, **k: (a, k)) @_old_patch('celery.bin.events', 'set_process_title', proctitle) def test_run_cam(self): a, kw = self.ev.run(camera='foo.bar.baz', logfile='logfile') assert a[0] == 'foo.bar.baz' assert kw['freq'] == 1.0 assert kw['maxrate'] is None assert kw['loglevel'] == 'INFO' assert kw['logfile'] == 'logfile' assert 'celery events:cam' in proctitle.last[0] @patch('celery.events.snapshot.evcam') @patch('celery.bin.events.detached') def test_run_cam_detached(self, detached, evcam): self.ev.prog_name = 'celery events' self.ev.run_evcam('myapp.Camera', detach=True) detached.assert_called() evcam.assert_called() def test_get_options(self): assert not self.ev.get_options() @_old_patch('celery.bin.events', 'events', MockCommand) def test_main(self): MockCommand.executed = [] events.main() assert MockCommand.executed celery-4.1.0/t/unit/bin/test_celery.py0000644000175000017500000002220313130607475017620 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import sys from case import Mock, patch from celery import __main__ from celery.five import WhateverIO from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK from celery.bin.base import Error from celery.bin import celery as mod from celery.bin.celery import ( Command, help, report, CeleryCommand, determine_exit_status, multi, main as mainfun, ) class test__main__: def test_main(self): with patch('celery.__main__.maybe_patch_concurrency') as mpc: with patch('celery.bin.celery.main') as main: __main__.main() mpc.assert_called_with() main.assert_called_with() def test_main__multi(self): with patch('celery.__main__.maybe_patch_concurrency') as mpc: with patch('celery.bin.celery.main') as main: prev, sys.argv = sys.argv, ['foo', 'multi'] try: __main__.main() mpc.assert_not_called() main.assert_called_with() finally: sys.argv = prev class test_Command: def test_Error_repr(self): x = Error('something happened') assert x.status is not None assert x.reason assert str(x) def setup(self): self.out = WhateverIO() self.err = WhateverIO() self.cmd = Command(self.app, stdout=self.out, stderr=self.err) def test_error(self): self.cmd.out = Mock() self.cmd.error('FOO') self.cmd.out.assert_called() def test_out(self): f = Mock() self.cmd.out('foo', f) def test_call(self): def ok_run(): pass self.cmd.run = ok_run assert self.cmd() == EX_OK def error_run(): raise Error('error', EX_FAILURE) self.cmd.run = error_run assert self.cmd() == EX_FAILURE def test_run_from_argv(self): with pytest.raises(NotImplementedError): self.cmd.run_from_argv('prog', ['foo', 'bar']) def test_pretty_list(self): assert self.cmd.pretty([])[1] == '- empty -' assert 'bar' in self.cmd.pretty(['foo', 'bar'])[1] def test_pretty_dict(self, text='the quick brown fox'): assert 'OK' in str(self.cmd.pretty({'ok': text})[0]) assert 'ERROR' in str(self.cmd.pretty({'error': text})[0]) def test_pretty(self): assert 'OK' in str(self.cmd.pretty('the quick brown')) assert 'OK' in str(self.cmd.pretty(object())) assert 'OK' in str(self.cmd.pretty({'foo': 'bar'})) class test_report: def test_run(self): out = WhateverIO() r = report(app=self.app, stdout=out) assert r.run() == EX_OK assert out.getvalue() class test_help: def test_run(self): out = WhateverIO() h = help(app=self.app, stdout=out) h.parser = Mock() assert h.run() == EX_USAGE assert out.getvalue() assert h.usage('help') h.parser.print_help.assert_called_with() class 
test_CeleryCommand: def test_execute_from_commandline(self): x = CeleryCommand(app=self.app) x.handle_argv = Mock() x.handle_argv.return_value = 1 with pytest.raises(SystemExit): x.execute_from_commandline() x.handle_argv.return_value = True with pytest.raises(SystemExit): x.execute_from_commandline() x.handle_argv.side_effect = KeyboardInterrupt() with pytest.raises(SystemExit): x.execute_from_commandline() x.respects_app_option = True with pytest.raises(SystemExit): x.execute_from_commandline(['celery', 'multi']) assert not x.respects_app_option x.respects_app_option = True with pytest.raises(SystemExit): x.execute_from_commandline(['manage.py', 'celery', 'multi']) assert not x.respects_app_option def test_with_pool_option(self): x = CeleryCommand(app=self.app) assert x.with_pool_option(['celery', 'events']) is None assert x.with_pool_option(['celery', 'worker']) assert x.with_pool_option(['manage.py', 'celery', 'worker']) def test_load_extensions_no_commands(self): with patch('celery.bin.celery.Extensions') as Ext: ext = Ext.return_value = Mock(name='Extension') ext.load.return_value = None x = CeleryCommand(app=self.app) x.load_extension_commands() def test_load_extensions_commands(self): with patch('celery.bin.celery.Extensions') as Ext: prev, mod.command_classes = list(mod.command_classes), Mock() try: ext = Ext.return_value = Mock(name='Extension') ext.load.return_value = ['foo', 'bar'] x = CeleryCommand(app=self.app) x.load_extension_commands() mod.command_classes.append.assert_called_with( ('Extensions', ['foo', 'bar'], 'magenta'), ) finally: mod.command_classes = prev def test_determine_exit_status(self): assert determine_exit_status('true') == EX_OK assert determine_exit_status('') == EX_FAILURE def test_relocate_args_from_start(self): x = CeleryCommand(app=self.app) assert x._relocate_args_from_start(None) == [] relargs1 = x._relocate_args_from_start([ '-l', 'debug', 'worker', '-c', '3', '--foo', ]) assert relargs1 == ['worker', '-c', '3', '--foo', '-l', 'debug'] relargs2 = x._relocate_args_from_start([ '--pool=gevent', '-l', 'debug', 'worker', '--foo', '-c', '3', ]) assert relargs2 == [ 'worker', '--foo', '-c', '3', '--pool=gevent', '-l', 'debug', ] assert x._relocate_args_from_start(['foo', '--foo=1']) == [ 'foo', '--foo=1', ] def test_register_command(self): prev, CeleryCommand.commands = dict(CeleryCommand.commands), {} try: fun = Mock(name='fun') CeleryCommand.register_command(fun, name='foo') assert CeleryCommand.commands['foo'] is fun finally: CeleryCommand.commands = prev def test_handle_argv(self): x = CeleryCommand(app=self.app) x.execute = Mock() x.handle_argv('celery', []) x.execute.assert_called_with('help', ['help']) x.handle_argv('celery', ['start', 'foo']) x.execute.assert_called_with('start', ['start', 'foo']) def test_execute(self): x = CeleryCommand(app=self.app) Help = x.commands['help'] = Mock() help = Help.return_value = Mock() x.execute('fooox', ['a']) help.run_from_argv.assert_called_with(x.prog_name, [], command='help') help.reset() x.execute('help', ['help']) help.run_from_argv.assert_called_with(x.prog_name, [], command='help') Dummy = x.commands['dummy'] = Mock() dummy = Dummy.return_value = Mock() exc = dummy.run_from_argv.side_effect = Error( 'foo', status='EX_FAILURE', ) x.on_error = Mock(name='on_error') help.reset() x.execute('dummy', ['dummy']) x.on_error.assert_called_with(exc) dummy.run_from_argv.assert_called_with( x.prog_name, [], command='dummy', ) help.run_from_argv.assert_called_with( x.prog_name, [], command='help', ) exc = 
dummy.run_from_argv.side_effect = x.UsageError('foo') x.on_usage_error = Mock() x.execute('dummy', ['dummy']) x.on_usage_error.assert_called_with(exc) def test_on_usage_error(self): x = CeleryCommand(app=self.app) x.error = Mock() x.on_usage_error(x.UsageError('foo'), command=None) x.error.assert_called() x.on_usage_error(x.UsageError('foo'), command='dummy') def test_prepare_prog_name(self): x = CeleryCommand(app=self.app) main = Mock(name='__main__') main.__file__ = '/opt/foo.py' with patch.dict(sys.modules, __main__=main): assert x.prepare_prog_name('__main__.py') == '/opt/foo.py' assert x.prepare_prog_name('celery') == 'celery' class test_multi: def test_get_options(self): assert multi(app=self.app).get_options() is None def test_run_from_argv(self): with patch('celery.bin.multi.MultiTool') as MultiTool: m = MultiTool.return_value = Mock() multi(self.app).run_from_argv('celery', ['arg'], command='multi') m.execute_from_commandline.assert_called_with(['multi', 'arg']) class test_main: @patch('celery.bin.celery.CeleryCommand') def test_main(self, Command): cmd = Command.return_value = Mock() mainfun() cmd.execute_from_commandline.assert_called_with(None) @patch('celery.bin.celery.CeleryCommand') def test_main_KeyboardInterrupt(self, Command): cmd = Command.return_value = Mock() cmd.execute_from_commandline.side_effect = KeyboardInterrupt() mainfun() cmd.execute_from_commandline.assert_called_with(None) celery-4.1.0/t/unit/bin/test_purge.py0000644000175000017500000000136213130607475017462 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import Mock from celery.five import WhateverIO from celery.bin.purge import purge class test_purge: def test_run(self): out = WhateverIO() a = purge(app=self.app, stdout=out) a._purge = Mock(name='_purge') a._purge.return_value = 0 a.run(force=True) assert 'No messages purged' in out.getvalue() a._purge.return_value = 100 a.run(force=True) assert '100 messages' in out.getvalue() a.out = Mock(name='out') a.ask = Mock(name='ask') a.run(force=False) a.ask.assert_called_with(a.warn_prompt, ('yes', 'no'), 'no') a.ask.return_value = 'yes' a.run(force=False) celery-4.1.0/t/unit/bin/test_list.py0000644000175000017500000000130513130607475017310 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu.five import WhateverIO from celery.bin.base import Error from celery.bin.list import list_ class test_list: def test_list_bindings_no_support(self): l = list_(app=self.app, stderr=WhateverIO()) management = Mock() management.get_bindings.side_effect = NotImplementedError() with pytest.raises(Error): l.list_bindings(management) def test_run(self): l = list_(app=self.app, stderr=WhateverIO()) l.run('bindings') with pytest.raises(Error): l.run(None) with pytest.raises(Error): l.run('foo') celery-4.1.0/t/unit/bin/test_amqp.py0000644000175000017500000001065713130607475017305 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from celery.five import WhateverIO from celery.bin.amqp import ( AMQPAdmin, AMQShell, dump_message, amqp, main, ) class test_AMQShell: def setup(self): self.fh = WhateverIO() self.adm = self.create_adm() self.shell = AMQShell(connect=self.adm.connect, out=self.fh) def create_adm(self, *args, **kwargs): return AMQPAdmin(app=self.app, out=self.fh, *args, **kwargs) def test_queue_declare(self): self.shell.onecmd('queue.declare foo') assert 'ok' in 
self.fh.getvalue() def test_missing_command(self): self.shell.onecmd('foo foo') assert 'unknown syntax' in self.fh.getvalue() def RV(self): raise Exception(self.fh.getvalue()) def test_spec_format_response(self): spec = self.shell.amqp['exchange.declare'] assert spec.format_response(None) == 'ok.' assert spec.format_response('NO') == 'NO' def test_missing_namespace(self): self.shell.onecmd('ns.cmd arg') assert 'unknown syntax' in self.fh.getvalue() def test_help(self): self.shell.onecmd('help') assert 'Example:' in self.fh.getvalue() def test_help_command(self): self.shell.onecmd('help queue.declare') assert 'passive:no' in self.fh.getvalue() def test_help_unknown_command(self): self.shell.onecmd('help foo.baz') assert 'unknown syntax' in self.fh.getvalue() def test_onecmd_error(self): self.shell.dispatch = Mock() self.shell.dispatch.side_effect = MemoryError() self.shell.say = Mock() assert not self.shell.needs_reconnect self.shell.onecmd('hello') self.shell.say.assert_called() assert self.shell.needs_reconnect def test_exit(self): with pytest.raises(SystemExit): self.shell.onecmd('exit') assert "don't leave!" in self.fh.getvalue() def test_note_silent(self): self.shell.silent = True self.shell.note('foo bar') assert 'foo bar' not in self.fh.getvalue() def test_reconnect(self): self.shell.onecmd('queue.declare foo') self.shell.needs_reconnect = True self.shell.onecmd('queue.delete foo') def test_completenames(self): assert self.shell.completenames('queue.dec') == ['queue.declare'] assert (sorted(self.shell.completenames('declare')) == sorted(['queue.declare', 'exchange.declare'])) def test_empty_line(self): self.shell.emptyline = Mock() self.shell.default = Mock() self.shell.onecmd('') self.shell.emptyline.assert_called_with() self.shell.onecmd('foo') self.shell.default.assert_called_with('foo') def test_respond(self): self.shell.respond({'foo': 'bar'}) assert 'foo' in self.fh.getvalue() def test_prompt(self): assert self.shell.prompt def test_no_returns(self): self.shell.onecmd('queue.declare foo') self.shell.onecmd('exchange.declare bar direct yes') self.shell.onecmd('queue.bind foo bar baz') self.shell.onecmd('basic.ack 1') def test_dump_message(self): m = Mock() m.body = 'the quick brown fox' m.properties = {'a': 1} m.delivery_info = {'exchange': 'bar'} assert dump_message(m) def test_dump_message_no_message(self): assert 'No messages in queue' in dump_message(None) def test_note(self): self.adm.silent = True self.adm.note('FOO') assert 'FOO' not in self.fh.getvalue() def test_run(self): a = self.create_adm('queue.declare', 'foo') a.run() assert 'ok' in self.fh.getvalue() def test_run_loop(self): a = self.create_adm() a.Shell = Mock() shell = a.Shell.return_value = Mock() shell.cmdloop = Mock() a.run() shell.cmdloop.assert_called_with() shell.cmdloop.side_effect = KeyboardInterrupt() a.run() assert 'bibi' in self.fh.getvalue() @patch('celery.bin.amqp.amqp') def test_main(self, Command): c = Command.return_value = Mock() main() c.execute_from_commandline.assert_called_with() @patch('celery.bin.amqp.AMQPAdmin') def test_command(self, cls): x = amqp(app=self.app) x.run() assert cls.call_args[1]['app'] is self.app celery-4.1.0/t/unit/bin/test_call.py0000644000175000017500000000234713130607475017257 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from datetime import datetime from case import patch from kombu.utils.json import dumps from celery.five import WhateverIO from celery.bin.call import call class test_call: def setup(self): 
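# register a minimal add task so test_run has a real task name to send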
@self.app.task(shared=False) def add(x, y): return x + y self.add = add @patch('celery.app.base.Celery.send_task') def test_run(self, send_task): a = call(app=self.app, stderr=WhateverIO(), stdout=WhateverIO()) a.run(self.add.name) send_task.assert_called() a.run(self.add.name, args=dumps([4, 4]), kwargs=dumps({'x': 2, 'y': 2})) assert send_task.call_args[1]['args'] == [4, 4] assert send_task.call_args[1]['kwargs'] == {'x': 2, 'y': 2} a.run(self.add.name, expires=10, countdown=10) assert send_task.call_args[1]['expires'] == 10 assert send_task.call_args[1]['countdown'] == 10 now = datetime.now() iso = now.isoformat() a.run(self.add.name, expires=iso) assert send_task.call_args[1]['expires'] == now with pytest.raises(ValueError): a.run(self.add.name, expires='foobaribazibar') celery-4.1.0/t/unit/bin/test_multi.py0000644000175000017500000003122513130607475017473 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import signal import sys from case import Mock, patch from celery.bin.multi import main, MultiTool, __doc__ as doc from celery.five import WhateverIO class test_MultiTool: def setup(self): self.fh = WhateverIO() self.env = {} self.t = MultiTool(env=self.env, fh=self.fh) self.t.cluster_from_argv = Mock(name='cluster_from_argv') self.t._cluster_from_argv = Mock(name='cluster_from_argv') self.t.Cluster = Mock(name='Cluster') self.t.carp = Mock(name='.carp') self.t.usage = Mock(name='.usage') self.t.splash = Mock(name='.splash') self.t.say = Mock(name='.say') self.t.ok = Mock(name='.ok') self.cluster = self.t.Cluster.return_value def _cluster_from_argv(argv): p = self.t.OptionParser(argv) p.parse() return p, self.cluster self.t.cluster_from_argv.return_value = self.cluster self.t._cluster_from_argv.side_effect = _cluster_from_argv def test_findsig(self): self.assert_sig_argument(['a', 'b', 'c', '-1'], 1) self.assert_sig_argument(['--foo=1', '-9'], 9) self.assert_sig_argument(['-INT'], signal.SIGINT) self.assert_sig_argument([], signal.SIGTERM) self.assert_sig_argument(['-s'], signal.SIGTERM) self.assert_sig_argument(['-log'], signal.SIGTERM) def assert_sig_argument(self, args, expected): p = self.t.OptionParser(args) p.parse() assert self.t._find_sig_argument(p) == expected def test_execute_from_commandline(self): self.t.call_command = Mock(name='call_command') self.t.execute_from_commandline( 'multi start --verbose 10 --foo'.split(), cmd='X', ) assert self.t.cmd == 'X' assert self.t.prog_name == 'multi' self.t.call_command.assert_called_with('start', ['10', '--foo']) def test_execute_from_commandline__arguments(self): assert self.t.execute_from_commandline('multi'.split()) assert self.t.execute_from_commandline('multi -bar'.split()) def test_call_command(self): cmd = self.t.commands['foo'] = Mock(name='foo') self.t.retcode = 303 assert (self.t.call_command('foo', ['1', '2', '--foo=3']) is cmd.return_value) cmd.assert_called_with('1', '2', '--foo=3') def test_call_command__error(self): assert self.t.call_command('asdqwewqe', ['1', '2']) == 1 self.t.carp.assert_called() def test_handle_reserved_options(self): assert self.t._handle_reserved_options( ['a', '-q', 'b', '--no-color', 'c']) == ['a', 'b', 'c'] def test_start(self): self.cluster.start.return_value = [0, 0, 1, 0] assert self.t.start('10', '-A', 'proj') self.t.splash.assert_called_with() self.t.cluster_from_argv.assert_called_with(('10', '-A', 'proj')) self.cluster.start.assert_called_with() def test_start__exitcodes(self): self.cluster.start.return_value = [0, 0, 0] assert not 
self.t.start('foo', 'bar', 'baz') self.cluster.start.assert_called_with() self.cluster.start.return_value = [0, 1, 0] assert self.t.start('foo', 'bar', 'baz') def test_stop(self): self.t.stop('10', '-A', 'proj', retry=3) self.t.splash.assert_called_with() self.t._cluster_from_argv.assert_called_with(('10', '-A', 'proj')) self.cluster.stop.assert_called_with(retry=3, sig=signal.SIGTERM) def test_stopwait(self): self.t.stopwait('10', '-A', 'proj', retry=3) self.t.splash.assert_called_with() self.t._cluster_from_argv.assert_called_with(('10', '-A', 'proj')) self.cluster.stopwait.assert_called_with(retry=3, sig=signal.SIGTERM) def test_restart(self): self.cluster.restart.return_value = [0, 0, 1, 0] self.t.restart('10', '-A', 'proj') self.t.splash.assert_called_with() self.t._cluster_from_argv.assert_called_with(('10', '-A', 'proj')) self.cluster.restart.assert_called_with(sig=signal.SIGTERM) def test_names(self): self.t.cluster_from_argv.return_value = [Mock(), Mock()] self.t.cluster_from_argv.return_value[0].name = 'x' self.t.cluster_from_argv.return_value[1].name = 'y' self.t.names('10', '-A', 'proj') self.t.say.assert_called() def test_get(self): node = self.cluster.find.return_value = Mock(name='node') node.argv = ['A', 'B', 'C'] assert (self.t.get('wanted', '10', '-A', 'proj') is self.t.ok.return_value) self.cluster.find.assert_called_with('wanted') self.t.cluster_from_argv.assert_called_with(('10', '-A', 'proj')) self.t.ok.assert_called_with(' '.join(node.argv)) def test_get__KeyError(self): self.cluster.find.side_effect = KeyError() assert self.t.get('wanted', '10', '-A', 'proj') def test_show(self): nodes = self.t.cluster_from_argv.return_value = [ Mock(name='n1'), Mock(name='n2'), ] nodes[0].argv_with_executable = ['python', 'foo', 'bar'] nodes[1].argv_with_executable = ['python', 'xuzzy', 'baz'] assert self.t.show('10', '-A', 'proj') is self.t.ok.return_value self.t.ok.assert_called_with( '\n'.join(' '.join(node.argv_with_executable) for node in nodes)) def test_kill(self): self.t.kill('10', '-A', 'proj') self.t.splash.assert_called_with() self.t.cluster_from_argv.assert_called_with(('10', '-A', 'proj')) self.cluster.kill.assert_called_with() def test_expand(self): node1 = Mock(name='n1') node2 = Mock(name='n2') node1.expander.return_value = 'A' node2.expander.return_value = 'B' nodes = self.t.cluster_from_argv.return_value = [node1, node2] assert self.t.expand('%p', '10') is self.t.ok.return_value self.t.cluster_from_argv.assert_called_with(('10',)) for node in nodes: node.expander.assert_called_with('%p') self.t.ok.assert_called_with('A\nB') def test_note(self): self.t.quiet = True self.t.note('foo') self.t.say.assert_not_called() self.t.quiet = False self.t.note('foo') self.t.say.assert_called_with('foo', newline=True) def test_splash(self): x = MultiTool() x.note = Mock() x.nosplash = True x.splash() x.note.assert_not_called() x.nosplash = False x.splash() x.note.assert_called() def test_Cluster(self): m = MultiTool() c = m.cluster_from_argv(['A', 'B', 'C']) assert c.env is m.env assert c.cmd == 'celery worker' assert c.on_stopping_preamble == m.on_stopping_preamble assert c.on_send_signal == m.on_send_signal assert c.on_still_waiting_for == m.on_still_waiting_for assert c.on_still_waiting_progress == m.on_still_waiting_progress assert c.on_still_waiting_end == m.on_still_waiting_end assert c.on_node_start == m.on_node_start assert c.on_node_restart == m.on_node_restart assert c.on_node_shutdown_ok == m.on_node_shutdown_ok assert c.on_node_status == m.on_node_status assert 
c.on_node_signal_dead == m.on_node_signal_dead assert c.on_node_signal == m.on_node_signal assert c.on_node_down == m.on_node_down assert c.on_child_spawn == m.on_child_spawn assert c.on_child_signalled == m.on_child_signalled assert c.on_child_failure == m.on_child_failure def test_on_stopping_preamble(self): self.t.on_stopping_preamble([]) def test_on_send_signal(self): self.t.on_send_signal(Mock(), Mock()) def test_on_still_waiting_for(self): self.t.on_still_waiting_for([Mock(), Mock()]) def test_on_still_waiting_for__empty(self): self.t.on_still_waiting_for([]) def test_on_still_waiting_progress(self): self.t.on_still_waiting_progress([]) def test_on_still_waiting_end(self): self.t.on_still_waiting_end() def test_on_node_signal_dead(self): self.t.on_node_signal_dead(Mock()) def test_on_node_start(self): self.t.on_node_start(Mock()) def test_on_node_restart(self): self.t.on_node_restart(Mock()) def test_on_node_down(self): self.t.on_node_down(Mock()) def test_on_node_shutdown_ok(self): self.t.on_node_shutdown_ok(Mock()) def test_on_node_status__FAIL(self): self.t.on_node_status(Mock(), 1) self.t.say.assert_called_with(self.t.FAILED, newline=True) def test_on_node_status__OK(self): self.t.on_node_status(Mock(), 0) self.t.say.assert_called_with(self.t.OK, newline=True) def test_on_node_signal(self): self.t.on_node_signal(Mock(), Mock()) def test_on_child_spawn(self): self.t.on_child_spawn(Mock(), Mock(), Mock()) def test_on_child_signalled(self): self.t.on_child_signalled(Mock(), Mock()) def test_on_child_failure(self): self.t.on_child_failure(Mock(), Mock()) def test_constant_strings(self): assert self.t.OK assert self.t.DOWN assert self.t.FAILED class test_MultiTool_functional: def setup(self): self.fh = WhateverIO() self.env = {} self.t = MultiTool(env=self.env, fh=self.fh) def test_note(self): self.t.note('hello world') assert self.fh.getvalue() == 'hello world\n' def test_note_quiet(self): self.t.quiet = True self.t.note('hello world') assert not self.fh.getvalue() def test_carp(self): self.t.say = Mock() self.t.carp('foo') self.t.say.assert_called_with('foo', True, self.t.stderr) def test_info(self): self.t.verbose = True self.t.info('hello info') assert self.fh.getvalue() == 'hello info\n' def test_info_not_verbose(self): self.t.verbose = False self.t.info('hello info') assert not self.fh.getvalue() def test_error(self): self.t.carp = Mock() self.t.usage = Mock() assert self.t.error('foo') == 1 self.t.carp.assert_called_with('foo') self.t.usage.assert_called_with() self.t.carp = Mock() assert self.t.error() == 1 self.t.carp.assert_not_called() def test_nosplash(self): self.t.nosplash = True self.t.splash() assert not self.fh.getvalue() def test_splash(self): self.t.nosplash = False self.t.splash() assert 'celery multi' in self.fh.getvalue() def test_usage(self): self.t.usage() assert self.fh.getvalue() def test_help(self): self.t.help([]) assert doc in self.fh.getvalue() def test_expand(self): self.t.expand('foo%n', 'ask', 'klask', 'dask') assert self.fh.getvalue() == 'fooask\nfooklask\nfoodask\n' @patch('celery.apps.multi.gethostname') def test_get(self, gethostname): gethostname.return_value = 'e.com' self.t.get('xuzzy@e.com', 'foo', 'bar', 'baz') assert not self.fh.getvalue() self.t.get('foo@e.com', 'foo', 'bar', 'baz') assert self.fh.getvalue() @patch('celery.apps.multi.gethostname') def test_names(self, gethostname): gethostname.return_value = 'e.com' self.t.names('foo', 'bar', 'baz') assert 'foo@e.com\nbar@e.com\nbaz@e.com' in self.fh.getvalue() def 
test_execute_from_commandline(self): start = self.t.commands['start'] = Mock() self.t.error = Mock() self.t.execute_from_commandline(['multi', 'start', 'foo', 'bar']) self.t.error.assert_not_called() start.assert_called_with('foo', 'bar') self.t.error = Mock() self.t.execute_from_commandline(['multi', 'frob', 'foo', 'bar']) self.t.error.assert_called_with('Invalid command: frob') self.t.error = Mock() self.t.execute_from_commandline(['multi']) self.t.error.assert_called_with() self.t.error = Mock() self.t.execute_from_commandline(['multi', '-foo']) self.t.error.assert_called_with() self.t.execute_from_commandline( ['multi', 'start', 'foo', '--nosplash', '--quiet', '-q', '--verbose', '--no-color'], ) assert self.t.nosplash assert self.t.quiet assert self.t.verbose assert self.t.no_color @patch('celery.bin.multi.MultiTool') def test_main(self, MultiTool): m = MultiTool.return_value = Mock() with pytest.raises(SystemExit): main() m.execute_from_commandline.assert_called_with(sys.argv) celery-4.1.0/t/unit/bin/test_migrate.py0000644000175000017500000000136713130607475017775 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from celery.five import WhateverIO from celery.bin.migrate import migrate class test_migrate: @patch('celery.contrib.migrate.migrate_tasks') def test_run(self, migrate_tasks): out = WhateverIO() m = migrate(app=self.app, stdout=out, stderr=WhateverIO()) with pytest.raises(TypeError): m.run() migrate_tasks.assert_not_called() m.run('memory://foo', 'memory://bar') migrate_tasks.assert_called() state = Mock() state.count = 10 state.strtotal = 30 m.on_migrate_task(state, {'task': 'tasks.add', 'id': 'ID'}, None) assert '10/30' in out.getvalue() celery-4.1.0/t/unit/bin/test_base.py0000644000175000017500000003011413130607475017247 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import os import pytest from case import Mock, mock, patch from celery.five import bytes_if_py2 from celery.bin.base import ( Command, Option, Extensions, ) class MyApp(object): user_options = {'preload': None} APP = MyApp() # <-- Used by test_with_custom_app class MockCommand(Command): mock_args = ('arg1', 'arg2', 'arg3') def parse_options(self, prog_name, arguments, command=None): options = dict(foo='bar', prog_name=prog_name) return options, self.mock_args def run(self, *args, **kwargs): return args, kwargs class test_Extensions: def test_load(self): with patch('pkg_resources.iter_entry_points') as iterep: with patch('celery.utils.imports.symbol_by_name') as symbyname: ep = Mock() ep.name = 'ep' ep.module_name = 'foo' ep.attrs = ['bar', 'baz'] iterep.return_value = [ep] cls = symbyname.return_value = Mock() register = Mock() e = Extensions('unit', register) e.load() symbyname.assert_called_with('foo:bar') register.assert_called_with(cls, name='ep') with patch('celery.utils.imports.symbol_by_name') as symbyname: symbyname.side_effect = SyntaxError() with patch('warnings.warn') as warn: e.load() warn.assert_called() with patch('celery.utils.imports.symbol_by_name') as symbyname: symbyname.side_effect = KeyError('foo') with pytest.raises(KeyError): e.load() class test_Command: def test_get_options(self): cmd = Command() cmd.option_list = (1, 2, 3) assert cmd.get_options() == (1, 2, 3) def test_custom_description(self): class C(Command): description = 'foo' c = C() assert c.description == 'foo' def test_format_epilog(self): assert Command()._format_epilog('hello') assert not 
Command()._format_epilog('') def test_format_description(self): assert Command()._format_description('hello') def test_register_callbacks(self): c = Command(on_error=8, on_usage_error=9) assert c.on_error == 8 assert c.on_usage_error == 9 def test_run_raises_UsageError(self): cb = Mock() c = Command(on_usage_error=cb) c.verify_args = Mock() c.run = Mock() exc = c.run.side_effect = c.UsageError('foo', status=3) assert c() == exc.status cb.assert_called_with(exc) c.verify_args.assert_called_with(()) def test_default_on_usage_error(self): cmd = Command() cmd.handle_error = Mock() exc = Exception() cmd.on_usage_error(exc) cmd.handle_error.assert_called_with(exc) def test_verify_args_missing(self): c = Command() def run(a, b, c): pass c.run = run with pytest.raises(c.UsageError): c.verify_args((1,)) c.verify_args((1, 2, 3)) def test_run_interface(self): with pytest.raises(NotImplementedError): Command().run() @patch('sys.stdout') def test_early_version(self, stdout): cmd = Command() with pytest.raises(SystemExit): cmd.early_version(['--version']) def test_execute_from_commandline(self, app): cmd = MockCommand(app=app) args1, kwargs1 = cmd.execute_from_commandline() # sys.argv assert args1 == cmd.mock_args assert kwargs1['foo'] == 'bar' assert kwargs1.get('prog_name') args2, kwargs2 = cmd.execute_from_commandline(['foo']) # pass list assert args2 == cmd.mock_args assert kwargs2['foo'] == 'bar' assert kwargs2['prog_name'] == 'foo' def test_with_bogus_args(self, app): with mock.stdouts() as (_, stderr): cmd = MockCommand(app=app) cmd.supports_args = False with pytest.raises(SystemExit): cmd.execute_from_commandline(argv=['--bogus']) assert stderr.getvalue() assert 'Unrecognized' in stderr.getvalue() def test_with_custom_config_module(self, app): prev = os.environ.pop('CELERY_CONFIG_MODULE', None) try: cmd = MockCommand(app=app) cmd.setup_app_from_commandline(['--config=foo.bar.baz']) assert os.environ.get('CELERY_CONFIG_MODULE') == 'foo.bar.baz' finally: if prev: os.environ['CELERY_CONFIG_MODULE'] = prev else: os.environ.pop('CELERY_CONFIG_MODULE', None) def test_with_custom_broker(self, app): prev = os.environ.pop('CELERY_BROKER_URL', None) try: cmd = MockCommand(app=app) cmd.setup_app_from_commandline(['--broker=xyzza://']) assert os.environ.get('CELERY_BROKER_URL') == 'xyzza://' finally: if prev: os.environ['CELERY_BROKER_URL'] = prev else: os.environ.pop('CELERY_BROKER_URL', None) def test_with_custom_app(self, app): cmd = MockCommand(app=app) appstr = '.'.join([__name__, 'APP']) cmd.setup_app_from_commandline(['--app=%s' % (appstr,), '--loglevel=INFO']) assert cmd.app is APP cmd.setup_app_from_commandline(['-A', appstr, '--loglevel=INFO']) assert cmd.app is APP def test_setup_app_sets_quiet(self, app): cmd = MockCommand(app=app) cmd.setup_app_from_commandline(['-q']) assert cmd.quiet cmd2 = MockCommand(app=app) cmd2.setup_app_from_commandline(['--quiet']) assert cmd2.quiet def test_setup_app_sets_chdir(self, app): with patch('os.chdir') as chdir: cmd = MockCommand(app=app) cmd.setup_app_from_commandline(['--workdir=/opt']) chdir.assert_called_with('/opt') def test_setup_app_sets_loader(self, app): prev = os.environ.get('CELERY_LOADER') try: cmd = MockCommand(app=app) cmd.setup_app_from_commandline(['--loader=X.Y:Z']) assert os.environ['CELERY_LOADER'] == 'X.Y:Z' finally: if prev is not None: os.environ['CELERY_LOADER'] = prev else: del(os.environ['CELERY_LOADER']) def test_setup_app_no_respect(self, app): cmd = MockCommand(app=app) cmd.respects_app_option = False with 
patch('celery.bin.base.Celery') as cp: cmd.setup_app_from_commandline(['--app=x.y:z']) cp.assert_called() def test_setup_app_custom_app(self, app): cmd = MockCommand(app=app) app = cmd.app = Mock() app.user_options = {'preload': None} cmd.setup_app_from_commandline([]) assert cmd.app == app def test_find_app_suspects(self, app): cmd = MockCommand(app=app) assert cmd.find_app('t.unit.bin.proj.app') assert cmd.find_app('t.unit.bin.proj') assert cmd.find_app('t.unit.bin.proj:hello') assert cmd.find_app('t.unit.bin.proj.hello') assert cmd.find_app('t.unit.bin.proj.app:app') assert cmd.find_app('t.unit.bin.proj.app.app') with pytest.raises(AttributeError): cmd.find_app('t.unit.bin') with pytest.raises(AttributeError): cmd.find_app(__name__) def test_ask(self, app, patching): try: input = patching('celery.bin.base.input') except AttributeError: input = patching('builtins.input') cmd = MockCommand(app=app) input.return_value = 'yes' assert cmd.ask('q', ('yes', 'no'), 'no') == 'yes' input.return_value = 'nop' assert cmd.ask('q', ('yes', 'no'), 'no') == 'no' def test_host_format(self, app): cmd = MockCommand(app=app) with patch('celery.utils.nodenames.gethostname') as hn: hn.return_value = 'blacktron.example.com' assert cmd.host_format('') == '' assert (cmd.host_format('celery@%h') == 'celery@blacktron.example.com') assert cmd.host_format('celery@%d') == 'celery@example.com' assert cmd.host_format('celery@%n') == 'celery@blacktron' def test_say_chat_quiet(self, app): cmd = MockCommand(app=app) cmd.quiet = True assert cmd.say_chat('<-', 'foo', 'foo') is None def test_say_chat_show_body(self, app): cmd = MockCommand(app=app) cmd.out = Mock() cmd.show_body = True cmd.say_chat('->', 'foo', 'body') cmd.out.assert_called_with('body') def test_say_chat_no_body(self, app): cmd = MockCommand(app=app) cmd.out = Mock() cmd.show_body = False cmd.say_chat('->', 'foo', 'body') @pytest.mark.usefixtures('depends_on_current_app') def test_with_cmdline_config(self, app): cmd = MockCommand(app=app) cmd.enable_config_from_cmdline = True cmd.namespace = 'worker' rest = cmd.setup_app_from_commandline(argv=[ '--loglevel=INFO', '--', 'broker.url=amqp://broker.example.com', '.prefetch_multiplier=100']) assert cmd.app.conf.broker_url == 'amqp://broker.example.com' assert cmd.app.conf.worker_prefetch_multiplier == 100 assert rest == ['--loglevel=INFO'] cmd.app = None cmd.get_app = Mock(name='get_app') cmd.get_app.return_value = app app.user_options['preload'] = [ Option('--foo', action='store_true'), ] cmd.setup_app_from_commandline(argv=[ '--foo', '--loglevel=INFO', '--', 'broker.url=amqp://broker.example.com', '.prefetch_multiplier=100']) assert cmd.app is cmd.get_app() def test_get_default_app(self, app, patching): patching('celery._state.get_current_app') cmd = MockCommand(app=app) from celery._state import get_current_app assert cmd._get_default_app() is get_current_app() def test_set_colored(self, app): cmd = MockCommand(app=app) cmd.colored = 'foo' assert cmd.colored == 'foo' def test_set_no_color(self, app): cmd = MockCommand(app=app) cmd.no_color = False _ = cmd.colored # noqa cmd.no_color = True assert not cmd.colored.enabled def test_find_app(self, app): cmd = MockCommand(app=app) with patch('celery.utils.imports.symbol_by_name') as sbn: from types import ModuleType x = ModuleType(bytes_if_py2('proj')) def on_sbn(*args, **kwargs): def after(*args, **kwargs): x.app = 'quick brown fox' x.__path__ = None return x sbn.side_effect = after return x sbn.side_effect = on_sbn x.__path__ = [True] assert cmd.find_app('proj') 
== 'quick brown fox' def test_parse_preload_options_shortopt(self): class TestCommand(Command): def add_preload_arguments(self, parser): parser.add_argument('-s', action='store', dest='silent') cmd = TestCommand() acc = cmd.parse_preload_options(['-s', 'yes']) assert acc.get('silent') == 'yes' def test_parse_preload_options_with_equals_and_append(self): class TestCommand(Command): def add_preload_arguments(self, parser): parser.add_argument('--zoom', action='append', default=[]) cmd = TestCommand() acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2']) assert acc == {'zoom': ['1', '2']} def test_parse_preload_options_without_equals_and_append(self): cmd = Command() opt = Option('--zoom', action='append', default=[]) cmd.preload_options = (opt,) acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2']) assert acc == {'zoom': ['1', '2']} celery-4.1.0/t/unit/bin/test_result.py0000644000175000017500000000157313130607475017662 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import patch from celery.five import WhateverIO from celery.bin.result import result class test_result: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add def test_run(self): with patch('celery.result.AsyncResult.get') as get: out = WhateverIO() r = result(app=self.app, stdout=out) get.return_value = 'Jerry' r.run('id') assert 'Jerry' in out.getvalue() get.return_value = 'Elaine' r.run('id', task=self.add.name) assert 'Elaine' in out.getvalue() with patch('celery.result.AsyncResult.traceback') as tb: r.run('id', task=self.add.name, traceback=True) assert str(tb) in out.getvalue() celery-4.1.0/t/unit/bin/test_celeryevdump.py0000644000175000017500000000422013130607475021040 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from time import time from case import Mock, patch from celery.five import WhateverIO from celery.events.dumper import ( humanize_type, Dumper, evdump, ) class test_Dumper: def setup(self): self.out = WhateverIO() self.dumper = Dumper(out=self.out) def test_humanize_type(self): assert humanize_type('worker-offline') == 'shutdown' assert humanize_type('task-started') == 'task started' def test_format_task_event(self): self.dumper.format_task_event( 'worker@example.com', time(), 'task-started', 'tasks.add', {}) assert self.out.getvalue() def test_on_event(self): event = { 'hostname': 'worker@example.com', 'timestamp': time(), 'uuid': '1ef', 'name': 'tasks.add', 'args': '(2, 2)', 'kwargs': '{}', } self.dumper.on_event(dict(event, type='task-received')) assert self.out.getvalue() self.dumper.on_event(dict(event, type='task-revoked')) self.dumper.on_event(dict(event, type='worker-online')) @patch('celery.events.EventReceiver.capture') def test_evdump(self, capture): capture.side_effect = KeyboardInterrupt() evdump(app=self.app) def test_evdump_error_handler(self): app = Mock(name='app') with patch('celery.events.dumper.Dumper') as Dumper: Dumper.return_value = Mock(name='dumper') recv = app.events.Receiver.return_value = Mock() def se(*_a, **_k): recv.capture.side_effect = SystemExit() raise KeyError() recv.capture.side_effect = se Conn = app.connection_for_read.return_value = Mock(name='conn') conn = Conn.clone.return_value = Mock(name='cloned_conn') conn.connection_errors = (KeyError,) conn.channel_errors = () evdump(app) conn.ensure_connection.assert_called() errback = conn.ensure_connection.call_args[0][0] errback(KeyError(), 1) conn.as_uri.assert_called() 
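# evdump retries via ensure_connection; its errback renders the broker URI with as_uri()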
celery-4.1.0/t/unit/bin/test_worker.py0000644000175000017500000005534313135426300017650 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import logging import os import pytest import sys from billiard.process import current_process from case import Mock, mock, patch, skip from kombu import Exchange, Queue from celery import platforms from celery import signals from celery.app import trace from celery.apps import worker as cd from celery.bin.worker import worker, main as worker_main from celery.exceptions import ( ImproperlyConfigured, WorkerShutdown, WorkerTerminate, ) from celery.platforms import EX_FAILURE, EX_OK from celery.worker import state @pytest.fixture(autouse=True) def reset_worker_optimizations(): yield trace.reset_worker_optimizations() class Worker(cd.Worker): redirect_stdouts = False def start(self, *args, **kwargs): self.on_start() class test_Worker: Worker = Worker def test_queues_string(self): with mock.stdouts(): w = self.app.Worker() w.setup_queues('foo,bar,baz') assert 'foo' in self.app.amqp.queues def test_cpu_count(self): with mock.stdouts(): with patch('celery.worker.worker.cpu_count') as cpu_count: cpu_count.side_effect = NotImplementedError() w = self.app.Worker(concurrency=None) assert w.concurrency == 2 w = self.app.Worker(concurrency=5) assert w.concurrency == 5 def test_windows_B_option(self): with mock.stdouts(): self.app.IS_WINDOWS = True with pytest.raises(SystemExit): worker(app=self.app).run(beat=True) def test_setup_concurrency_very_early(self): x = worker() x.run = Mock() with pytest.raises(ImportError): x.execute_from_commandline(['worker', '-P', 'xyzybox']) def test_run_from_argv_basic(self): x = worker(app=self.app) x.run = Mock() x.maybe_detach = Mock() def run(*args, **kwargs): pass x.run = run x.run_from_argv('celery', []) x.maybe_detach.assert_called() def test_maybe_detach(self): x = worker(app=self.app) with patch('celery.bin.worker.detached_celeryd') as detached: x.maybe_detach([]) detached.assert_not_called() with pytest.raises(SystemExit): x.maybe_detach(['--detach']) detached.assert_called() def test_invalid_loglevel_gives_error(self): with mock.stdouts(): x = worker(app=self.app) with pytest.raises(SystemExit): x.run(loglevel='GRIM_REAPER') def test_no_loglevel(self): self.app.Worker = Mock() worker(app=self.app).run(loglevel=None) def test_tasklist(self): worker = self.app.Worker() assert worker.app.tasks assert worker.app.finalized assert worker.tasklist(include_builtins=True) worker.tasklist(include_builtins=False) def test_extra_info(self): worker = self.app.Worker() worker.loglevel = logging.WARNING assert not worker.extra_info() worker.loglevel = logging.INFO assert worker.extra_info() def test_loglevel_string(self): with mock.stdouts(): worker = self.Worker(app=self.app, loglevel='INFO') assert worker.loglevel == logging.INFO def test_run_worker(self, patching): handlers = {} class Signals(platforms.Signals): def __setitem__(self, sig, handler): handlers[sig] = handler patching.setattr('celery.platforms.signals', Signals()) with mock.stdouts(): w = self.Worker(app=self.app) w._isatty = False w.on_start() for sig in 'SIGINT', 'SIGHUP', 'SIGTERM': assert sig in handlers handlers.clear() w = self.Worker(app=self.app) w._isatty = True w.on_start() for sig in 'SIGINT', 'SIGTERM': assert sig in handlers assert 'SIGHUP' not in handlers def test_startup_info(self): with mock.stdouts(): worker = self.Worker(app=self.app) worker.on_start() assert worker.startup_info() worker.loglevel = logging.DEBUG assert 
worker.startup_info() worker.loglevel = logging.INFO assert worker.startup_info() worker.autoscale = 13, 10 assert worker.startup_info() prev_loader = self.app.loader worker = self.Worker( app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi', ) with patch('celery.apps.worker.qualname') as qualname: qualname.return_value = 'acme.backed_beans.Loader' assert worker.startup_info() with patch('celery.apps.worker.qualname') as qualname: qualname.return_value = 'celery.loaders.Loader' assert worker.startup_info() from celery.loaders.app import AppLoader self.app.loader = AppLoader(app=self.app) assert worker.startup_info() self.app.loader = prev_loader worker.task_events = True assert worker.startup_info() # test when there are too few output lines # to draft the ascii art onto prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox'] try: assert worker.startup_info() finally: cd.ARTLINES = prev def test_run(self): with mock.stdouts(): self.Worker(app=self.app).on_start() self.Worker(app=self.app, purge=True).on_start() worker = self.Worker(app=self.app) worker.on_start() def test_purge_messages(self): with mock.stdouts(): self.Worker(app=self.app).purge_messages() def test_init_queues(self): with mock.stdouts(): app = self.app c = app.conf app.amqp.queues = app.amqp.Queues({ 'celery': { 'exchange': 'celery', 'routing_key': 'celery', }, 'video': { 'exchange': 'video', 'routing_key': 'video', }, }) worker = self.Worker(app=self.app) worker.setup_queues(['video']) assert 'video' in app.amqp.queues assert 'video' in app.amqp.queues.consume_from assert 'celery' in app.amqp.queues assert 'celery' not in app.amqp.queues.consume_from c.task_create_missing_queues = False del(app.amqp.queues) with pytest.raises(ImproperlyConfigured): self.Worker(app=self.app).setup_queues(['image']) del(app.amqp.queues) c.task_create_missing_queues = True worker = self.Worker(app=self.app) worker.setup_queues(['image']) assert 'image' in app.amqp.queues.consume_from assert app.amqp.queues['image'] == Queue( 'image', Exchange('image'), routing_key='image', ) def test_autoscale_argument(self): with mock.stdouts(): worker1 = self.Worker(app=self.app, autoscale='10,3') assert worker1.autoscale == [10, 3] worker2 = self.Worker(app=self.app, autoscale='10') assert worker2.autoscale == [10, 0] def test_include_argument(self): worker1 = self.Worker(app=self.app, include='os') assert worker1.include == ['os'] worker2 = self.Worker(app=self.app, include='os,sys') assert worker2.include == ['os', 'sys'] self.Worker(app=self.app, include=['os', 'sys']) def test_unknown_loglevel(self): with mock.stdouts(): with pytest.raises(SystemExit): worker(app=self.app).run(loglevel='ALIEN') worker1 = self.Worker(app=self.app, loglevel=0xFFFF) assert worker1.loglevel == 0xFFFF @patch('os._exit') @skip.if_win32() def test_warns_if_running_as_privileged_user(self, _exit, patching): getuid = patching('os.getuid') with mock.stdouts() as (_, stderr): getuid.return_value = 0 self.app.conf.accept_content = ['pickle'] worker = self.Worker(app=self.app) worker.on_start() _exit.assert_called_with(1) patching.setattr('celery.platforms.C_FORCE_ROOT', True) worker = self.Worker(app=self.app) worker.on_start() assert 'a very bad idea' in stderr.getvalue() patching.setattr('celery.platforms.C_FORCE_ROOT', False) self.app.conf.accept_content = ['json'] worker = self.Worker(app=self.app) worker.on_start() assert 'superuser' in stderr.getvalue() def test_redirect_stdouts(self): with mock.stdouts(): self.Worker(app=self.app, redirect_stdouts=False) with 
pytest.raises(AttributeError): sys.stdout.logger def test_on_start_custom_logging(self): with mock.stdouts(): self.app.log.redirect_stdouts = Mock() worker = self.Worker(app=self.app, redirect_stdouts=True) worker._custom_logging = True worker.on_start() self.app.log.redirect_stdouts.assert_not_called() def test_setup_logging_no_color(self): worker = self.Worker( app=self.app, redirect_stdouts=False, no_color=True, ) prev, self.app.log.setup = self.app.log.setup, Mock() try: worker.setup_logging() assert not self.app.log.setup.call_args[1]['colorize'] finally: self.app.log.setup = prev def test_startup_info_pool_is_str(self): with mock.stdouts(): worker = self.Worker(app=self.app, redirect_stdouts=False) worker.pool_cls = 'foo' worker.startup_info() def test_redirect_stdouts_already_handled(self): logging_setup = [False] @signals.setup_logging.connect def on_logging_setup(**kwargs): logging_setup[0] = True try: worker = self.Worker(app=self.app, redirect_stdouts=False) worker.app.log.already_setup = False worker.setup_logging() assert logging_setup[0] with pytest.raises(AttributeError): sys.stdout.logger finally: signals.setup_logging.disconnect(on_logging_setup) def test_platform_tweaks_macOS(self): class macOSWorker(Worker): proxy_workaround_installed = False def macOS_proxy_detection_workaround(self): self.proxy_workaround_installed = True with mock.stdouts(): worker = macOSWorker(app=self.app, redirect_stdouts=False) def install_HUP_nosupport(controller): controller.hup_not_supported_installed = True class Controller(object): pass prev = cd.install_HUP_not_supported_handler cd.install_HUP_not_supported_handler = install_HUP_nosupport try: worker.app.IS_macOS = True controller = Controller() worker.install_platform_tweaks(controller) assert controller.hup_not_supported_installed assert worker.proxy_workaround_installed finally: cd.install_HUP_not_supported_handler = prev def test_general_platform_tweaks(self): restart_worker_handler_installed = [False] def install_worker_restart_handler(worker): restart_worker_handler_installed[0] = True class Controller(object): pass with mock.stdouts(): prev = cd.install_worker_restart_handler cd.install_worker_restart_handler = install_worker_restart_handler try: worker = self.Worker(app=self.app) worker.app.IS_macOS = False worker.install_platform_tweaks(Controller()) assert restart_worker_handler_installed[0] finally: cd.install_worker_restart_handler = prev def test_on_consumer_ready(self): worker_ready_sent = [False] @signals.worker_ready.connect def on_worker_ready(**kwargs): worker_ready_sent[0] = True with mock.stdouts(): self.Worker(app=self.app).on_consumer_ready(object()) assert worker_ready_sent[0] @mock.stdouts class test_funs: def test_active_thread_count(self): assert cd.active_thread_count() @skip.unless_module('setproctitle') def test_set_process_status(self): worker = Worker(app=self.app, hostname='xyzza') prev1, sys.argv = sys.argv, ['Arg0'] try: st = worker.set_process_status('Running') assert 'celeryd' in st assert 'xyzza' in st assert 'Running' in st prev2, sys.argv = sys.argv, ['Arg0', 'Arg1'] try: st = worker.set_process_status('Running') assert 'celeryd' in st assert 'xyzza' in st assert 'Running' in st assert 'Arg1' in st finally: sys.argv = prev2 finally: sys.argv = prev1 def test_parse_options(self): cmd = worker() cmd.app = self.app opts, args = cmd.parse_options('worker', ['--concurrency=512', '--heartbeat-interval=10']) assert opts['concurrency'] == 512 assert opts['heartbeat_interval'] == 10 def test_main(self): p, 
cd.Worker = cd.Worker, Worker s, sys.argv = sys.argv, ['worker', '--discard'] try: worker_main(app=self.app) finally: cd.Worker = p sys.argv = s @mock.stdouts class test_signal_handlers: class _Worker(object): hostname = 'foo' stopped = False terminated = False def stop(self, in_sighandler=False): self.stopped = True def terminate(self, in_sighandler=False): self.terminated = True def psig(self, fun, *args, **kwargs): handlers = {} class Signals(platforms.Signals): def __setitem__(self, sig, handler): handlers[sig] = handler p, platforms.signals = platforms.signals, Signals() try: fun(*args, **kwargs) return handlers finally: platforms.signals = p def test_worker_int_handler(self): worker = self._Worker() handlers = self.psig(cd.install_worker_int_handler, worker) next_handlers = {} state.should_stop = None state.should_terminate = None class Signals(platforms.Signals): def __setitem__(self, sig, handler): next_handlers[sig] = handler with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 3 p, platforms.signals = platforms.signals, Signals() try: handlers['SIGINT']('SIGINT', object()) assert state.should_stop assert state.should_stop == EX_FAILURE finally: platforms.signals = p state.should_stop = None try: next_handlers['SIGINT']('SIGINT', object()) assert state.should_terminate assert state.should_terminate == EX_FAILURE finally: state.should_terminate = None with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 1 p, platforms.signals = platforms.signals, Signals() try: with pytest.raises(WorkerShutdown): handlers['SIGINT']('SIGINT', object()) finally: platforms.signals = p with pytest.raises(WorkerTerminate): next_handlers['SIGINT']('SIGINT', object()) @skip.unless_module('multiprocessing') def test_worker_int_handler_only_stop_MainProcess(self): process = current_process() name, process.name = process.name, 'OtherProcess' with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 3 try: worker = self._Worker() handlers = self.psig(cd.install_worker_int_handler, worker) handlers['SIGINT']('SIGINT', object()) assert state.should_stop finally: process.name = name state.should_stop = None with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 1 try: worker = self._Worker() handlers = self.psig(cd.install_worker_int_handler, worker) with pytest.raises(WorkerShutdown): handlers['SIGINT']('SIGINT', object()) finally: process.name = name state.should_stop = None def test_install_HUP_not_supported_handler(self): worker = self._Worker() handlers = self.psig(cd.install_HUP_not_supported_handler, worker) handlers['SIGHUP']('SIGHUP', object()) @skip.unless_module('multiprocessing') def test_worker_term_hard_handler_only_stop_MainProcess(self): process = current_process() name, process.name = process.name, 'OtherProcess' try: with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 3 worker = self._Worker() handlers = self.psig( cd.install_worker_term_hard_handler, worker) try: handlers['SIGQUIT']('SIGQUIT', object()) assert state.should_terminate finally: state.should_terminate = None with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 1 worker = self._Worker() handlers = self.psig( cd.install_worker_term_hard_handler, worker) try: with pytest.raises(WorkerTerminate): handlers['SIGQUIT']('SIGQUIT', object()) finally: state.should_terminate = None finally: process.name = name def test_worker_term_handler_when_threads(self): with patch('celery.apps.worker.active_thread_count') 
as c: c.return_value = 3 worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) try: handlers['SIGTERM']('SIGTERM', object()) assert state.should_stop == EX_OK finally: state.should_stop = None def test_worker_term_handler_when_single_thread(self): with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 1 worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) try: with pytest.raises(WorkerShutdown): handlers['SIGTERM']('SIGTERM', object()) finally: state.should_stop = None @patch('sys.__stderr__') @skip.if_pypy() @skip.if_jython() def test_worker_cry_handler(self, stderr): handlers = self.psig(cd.install_cry_handler) assert handlers['SIGUSR1']('SIGUSR1', object()) is None stderr.write.assert_called() @skip.unless_module('multiprocessing') def test_worker_term_handler_only_stop_MainProcess(self): process = current_process() name, process.name = process.name, 'OtherProcess' try: with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 3 worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) handlers['SIGTERM']('SIGTERM', object()) assert state.should_stop == EX_OK with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 1 worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) with pytest.raises(WorkerShutdown): handlers['SIGTERM']('SIGTERM', object()) finally: process.name = name state.should_stop = None @skip.unless_symbol('os.execv') @patch('celery.platforms.close_open_fds') @patch('atexit.register') @patch('os.close') def test_worker_restart_handler(self, _close, register, close_open): argv = [] def _execv(*args): argv.extend(args) execv, os.execv = os.execv, _execv try: worker = self._Worker() handlers = self.psig(cd.install_worker_restart_handler, worker) handlers['SIGHUP']('SIGHUP', object()) assert state.should_stop == EX_OK register.assert_called() callback = register.call_args[0][0] callback() assert argv finally: os.execv = execv state.should_stop = None def test_worker_term_hard_handler_when_threaded(self): with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 3 worker = self._Worker() handlers = self.psig(cd.install_worker_term_hard_handler, worker) try: handlers['SIGQUIT']('SIGQUIT', object()) assert state.should_terminate finally: state.should_terminate = None def test_worker_term_hard_handler_when_single_threaded(self): with patch('celery.apps.worker.active_thread_count') as c: c.return_value = 1 worker = self._Worker() handlers = self.psig(cd.install_worker_term_hard_handler, worker) with pytest.raises(WorkerTerminate): handlers['SIGQUIT']('SIGQUIT', object()) def test_send_worker_shutting_down_signal(self): with patch('celery.apps.worker.signals.worker_shutting_down') as wsd: worker = self._Worker() handlers = self.psig(cd.install_worker_term_handler, worker) try: with pytest.raises(WorkerShutdown): handlers['SIGTERM']('SIGTERM', object()) finally: state.should_stop = None wsd.send.assert_called_with( sender='foo', sig='SIGTERM', how='Warm', exitcode=0, ) celery-4.1.0/t/unit/bin/celery.py0000644000175000017500000000011313130607475016555 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals # here for a test celery-4.1.0/t/unit/bin/test_celeryd_detach.py0000644000175000017500000001124713130607475021302 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, mock, patch from 
celery.platforms import IS_WINDOWS from celery.bin.celeryd_detach import ( detach, detached_celeryd, main, ) if not IS_WINDOWS: class test_detached: @patch('celery.bin.celeryd_detach.detached') @patch('os.execv') @patch('celery.bin.celeryd_detach.logger') @patch('celery.app.log.Logging.setup_logging_subsystem') def test_execs(self, setup_logs, logger, execv, detached): context = detached.return_value = Mock() context.__enter__ = Mock() context.__exit__ = Mock() detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log', pidfile='/var/pid', hostname='foo@example.com') detached.assert_called_with( '/var/log', '/var/pid', None, None, None, None, False, after_forkers=False, ) execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c']) r = detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log', pidfile='/var/pid', executable='/bin/foo', app=self.app) execv.assert_called_with('/bin/foo', ['/bin/foo', 'a', 'b', 'c']) execv.side_effect = Exception('foo') r = detach( '/bin/boo', ['a', 'b', 'c'], logfile='/var/log', pidfile='/var/pid', hostname='foo@example.com', app=self.app) context.__enter__.assert_called_with() logger.critical.assert_called() setup_logs.assert_called_with( 'ERROR', '/var/log', hostname='foo@example.com') assert r == 1 self.patching('celery.current_app') from celery import current_app r = detach( '/bin/boo', ['a', 'b', 'c'], logfile='/var/log', pidfile='/var/pid', hostname='foo@example.com', app=None) current_app.log.setup_logging_subsystem.assert_called_with( 'ERROR', '/var/log', hostname='foo@example.com', ) class test_PartialOptionParser: def test_parser(self): x = detached_celeryd(self.app) p = x.create_parser('celeryd_detach') options, leftovers = p.parse_known_args([ '--logfile=foo', '--fake', '--enable', 'a', 'b', '-c1', '-d', '2', ]) assert options.logfile == 'foo' assert leftovers, ['--enable', '-c1', '-d' == '2'] options, leftovers = p.parse_known_args([ '--fake', '--enable', '--pidfile=/var/pid/foo.pid', 'a', 'b', '-c1', '-d', '2', ]) assert options.pidfile == '/var/pid/foo.pid' with mock.stdouts(): with pytest.raises(SystemExit): p.parse_args(['--logfile']) p._option_string_actions['--logfile'].nargs = 2 with pytest.raises(SystemExit): p.parse_args(['--logfile=a']) with pytest.raises(SystemExit): p.parse_args(['--fake=abc']) assert p._option_string_actions['--logfile'].nargs == 2 p.parse_args(['--logfile', 'a', 'b']) class test_Command: argv = [ '--foobar=10,2', '-c', '1', '--logfile=/var/log', '-lDEBUG', '--', '.disable_rate_limits=1', ] def test_parse_options(self): x = detached_celeryd(app=self.app) _, argv = x._split_command_line_config(self.argv) o, l = x.parse_options('cd', argv) assert o.logfile == '/var/log' assert l == [ '--foobar=10,2', '-c', '1', '-lDEBUG', '--logfile=/var/log', '--pidfile=celeryd.pid', ] x.parse_options('cd', []) # no args @patch('sys.exit') @patch('celery.bin.celeryd_detach.detach') def test_execute_from_commandline(self, detach, exit): x = detached_celeryd(app=self.app) x.execute_from_commandline(self.argv) exit.assert_called() detach.assert_called_with( path=x.execv_path, uid=None, gid=None, umask=None, fake=False, logfile='/var/log', pidfile='celeryd.pid', workdir=None, executable=None, hostname=None, argv=x.execv_argv + [ '-c', '1', '-lDEBUG', '--logfile=/var/log', '--pidfile=celeryd.pid', '--', '.disable_rate_limits=1' ], app=self.app, ) @patch('celery.bin.celeryd_detach.detached_celeryd') def test_main(self, command): c = command.return_value = Mock() main(self.app) c.execute_from_commandline.assert_called_with() 
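A minimal standalone sketch of the option-splitting behavior the PartialOptionParser tests above depend on; the parser and option names here are invented for illustration and are not part of the tarball:

import argparse

# parse_known_args() consumes only the options this parser declares and
# hands back everything else untouched -- the mechanism that lets
# detached_celeryd keep its own options apart from the worker's argv.
parser = argparse.ArgumentParser()
parser.add_argument('--logfile')

opts, leftovers = parser.parse_known_args(['--logfile=foo', '--enable', '-c1'])
assert opts.logfile == 'foo'
assert leftovers == ['--enable', '-c1']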
celery-4.1.0/t/unit/utils/0000755000175000017500000000000013135426347015317 5ustar omeromer00000000000000celery-4.1.0/t/unit/utils/test_imports.py0000644000175000017500000000255213130607475020427 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest

from case import Mock

from celery.five import bytes_if_py2
from celery.utils.imports import (
    NotAPackage, qualname, gen_task_name, reload_from_cwd,
    module_file, find_module,
)


def test_find_module():
    assert find_module('celery')
    imp = Mock()
    imp.return_value = None
    with pytest.raises(NotAPackage):
        find_module('foo.bar.baz', imp=imp)
    assert find_module('celery.worker.request')


def test_qualname():
    Class = type(bytes_if_py2('Fox'), (object,), {
        '__module__': 'quick.brown',
    })
    assert qualname(Class) == 'quick.brown.Fox'
    assert qualname(Class()) == 'quick.brown.Fox'


def test_reload_from_cwd(patching):
    reload = patching('celery.utils.imports.reload')
    reload_from_cwd('foo')
    reload.assert_called()


def test_reload_from_cwd_custom_reloader():
    reload = Mock()
    reload_from_cwd('foo', reload)
    reload.assert_called()


def test_module_file():
    m1 = Mock()
    m1.__file__ = '/opt/foo/xyz.pyc'
    assert module_file(m1) == '/opt/foo/xyz.py'
    m2 = Mock()
    m2.__file__ = '/opt/foo/xyz.py'
    assert module_file(m2) == '/opt/foo/xyz.py'


class test_gen_task_name:

    def test_no_module(self):
        app = Mock()
        app.name = '__main__'
        assert gen_task_name(app, 'foo', 'axsadaewe')
celery-4.1.0/t/unit/utils/test_sysinfo.py0000644000175000017500000000104613130607475020421 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

from case import skip

from celery.utils.sysinfo import load_average, df


@skip.unless_symbol('os.getloadavg')
def test_load_average(patching):
    getloadavg = patching('os.getloadavg')
    getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875
    l = load_average()
    assert l
    assert l == (0.55, 0.64, 0.7)


@skip.unless_symbol('posix.statvfs_result')
def test_df():
    x = df('/')
    assert x.total_blocks
    assert x.available
    assert x.capacity
    assert x.stat
celery-4.1.0/t/unit/utils/test_saferepr.py0000644000175000017500000001426513130607475020545 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

import pytest
import re
import struct

from case import skip
from decimal import Decimal
from pprint import pprint

from celery.five import (
    items, long_t, python_2_unicode_compatible, text_t, values,
)
from celery.utils.saferepr import saferepr

D_NUMBERS = {
    b'integer': 1, b'float': 1.3, b'decimal': Decimal('1.3'),
    b'long': long_t(4),
    b'complex': complex(13.3),
}
D_INT_KEYS = {v: k for k, v in items(D_NUMBERS)}

QUICK_BROWN_FOX = 'The quick brown fox jumps over the lazy dog.'
B_QUICK_BROWN_FOX = b'The quick brown fox jumps over the lazy dog.'
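# Orientation for the fixtures above and below (an illustrative sketch only;
# it assumes nothing beyond the saferepr(obj, maxlen=None) signature already
# exercised by the tests in this file): saferepr() is expected to track
# repr() for small, simple values while keeping output bounded for large or
# nested data, e.g.:
#
#     saferepr(D_NUMBERS)                   # repr-like dict output
#     saferepr(D_TEXT_LARGE, maxlen=128)    # capped output, truncated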
D_TEXT = {
    b'foo': QUICK_BROWN_FOX, b'bar': B_QUICK_BROWN_FOX,
    b'baz': B_QUICK_BROWN_FOX, b'xuzzy': B_QUICK_BROWN_FOX,
}

L_NUMBERS = list(values(D_NUMBERS))

D_TEXT_LARGE = {
    b'bazxuzzyfoobarlongverylonglong': QUICK_BROWN_FOX * 30,
}

D_ALL = {
    b'numbers': D_NUMBERS,
    b'intkeys': D_INT_KEYS,
    b'text': D_TEXT,
    b'largetext': D_TEXT_LARGE,
}

D_D_TEXT = {b'rest': D_TEXT}

RE_OLD_SET_REPR = re.compile(r'(?


class test_saferepr:

    @skip.unless_python3()
    def test_binary_bytes(self):
        val = struct.pack('>QQQ', 12223, 1234, 3123)
        if hasattr(bytes, 'hex'):  # Python 3.5+
            assert '2fbf' in saferepr(val, maxlen=128)
        else:  # Python 3.4
            assert saferepr(val, maxlen=128)

    @skip.unless_python3()
    def test_binary_bytes__long(self):
        val = struct.pack('>QQQ', 12223, 1234, 3123) * 1024
        result = saferepr(val, maxlen=128)
        if hasattr(bytes, 'hex'):  # Python 3.5+
            assert '2fbf' in result
            assert result.endswith("...'")
        else:  # Python 3.4
            assert result

    def test_repr_raises(self):

        class O(object):

            def __repr__(self):
                raise KeyError('foo')

        assert 'Unrepresentable' in saferepr(O())

    def test_bytes_with_unicode_py2_and_3(self):
        assert saferepr([b'foo', 'a®rgs'.encode('utf-8')])
celery-4.1.0/t/unit/utils/test_graph.py0000644000175000017500000000316613130607475020035 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

from case import Mock

from celery.five import WhateverIO, items
from celery.utils.graph import DependencyGraph


class test_DependencyGraph:

    def graph1(self):
        return DependencyGraph([
            ('A', []),
            ('B', []),
            ('C', ['A']),
            ('D', ['C', 'B']),
        ])

    def test_repr(self):
        assert repr(self.graph1())

    def test_topsort(self):
        order = self.graph1().topsort()
        # C must start before D
        assert order.index('C') < order.index('D')
        # and B must start before D
        assert order.index('B') < order.index('D')
        # and A must start before C
        assert order.index('A') < order.index('C')

    def test_edges(self):
        assert sorted(list(self.graph1().edges())) == ['C', 'D']

    def test_connect(self):
        x, y = self.graph1(), self.graph1()
        x.connect(y)

    def test_valency_of_when_missing(self):
        x = self.graph1()
        assert x.valency_of('foobarbaz') == 0

    def test_format(self):
        x = self.graph1()
        x.formatter = Mock()
        obj = Mock()
        assert x.format(obj)
        x.formatter.assert_called_with(obj)
        x.formatter = None
        assert x.format(obj) is obj

    def test_items(self):
        assert dict(items(self.graph1())) == {
            'A': [], 'B': [], 'C': ['A'], 'D': ['C', 'B'],
        }

    def test_repr_node(self):
        x = self.graph1()
        assert x.repr_node('fasdswewqewq')

    def test_to_dot(self):
        s = WhateverIO()
        self.graph1().to_dot(s)
        assert s.getvalue()
celery-4.1.0/t/unit/utils/__init__.py0000644000175000017500000000000013130607475017414 0ustar omeromer00000000000000celery-4.1.0/t/unit/utils/test_deprecated.py0000644000175000017500000000340713130607475021032 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest

from case import patch

from celery.utils import deprecated


class test_deprecated_property:

    @patch('celery.utils.deprecated.warn')
    def test_deprecated(self, warn):

        class X(object):
            _foo = None

            @deprecated.Property(deprecation='1.2')
            def foo(self):
                return self._foo

            @foo.setter
            def foo(self, value):
                self._foo = value

            @foo.deleter
            def foo(self):
                self._foo = None

        assert X.foo
        assert X.foo.__set__(None, 1)
        assert X.foo.__delete__(None)
        x = X()
        x.foo = 10
        warn.assert_called_with(
            stacklevel=3, deprecation='1.2', alternative=None,
            description='foo', removal=None,
        )
        warn.reset_mock()
        assert x.foo == 10
        warn.assert_called_with(
            stacklevel=3, deprecation='1.2', alternative=None,
            description='foo', removal=None,
        )
        warn.reset_mock()
        del(x.foo)
        warn.assert_called_with(
            stacklevel=3, deprecation='1.2', alternative=None,
            description='foo', removal=None,
        )
        assert x._foo is None

    def test_deprecated_no_setter_or_deleter(self):

        class X(object):

            @deprecated.Property(deprecation='1.2')
            def foo(self):
                pass

        assert X.foo
        x = X()
        with pytest.raises(AttributeError):
            x.foo = 10
        with pytest.raises(AttributeError):
            del(x.foo)


class test_warn:

    @patch('warnings.warn')
    def test_warn_deprecated(self, warn):
        deprecated.warn('Foo')
        warn.assert_called()
celery-4.1.0/t/unit/utils/test_functional.py0000644000175000017500000001670513130607475021101 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest

from kombu.utils.functional import lazy

from case import skip

from celery.five import range, nextfun
from celery.utils.functional import (
    DummyContext, fun_accepts_kwargs, fun_takes_argument, head_from_fun,
    firstmethod, first, maybe_list, mlazy, padlist, regen,
    seq_concat_seq, seq_concat_item,
)


def test_DummyContext():
    with DummyContext():
        pass
    with pytest.raises(KeyError):
        with DummyContext():
            raise KeyError()


@pytest.mark.parametrize('items,n,default,expected', [
    (['George', 'Costanza', 'NYC'], 3, None,
     ['George', 'Costanza', 'NYC']),
    (['George', 'Costanza'], 3, None,
     ['George', 'Costanza', None]),
    (['George', 'Costanza', 'NYC'], 4, 'Earth',
     ['George', 'Costanza', 'NYC', 'Earth']),
])
def test_padlist(items, n, default, expected):
    assert padlist(items, n, default=default) == expected


class test_firstmethod:

    def test_AttributeError(self):
        assert firstmethod('foo')([object()]) is None

    def test_handles_lazy(self):

        class A(object):

            def __init__(self, value=None):
                self.value = value

            def m(self):
                return self.value

        assert 'four' == firstmethod('m')([
            A(), A(), A(), A('four'), A('five')])
        assert 'four' == firstmethod('m')([
            A(), A(), A(), lazy(lambda: A('four')), A('five')])


def test_first():
    iterations = [0]

    def predicate(value):
        iterations[0] += 1
        if value == 5:
            return True
        return False

    assert first(predicate, range(10)) == 5
    assert iterations[0] == 6

    iterations[0] = 0
    assert first(predicate, range(10, 20)) is None
    assert iterations[0] == 10


def test_maybe_list():
    assert maybe_list(1) == [1]
    assert maybe_list([1]) == [1]
    assert maybe_list(None) is None


def test_mlazy():
    it = iter(range(20, 30))
    p = mlazy(nextfun(it))
    assert p() == 20
    assert p.evaluated
    assert p() == 20
    assert repr(p) == '20'


class test_regen:

    def test_list(self):
        l = [1, 2]
        r = regen(iter(l))
        assert regen(l) is l
        assert r == l
        assert r == l  # again
        assert r.__length_hint__() == 0

        fun, args = r.__reduce__()
        assert fun(*args) == l

    def test_gen(self):
        g = regen(iter(list(range(10))))
        assert g[7] == 7
        assert g[6] == 6
        assert g[5] == 5
        assert g[4] == 4
        assert g[3] == 3
        assert g[2] == 2
        assert g[1] == 1
        assert g[0] == 0
        assert g.data == list(range(10))
        assert g[8] == 8
        assert g[0] == 0
        g = regen(iter(list(range(10))))
        assert g[0] == 0
        assert g[1] == 1
        assert g.data == list(range(10))
        g = regen(iter([1]))
        assert g[0] == 1
        with pytest.raises(IndexError):
            g[1]
        assert g.data == [1]

        g = regen(iter(list(range(10))))
        assert g[-1] == 9
        assert g[-2] == 8
        assert g[-3] == 7
        assert g[-4] == 6
        assert g[-5] == 5
        assert g[5] == 5
        assert g.data == list(range(10))

        assert list(iter(g)) == list(range(10))


class test_head_from_fun:

    def test_from_cls(self):

        class X(object):
            def __call__(x, y, kwarg=1):  # noqa
                pass

        g = head_from_fun(X())
        with pytest.raises(TypeError):
            g(1)
        g(1, 2)
        g(1, 2, kwarg=3)

    def test_from_fun(self):
        def f(x, y, kwarg=1):
            pass
        g = head_from_fun(f)
        with pytest.raises(TypeError):
            g(1)
        g(1, 2)
        g(1, 2, kwarg=3)

    @skip.unless_python3()
    def test_regression_3678(self):
        local = {}
        fun = ('def f(foo, *args, bar="", **kwargs):'
               '    return foo, args, bar')
        exec(fun, {}, local)

        g = head_from_fun(local['f'])
        g(1)
        g(1, 2, 3, 4, bar=100)
        with pytest.raises(TypeError):
            g(bar=100)

    @skip.unless_python3()
    def test_from_fun_with_hints(self):
        local = {}
        fun = ('def f_hints(x: int, y: int, kwarg: int=1):'
               '    pass')
        exec(fun, {}, local)
        f_hints = local['f_hints']

        g = head_from_fun(f_hints)
        with pytest.raises(TypeError):
            g(1)
        g(1, 2)
        g(1, 2, kwarg=3)

    @skip.unless_python3()
    def test_from_fun_forced_kwargs(self):
        local = {}
        fun = ('def f_kwargs(*, a, b="b", c=None):'
               '    return')
        exec(fun, {}, local)
        f_kwargs = local['f_kwargs']

        g = head_from_fun(f_kwargs)
        with pytest.raises(TypeError):
            g(1)
        g(a=1)
        g(a=1, b=2)
        g(a=1, b=2, c=3)

    def test_classmethod(self):

        class A(object):

            @classmethod
            def f(cls, x):
                return x

        fun = head_from_fun(A.f, bound=False)
        assert fun(A, 1) == 1

        fun = head_from_fun(A.f, bound=True)
        assert fun(1) == 1


class test_fun_takes_argument:

    def test_starkwargs(self):
        assert fun_takes_argument('foo', lambda **kw: 1)

    def test_named(self):
        assert fun_takes_argument('foo', lambda a, foo, bar: 1)

        def fun(a, b, c, d):
            return 1

        assert fun_takes_argument('foo', fun, position=4)

    def test_starargs(self):
        assert fun_takes_argument('foo', lambda a, *args: 1)

    def test_does_not(self):
        assert not fun_takes_argument('foo', lambda a, bar, baz: 1)
        assert not fun_takes_argument('foo', lambda: 1)

        def fun(a, b, foo):
            return 1

        assert not fun_takes_argument('foo', fun, position=4)


@pytest.mark.parametrize('a,b,expected', [
    ((1, 2, 3), [4, 5], (1, 2, 3, 4, 5)),
    ((1, 2), [3, 4, 5], [1, 2, 3, 4, 5]),
    ([1, 2, 3], (4, 5), [1, 2, 3, 4, 5]),
    ([1, 2], (3, 4, 5), (1, 2, 3, 4, 5)),
])
def test_seq_concat_seq(a, b, expected):
    res = seq_concat_seq(a, b)
    assert type(res) is type(expected)  # noqa
    assert res == expected


@pytest.mark.parametrize('a,b,expected', [
    ((1, 2, 3), 4, (1, 2, 3, 4)),
    ([1, 2, 3], 4, [1, 2, 3, 4]),
])
def test_seq_concat_item(a, b, expected):
    res = seq_concat_item(a, b)
    assert type(res) is type(expected)  # noqa
    assert res == expected


class StarKwargsCallable(object):

    def __call__(self, **kwargs):
        return 1


class StarArgsStarKwargsCallable(object):

    def __call__(self, *args, **kwargs):
        return 1


class StarArgsCallable(object):

    def __call__(self, *args):
        return 1


class ArgsCallable(object):

    def __call__(self, a, b):
        return 1


class ArgsStarKwargsCallable(object):

    def __call__(self, a, b, **kwargs):
        return 1


class test_fun_accepts_kwargs:

    @pytest.mark.parametrize('fun', [
        lambda a, b, **kwargs: 1,
        lambda *args, **kwargs: 1,
        lambda foo=1, **kwargs: 1,
        StarKwargsCallable(),
        StarArgsStarKwargsCallable(),
        ArgsStarKwargsCallable(),
    ])
    def test_accepts(self, fun):
        assert fun_accepts_kwargs(fun)

    @pytest.mark.parametrize('fun', [
        lambda a: 1,
        lambda a, b: 1,
        lambda *args: 1,
        lambda a, kw1=1, kw2=2: 1,
        StarArgsCallable(),
        ArgsCallable(),
    ])
    def test_rejects(self, fun):
        assert not fun_accepts_kwargs(fun)
celery-4.1.0/t/unit/utils/test_text.py0000644000175000017500000000374413130607475017712 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest

from celery.utils.text import (
    abbr, abbrtask, ensure_newlines, indent, pretty, truncate,
)

RANDTEXT = """\
The quick brown
fox jumps
over the
lazy dog\
"""

RANDTEXT_RES = """\
    The quick brown
    fox jumps
    over the
    lazy dog\
"""

QUEUES = {
    'queue1': {
        'exchange': 'exchange1',
        'exchange_type': 'type1',
        'routing_key': 'bind1',
    },
    'queue2': {
        'exchange': 'exchange2',
        'exchange_type': 'type2',
        'routing_key': 'bind2',
    },
}


QUEUE_FORMAT1 = '.> queue1 exchange=exchange1(type1) key=bind1'
QUEUE_FORMAT2 = '.> queue2 exchange=exchange2(type2) key=bind2'


class test_Info:

    def test_textindent(self):
        assert indent(RANDTEXT, 4) == RANDTEXT_RES

    def test_format_queues(self, app):
        app.amqp.queues = app.amqp.Queues(QUEUES)
        assert (sorted(app.amqp.queues.format().split('\n')) ==
                sorted([QUEUE_FORMAT1, QUEUE_FORMAT2]))

    def test_ensure_newlines(self):
        assert len(ensure_newlines('foo\nbar\nbaz\n').splitlines()) == 3
        assert len(ensure_newlines('foo\nbar').splitlines()) == 2


@pytest.mark.parametrize('s,maxsize,expected', [
    ('ABCDEFGHI', 3, 'ABC...'),
    ('ABCDEFGHI', 10, 'ABCDEFGHI'),
])
def test_truncate_text(s, maxsize, expected):
    assert truncate(s, maxsize) == expected


@pytest.mark.parametrize('args,expected', [
    ((None, 3), '???'),
    (('ABCDEFGHI', 6), 'ABC...'),
    (('ABCDEFGHI', 20), 'ABCDEFGHI'),
    (('ABCDEFGHI', 6, None), 'ABCDEF'),
])
def test_abbr(args, expected):
    assert abbr(*args) == expected


@pytest.mark.parametrize('s,maxsize,expected', [
    (None, 3, '???'),
    ('feeds.tasks.refresh', 10, '[.]refresh'),
    ('feeds.tasks.refresh', 30, 'feeds.tasks.refresh'),
])
def test_abbrtask(s, maxsize, expected):
    assert abbrtask(s, maxsize) == expected


def test_pretty():
    assert pretty(('a', 'b', 'c'))
celery-4.1.0/t/unit/utils/test_local.py0000644000175000017500000002151213130607475020021 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest
import sys

from case import Mock, skip

from celery.five import python_2_unicode_compatible, string, long_t
from celery.local import (
    Proxy, PromiseProxy, maybe_evaluate, try_import,
)

PY3 = sys.version_info[0] == 3


class test_try_import:

    def test_imports(self):
        assert try_import(__name__)

    def test_when_default(self):
        default = object()
        assert try_import('foobar.awqewqe.asdwqewq', default) is default


class test_Proxy:

    def test_std_class_attributes(self):
        assert Proxy.__name__ == 'Proxy'
        assert Proxy.__module__ == 'celery.local'
        assert isinstance(Proxy.__doc__, str)

    def test_doc(self):
        def real():
            pass
        x = Proxy(real, __doc__='foo')
        assert x.__doc__ == 'foo'

    def test_name(self):

        def real():
            """real function"""
            return 'REAL'

        x = Proxy(lambda: real, name='xyz')
        assert x.__name__ == 'xyz'

        y = Proxy(lambda: real)
        assert y.__name__ == 'real'

        assert x.__doc__ == 'real function'

        assert x.__class__ == type(real)
        assert x.__dict__ == real.__dict__
        assert repr(x) == repr(real)
        assert x.__module__

    def test_get_current_local(self):
        x = Proxy(lambda: 10)
        object.__setattr__(x, '_Proxy_local', Mock())
        assert x._get_current_object()

    def test_bool(self):

        class X(object):

            def __bool__(self):
                return False
            __nonzero__ = __bool__

        x = Proxy(lambda: X())
        assert not x

    def test_slots(self):

        class X(object):
            __slots__ = ()

        x = Proxy(X)
        with pytest.raises(AttributeError):
            x.__dict__

    @skip.if_python3()
    def test_unicode(self):

        @python_2_unicode_compatible
        class X(object):

            def __unicode__(self):
                return 'UNICODE'
            __str__ = __unicode__

            def __repr__(self):
                return 'REPR'

        x = Proxy(lambda: X())
        assert string(x) == 'UNICODE'
        del(X.__unicode__)
        del(X.__str__)
        assert string(x) == 'REPR'

    def test_dir(self):

        class X(object):

            def __dir__(self):
                return ['a', 'b', 'c']

        x = Proxy(lambda: X())
        assert dir(x) == ['a', 'b', 'c']

        class Y(object):

            def __dir__(self):
                raise RuntimeError()

        y = Proxy(lambda: Y())
        assert dir(y) == []

    def test_getsetdel_attr(self):

        class X(object):
            a = 1
            b = 2
            c = 3
            def __dir__(self):
                return ['a', 'b', 'c']

        v = X()

        x = Proxy(lambda: v)
        assert x.__members__ == ['a', 'b', 'c']
        assert x.a == 1
        assert x.b == 2
        assert x.c == 3

        setattr(x, 'a', 10)
        assert x.a == 10

        del(x.a)
        assert x.a == 1

    def test_dictproxy(self):
        v = {}
        x = Proxy(lambda: v)
        x['foo'] = 42
        assert x['foo'] == 42
        assert len(x) == 1
        assert 'foo' in x
        del(x['foo'])
        with pytest.raises(KeyError):
            x['foo']
        assert iter(x)

    def test_listproxy(self):
        v = []
        x = Proxy(lambda: v)
        x.append(1)
        x.extend([2, 3, 4])
        assert x[0] == 1
        assert x[:-1] == [1, 2, 3]
        del(x[-1])
        assert x[:-1] == [1, 2]
        x[0] = 10
        assert x[0] == 10
        assert 10 in x
        assert len(x) == 3
        assert iter(x)
        x[0:2] = [1, 2]
        del(x[0:2])
        assert str(x)
        if sys.version_info[0] < 3:
            assert x.__cmp__(object()) == -1

    def test_complex_cast(self):

        class O(object):

            def __complex__(self):
                return complex(10.333)

        o = Proxy(O)
        assert o.__complex__() == complex(10.333)

    def test_index(self):

        class O(object):

            def __index__(self):
                return 1

        o = Proxy(O)
        assert o.__index__() == 1

    def test_coerce(self):

        class O(object):

            def __coerce__(self, other):
                return self, other

        o = Proxy(O)
        assert o.__coerce__(3)

    def test_int(self):
        assert Proxy(lambda: 10) + 1 == Proxy(lambda: 11)
        assert Proxy(lambda: 10) - 1 == Proxy(lambda: 9)
        assert Proxy(lambda: 10) * 2 == Proxy(lambda: 20)
        assert Proxy(lambda: 10) ** 2 == Proxy(lambda: 100)
        assert Proxy(lambda: 20) / 2 == Proxy(lambda: 10)
        assert Proxy(lambda: 20) // 2 == Proxy(lambda: 10)
        assert Proxy(lambda: 11) % 2 == Proxy(lambda: 1)
        assert Proxy(lambda: 10) << 2 == Proxy(lambda: 40)
        assert Proxy(lambda: 10) >> 2 == Proxy(lambda: 2)
        assert Proxy(lambda: 10) ^ 7 == Proxy(lambda: 13)
        assert Proxy(lambda: 10) | 40 == Proxy(lambda: 42)
        assert Proxy(lambda: 10) != Proxy(lambda: -11)
        assert Proxy(lambda: 10) != Proxy(lambda: -10)
        assert Proxy(lambda: -10) == Proxy(lambda: -10)
        assert Proxy(lambda: 10) < Proxy(lambda: 20)
        assert Proxy(lambda: 20) > Proxy(lambda: 10)
        assert Proxy(lambda: 10) >= Proxy(lambda: 10)
        assert Proxy(lambda: 10) <= Proxy(lambda: 10)
        assert Proxy(lambda: 10) == Proxy(lambda: 10)
        assert Proxy(lambda: 20) != Proxy(lambda: 10)
        assert Proxy(lambda: 100).__divmod__(30)
        assert Proxy(lambda: 100).__truediv__(30)
        assert abs(Proxy(lambda: -100))

        x = Proxy(lambda: 10)
        x -= 1
        assert x == 9
        x = Proxy(lambda: 9)
        x += 1
        assert x == 10
        x = Proxy(lambda: 10)
        x *= 2
        assert x == 20
        x = Proxy(lambda: 20)
        x /= 2
        assert x == 10
        x = Proxy(lambda: 10)
        x %= 2
        assert x == 0
        x = Proxy(lambda: 10)
        x <<= 3
        assert x == 80
        x = Proxy(lambda: 80)
        x >>= 4
        assert x == 5
        x = Proxy(lambda: 5)
        x ^= 1
        assert x == 4
        x = Proxy(lambda: 4)
        x **= 4
        assert x == 256
        x = Proxy(lambda: 256)
        x //= 2
        assert x == 128
        x = Proxy(lambda: 128)
        x |= 2
        assert x == 130
        x = Proxy(lambda: 130)
        x &= 10
        assert x == 2

        x = Proxy(lambda: 10)
        assert type(x.__float__()) == float
        assert type(x.__int__()) == int
        if not PY3:
            assert type(x.__long__()) == long_t
        assert hex(x)
        assert oct(x)

    def test_hash(self):

        class X(object):

            def __hash__(self):
                return 1234

        assert hash(Proxy(lambda: X())) == 1234

    def test_call(self):

        class X(object):

            def __call__(self):
                return 1234

        assert Proxy(lambda: X())() == 1234

    def test_context(self):

        class X(object):
            entered = exited = False

            def __enter__(self):
                self.entered = True
                return 1234

            def __exit__(self, *exc_info):
                self.exited = True

        v = X()
        x = Proxy(lambda: v)
        with x as val:
            assert val == 1234
        assert x.entered
        assert x.exited

    def test_reduce(self):

        class X(object):

            def __reduce__(self):
                return 123

        x = Proxy(lambda: X())
        assert x.__reduce__() == 123


class test_PromiseProxy:

    def test_only_evaluated_once(self):

        class X(object):
            attr = 123
            evals = 0

            def __init__(self):
                self.__class__.evals += 1

        p = PromiseProxy(X)
        assert p.attr == 123
        assert p.attr == 123
        assert X.evals == 1

    def test_callbacks(self):
        source = Mock(name='source')
        p = PromiseProxy(source)
        cbA = Mock(name='cbA')
        cbB = Mock(name='cbB')
        cbC = Mock(name='cbC')
        p.__then__(cbA, p)
        p.__then__(cbB, p)
        assert not p.__evaluated__()
        assert object.__getattribute__(p, '__pending__')

        assert repr(p)
        assert p.__evaluated__()
        with pytest.raises(AttributeError):
            object.__getattribute__(p, '__pending__')
        cbA.assert_called_with(p)
        cbB.assert_called_with(p)

        assert p.__evaluated__()
        p.__then__(cbC, p)
        cbC.assert_called_with(p)

        with pytest.raises(AttributeError):
            object.__getattribute__(p, '__pending__')

    def test_maybe_evaluate(self):
        x = PromiseProxy(lambda: 30)
        assert not x.__evaluated__()
        assert maybe_evaluate(x) == 30
        assert maybe_evaluate(x) == 30

        assert maybe_evaluate(30) == 30
        assert x.__evaluated__()
celery-4.1.0/t/unit/utils/test_serialization.py0000644000175000017500000000400413130607475021601 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import pytest
import pytz
import sys

from datetime import datetime, date, time, timedelta

from case import Mock, mock
from kombu import Queue

from celery.utils.serialization import (
    UnpickleableExceptionWrapper,
    get_pickleable_etype,
    jsonify,
)


class test_AAPickle:

    def test_no_cpickle(self):
        prev = sys.modules.pop('celery.utils.serialization', None)
        try:
            with mock.mask_modules('cPickle'):
                from celery.utils.serialization import pickle
                import pickle as orig_pickle
                assert pickle.dumps is orig_pickle.dumps
        finally:
            sys.modules['celery.utils.serialization'] = prev


class test_UnpickleExceptionWrapper:

    def test_init(self):
        x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x])
        assert x.exc_args
        assert len(x.exc_args) == 2


class test_get_pickleable_etype:

    def test_get_pickleable_etype(self):

        class Unpickleable(Exception):
            def __reduce__(self):
                raise ValueError('foo')

        assert get_pickleable_etype(Unpickleable) is Exception


class test_jsonify:

    @pytest.mark.parametrize('obj', [
        Queue('foo'),
        ['foo', 'bar', 'baz'],
        {'foo': 'bar'},
        datetime.utcnow(),
        datetime.utcnow().replace(tzinfo=pytz.utc),
        datetime.utcnow().replace(microsecond=0),
        date(2012, 1, 1),
        time(hour=1, minute=30),
        time(hour=1, minute=30, microsecond=3),
        timedelta(seconds=30),
        10,
        10.3,
        'hello',
    ])
    def test_simple(self, obj):
        assert jsonify(obj)

    def test_unknown_type_filter(self):
        unknown_type_filter = Mock()
        obj = object()
        assert (jsonify(obj, unknown_type_filter=unknown_type_filter) is
                unknown_type_filter.return_value)
        unknown_type_filter.assert_called_with(obj)

        with pytest.raises(ValueError):
            jsonify(obj)
celery-4.1.0/t/unit/utils/test_dispatcher.py0000644000175000017500000000766413130607475021065 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import gc
import sys
import time

from celery.utils.dispatch import Signal

if sys.platform.startswith('java'):

    def garbage_collect():
        # Some JVM GCs will execute finalizers in a different thread, meaning
        # we need to wait for that to complete before we go on looking for the
        # effects of that.
        gc.collect()
        time.sleep(0.1)

elif hasattr(sys, 'pypy_version_info'):

    def garbage_collect():  # noqa
        # Collecting weakreferences can take two collections on PyPy.
        gc.collect()
        gc.collect()
else:

    def garbage_collect():  # noqa
        gc.collect()


def receiver_1_arg(val, **kwargs):
    return val


class Callable(object):

    def __call__(self, val, **kwargs):
        return val

    def a(self, val, **kwargs):
        return val

a_signal = Signal(providing_args=['val'], use_caching=False)


class test_Signal:
    """Test suite for dispatcher (barely started)"""

    def _testIsClean(self, signal):
        """Assert that everything has been cleaned up automatically"""
        assert not signal.has_listeners()
        assert signal.receivers == []

    def test_exact(self):
        a_signal.connect(receiver_1_arg, sender=self)
        try:
            expected = [(receiver_1_arg, 'test')]
            result = a_signal.send(sender=self, val='test')
            assert result == expected
        finally:
            a_signal.disconnect(receiver_1_arg, sender=self)
        self._testIsClean(a_signal)

    def test_ignored_sender(self):
        a_signal.connect(receiver_1_arg)
        try:
            expected = [(receiver_1_arg, 'test')]
            result = a_signal.send(sender=self, val='test')
            assert result == expected
        finally:
            a_signal.disconnect(receiver_1_arg)
        self._testIsClean(a_signal)

    def test_garbage_collected(self):
        a = Callable()
        a_signal.connect(a.a, sender=self)
        expected = []
        del a
        garbage_collect()
        result = a_signal.send(sender=self, val='test')
        assert result == expected
        self._testIsClean(a_signal)

    def test_multiple_registration(self):
        a = Callable()
        result = None
        try:
            a_signal.connect(a)
            a_signal.connect(a)
            a_signal.connect(a)
            a_signal.connect(a)
            a_signal.connect(a)
            a_signal.connect(a)
            result = a_signal.send(sender=self, val='test')
            assert len(result) == 1
            assert len(a_signal.receivers) == 1
        finally:
            del a
            del result
            garbage_collect()
            self._testIsClean(a_signal)

    def test_uid_registration(self):

        def uid_based_receiver_1(**kwargs):
            pass

        def uid_based_receiver_2(**kwargs):
            pass

        a_signal.connect(uid_based_receiver_1, dispatch_uid='uid')
        try:
            a_signal.connect(uid_based_receiver_2, dispatch_uid='uid')
            assert len(a_signal.receivers) == 1
        finally:
            a_signal.disconnect(dispatch_uid='uid')
        self._testIsClean(a_signal)

    def test_robust(self):

        def fails(val, **kwargs):
            raise ValueError('this')

        a_signal.connect(fails)
        try:
            a_signal.send(sender=self, val='test')
        finally:
            a_signal.disconnect(fails)
        self._testIsClean(a_signal)

    def test_disconnection(self):
        receiver_1 = Callable()
        receiver_2 = Callable()
        receiver_3 = Callable()
        try:
            try:
                a_signal.connect(receiver_1)
                a_signal.connect(receiver_2)
                a_signal.connect(receiver_3)
            finally:
                a_signal.disconnect(receiver_1)
                del receiver_2
                garbage_collect()
        finally:
            a_signal.disconnect(receiver_3)
        self._testIsClean(a_signal)
celery-4.1.0/t/unit/utils/test_platforms.py0000644000175000017500000006171313130607475020735 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals

import errno
import os
import pytest
import sys
import signal
import tempfile

from case import Mock, call, mock, patch, skip

from celery import _find_option_with_arg
from celery import platforms
from celery.exceptions import SecurityError
from celery.five import WhateverIO
from celery.platforms import (
    get_fdmax, ignore_errno, check_privileges,
    set_process_title, set_mp_process_title, signals,
    maybe_drop_privileges, setuid, setgid, initgroups,
    parse_uid, parse_gid, detached, DaemonContext,
    create_pidlock, Pidfile, LockFailed,
    setgroups, _setgroups_hack, close_open_fds,
    fd_by_path, isatty,
)

try:
    import resource
except ImportError:  # pragma: no cover
    resource = None  # noqa


def test_isatty():
    fh = Mock(name='fh')
    assert isatty(fh) is fh.isatty()
    fh.isatty.side_effect = AttributeError()
    assert not isatty(fh)


class test_find_option_with_arg:

    def test_long_opt(self):
        assert _find_option_with_arg(
            ['--foo=bar'], long_opts=['--foo']) == 'bar'

    def test_short_opt(self):
        assert _find_option_with_arg(
            ['-f', 'bar'], short_opts=['-f']) == 'bar'


@skip.if_win32()
def test_fd_by_path():
    test_file = tempfile.NamedTemporaryFile()
    try:
        keep = fd_by_path([test_file.name])
        assert keep == [test_file.file.fileno()]
        with patch('os.open') as _open:
            _open.side_effect = OSError()
            assert not fd_by_path([test_file.name])
    finally:
        test_file.close()


def test_close_open_fds(patching):
    _close = patching('os.close')
    fdmax = patching('celery.platforms.get_fdmax')
    with patch('os.closerange', create=True) as closerange:
        fdmax.return_value = 3
        close_open_fds()
        if not closerange.called:
            _close.assert_has_calls([call(2), call(1), call(0)])
        _close.side_effect = OSError()
        _close.side_effect.errno = errno.EBADF
        close_open_fds()


class test_ignore_errno:

    def test_raises_EBADF(self):
        with ignore_errno('EBADF'):
            exc = OSError()
            exc.errno = errno.EBADF
            raise exc

    def test_otherwise(self):
        with pytest.raises(OSError):
            with ignore_errno('EBADF'):
                exc = OSError()
                exc.errno = errno.ENOENT
                raise exc


class test_set_process_title:

    def test_no_setps(self):
        prev, platforms._setproctitle = platforms._setproctitle, None
        try:
            set_process_title('foo')
        finally:
            platforms._setproctitle = prev

    @patch('celery.platforms.set_process_title')
    @patch('celery.platforms.current_process')
    def test_mp_no_hostname(self, current_process, set_process_title):
        current_process().name = 'Foo'
        set_mp_process_title('foo', info='hello')
        set_process_title.assert_called_with('foo:Foo', info='hello')

    @patch('celery.platforms.set_process_title')
    @patch('celery.platforms.current_process')
    def test_mp_hostname(self, current_process, set_process_title):
        current_process().name = 'Foo'
        set_mp_process_title('foo', hostname='a@q.com', info='hello')
        set_process_title.assert_called_with('foo: a@q.com:Foo', info='hello')


class test_Signals:

    @patch('signal.getsignal')
    def test_getitem(self, getsignal):
        signals['SIGINT']
        getsignal.assert_called_with(signal.SIGINT)

    def test_supported(self):
        assert signals.supported('INT')
        assert not signals.supported('SIGIMAGINARY')

    @skip.if_win32()
    def test_reset_alarm(self):
        with patch('signal.alarm') as _alarm:
            signals.reset_alarm()
            _alarm.assert_called_with(0)

    def test_arm_alarm(self):
        if hasattr(signal, 'setitimer'):
            with patch('signal.setitimer', create=True) as seti:
                signals.arm_alarm(30)
                seti.assert_called()

    def test_signum(self):
        assert signals.signum(13) == 13
        assert signals.signum('INT') == signal.SIGINT
        assert signals.signum('SIGINT') == signal.SIGINT
        with pytest.raises(TypeError):
            signals.signum('int')
            signals.signum(object())

    @patch('signal.signal')
    def test_ignore(self, set):
        signals.ignore('SIGINT')
        set.assert_called_with(signals.signum('INT'), signals.ignored)
        signals.ignore('SIGTERM')
        set.assert_called_with(signals.signum('TERM'), signals.ignored)

    @patch('signal.signal')
    def test_reset(self, set):
        signals.reset('SIGINT')
        set.assert_called_with(signals.signum('INT'), signals.default)

    @patch('signal.signal')
    def test_setitem(self, set):
        def handle(*args):
            return args
        signals['INT'] = handle
        set.assert_called_with(signal.SIGINT, handle)

    @patch('signal.signal')
    def test_setitem_raises(self, set):
        set.side_effect = ValueError()
        signals['INT'] = lambda *a: a


@skip.if_win32()
class test_get_fdmax:

    @patch('resource.getrlimit')
    def test_when_infinity(self, getrlimit):
        with patch('os.sysconf') as sysconfig:
            sysconfig.side_effect = KeyError()
            getrlimit.return_value = [None, resource.RLIM_INFINITY]
            default = object()
            assert get_fdmax(default) is default

    @patch('resource.getrlimit')
    def test_when_actual(self, getrlimit):
        with patch('os.sysconf') as sysconfig:
            sysconfig.side_effect = KeyError()
            getrlimit.return_value = [None, 13]
            assert get_fdmax(None) == 13


@skip.if_win32()
class test_maybe_drop_privileges:

    def test_on_windows(self):
        prev, sys.platform = sys.platform, 'win32'
        try:
            maybe_drop_privileges()
        finally:
            sys.platform = prev

    @patch('os.getegid')
    @patch('os.getgid')
    @patch('os.geteuid')
    @patch('os.getuid')
    @patch('celery.platforms.parse_uid')
    @patch('celery.platforms.parse_gid')
    @patch('pwd.getpwuid')
    @patch('celery.platforms.setgid')
    @patch('celery.platforms.setuid')
    @patch('celery.platforms.initgroups')
    def test_with_uid(self, initgroups, setuid, setgid,
                      getpwuid, parse_gid, parse_uid, getuid, geteuid,
                      getgid, getegid):
        geteuid.return_value = 10
        getuid.return_value = 10

        class pw_struct(object):
            pw_gid = 50001

        def raise_on_second_call(*args, **kwargs):
            setuid.side_effect = OSError()
            setuid.side_effect.errno = errno.EPERM
        setuid.side_effect = raise_on_second_call

        getpwuid.return_value = pw_struct()
        parse_uid.return_value = 5001
        parse_gid.return_value = 5001
        maybe_drop_privileges(uid='user')
        parse_uid.assert_called_with('user')
        getpwuid.assert_called_with(5001)
        setgid.assert_called_with(50001)
        initgroups.assert_called_with(5001, 50001)
        setuid.assert_has_calls([call(5001), call(0)])

        setuid.side_effect = raise_on_second_call

        def to_root_on_second_call(mock, first):
            return_value = [first]

            def on_first_call(*args, **kwargs):
                ret, return_value[0] = return_value[0], 0
                return ret
            mock.side_effect = on_first_call
        to_root_on_second_call(geteuid, 10)
        to_root_on_second_call(getuid, 10)
        with pytest.raises(SecurityError):
            maybe_drop_privileges(uid='user')

        getuid.return_value = getuid.side_effect = None
        geteuid.return_value = geteuid.side_effect = None
        getegid.return_value = 0
        getgid.return_value = 0
        setuid.side_effect = raise_on_second_call
        with pytest.raises(SecurityError):
            maybe_drop_privileges(gid='group')

        getuid.reset_mock()
        geteuid.reset_mock()
        setuid.reset_mock()
        getuid.side_effect = geteuid.side_effect = None

        def raise_on_second_call(*args, **kwargs):
            setuid.side_effect = OSError()
            setuid.side_effect.errno = errno.ENOENT
        setuid.side_effect = raise_on_second_call
        with pytest.raises(OSError):
            maybe_drop_privileges(uid='user')

    @patch('celery.platforms.parse_uid')
    @patch('celery.platforms.parse_gid')
    @patch('celery.platforms.setgid')
    @patch('celery.platforms.setuid')
    @patch('celery.platforms.initgroups')
    def test_with_guid(self, initgroups, setuid, setgid,
                       parse_gid, parse_uid):

        def raise_on_second_call(*args, **kwargs):
            setuid.side_effect = OSError()
            setuid.side_effect.errno = errno.EPERM
        setuid.side_effect = raise_on_second_call
        parse_uid.return_value = 5001
        parse_gid.return_value = 50001
        maybe_drop_privileges(uid='user', gid='group')
        parse_uid.assert_called_with('user')
        parse_gid.assert_called_with('group')
        setgid.assert_called_with(50001)
        initgroups.assert_called_with(5001, 50001)
        setuid.assert_has_calls([call(5001), call(0)])

        setuid.side_effect = None
        with pytest.raises(SecurityError):
            maybe_drop_privileges(uid='user', gid='group')
        setuid.side_effect = OSError()
        setuid.side_effect.errno = errno.EINVAL
        with pytest.raises(OSError):
            maybe_drop_privileges(uid='user', gid='group')

    @patch('celery.platforms.setuid')
    @patch('celery.platforms.setgid')
    @patch('celery.platforms.parse_gid')
    def test_only_gid(self, parse_gid, setgid, setuid):
        parse_gid.return_value = 50001
        maybe_drop_privileges(gid='group')
        parse_gid.assert_called_with('group')
        setgid.assert_called_with(50001)
        setuid.assert_not_called()


@skip.if_win32()
class test_setget_uid_gid:

    @patch('celery.platforms.parse_uid')
    @patch('os.setuid')
    def test_setuid(self, _setuid, parse_uid):
        parse_uid.return_value = 5001
        setuid('user')
        parse_uid.assert_called_with('user')
        _setuid.assert_called_with(5001)

    @patch('celery.platforms.parse_gid')
    @patch('os.setgid')
    def test_setgid(self, _setgid, parse_gid):
        parse_gid.return_value = 50001
        setgid('group')
        parse_gid.assert_called_with('group')
        _setgid.assert_called_with(50001)

    def test_parse_uid_when_int(self):
        assert parse_uid(5001) == 5001

    @patch('pwd.getpwnam')
    def test_parse_uid_when_existing_name(self, getpwnam):

        class pwent(object):
            pw_uid = 5001

        getpwnam.return_value = pwent()
        assert parse_uid('user') == 5001

    @patch('pwd.getpwnam')
    def test_parse_uid_when_nonexisting_name(self, getpwnam):
        getpwnam.side_effect = KeyError('user')
        with pytest.raises(KeyError):
            parse_uid('user')

    def test_parse_gid_when_int(self):
        assert parse_gid(50001) == 50001

    @patch('grp.getgrnam')
    def test_parse_gid_when_existing_name(self, getgrnam):

        class grent(object):
            gr_gid = 50001

        getgrnam.return_value = grent()
        assert parse_gid('group') == 50001

    @patch('grp.getgrnam')
    def test_parse_gid_when_nonexisting_name(self, getgrnam):
        getgrnam.side_effect = KeyError('group')
        with pytest.raises(KeyError):
            parse_gid('group')


@skip.if_win32()
class test_initgroups:

    @patch('pwd.getpwuid')
    @patch('os.initgroups', create=True)
    def test_with_initgroups(self, initgroups_, getpwuid):
        getpwuid.return_value = ['user']
        initgroups(5001, 50001)
        initgroups_.assert_called_with('user', 50001)

    @patch('celery.platforms.setgroups')
    @patch('grp.getgrall')
    @patch('pwd.getpwuid')
    def test_without_initgroups(self, getpwuid, getgrall, setgroups):
        prev = getattr(os, 'initgroups', None)
        try:
            delattr(os, 'initgroups')
        except AttributeError:
            pass
        try:
            getpwuid.return_value = ['user']

            class grent(object):
                gr_mem = ['user']

                def __init__(self, gid):
                    self.gr_gid = gid

            getgrall.return_value = [grent(1), grent(2), grent(3)]
            initgroups(5001, 50001)
            setgroups.assert_called_with([1, 2, 3])
        finally:
            if prev:
                os.initgroups = prev


@skip.if_win32()
class test_detached:

    def test_without_resource(self):
        prev, platforms.resource = platforms.resource, None
        try:
            with pytest.raises(RuntimeError):
                detached()
        finally:
            platforms.resource = prev

    @patch('celery.platforms._create_pidlock')
    @patch('celery.platforms.signals')
    @patch('celery.platforms.maybe_drop_privileges')
    @patch('os.geteuid')
    @patch(mock.open_fqdn)
    def test_default(self, open, geteuid, maybe_drop, signals, pidlock):
        geteuid.return_value = 0
        context = detached(uid='user', gid='group')
        assert isinstance(context, DaemonContext)
        signals.reset.assert_called_with('SIGCLD')
        maybe_drop.assert_called_with(uid='user', gid='group')
        open.return_value = Mock()

        geteuid.return_value = 5001
        context = detached(uid='user', gid='group', logfile='/foo/bar')
        assert isinstance(context, DaemonContext)
        assert context.after_chdir
        context.after_chdir()
        open.assert_called_with('/foo/bar', 'a')
        open.return_value.close.assert_called_with()

        context = detached(pidfile='/foo/bar/pid')
        assert isinstance(context, DaemonContext)
        assert context.after_chdir
        context.after_chdir()
        pidlock.assert_called_with('/foo/bar/pid')


@skip.if_win32()
class test_DaemonContext:

    @patch('multiprocessing.util._run_after_forkers')
    @patch('os.fork')
    @patch('os.setsid')
    @patch('os._exit')
    @patch('os.chdir')
    @patch('os.umask')
    @patch('os.close')
    @patch('os.closerange')
    @patch('os.open')
    @patch('os.dup2')
    @patch('celery.platforms.close_open_fds')
    def test_open(self, _close_fds, dup2, open, close, closer, umask, chdir,
                  _exit, setsid, fork, run_after_forkers):
        x = DaemonContext(workdir='/opt/workdir', umask=0o22)
        x.stdfds = [0, 1, 2]

        fork.return_value = 0
        with x:
            assert x._is_open
            with x:
                pass
        assert fork.call_count == 2
        setsid.assert_called_with()
        _exit.assert_not_called()

        chdir.assert_called_with(x.workdir)
        umask.assert_called_with(0o22)
        dup2.assert_called()

        fork.reset_mock()
        fork.return_value = 1
        x = DaemonContext(workdir='/opt/workdir')
        x.stdfds = [0, 1, 2]
        with x:
            pass
        assert fork.call_count == 1
        _exit.assert_called_with(0)

        x = DaemonContext(workdir='/opt/workdir', fake=True)
        x.stdfds = [0, 1, 2]
        x._detach = Mock()
        with x:
            pass
        x._detach.assert_not_called()

        x.after_chdir = Mock()
        with x:
            pass
        x.after_chdir.assert_called_with()

        x = DaemonContext(workdir='/opt/workdir', umask='0755')
        assert x.umask == 493
        x = DaemonContext(workdir='/opt/workdir', umask='493')
        assert x.umask == 493

        x.redirect_to_null(None)

        with patch('celery.platforms.mputil') as mputil:
            x = DaemonContext(after_forkers=True)
            x.open()
            mputil._run_after_forkers.assert_called_with()
            x = DaemonContext(after_forkers=False)
            x.open()


@skip.if_win32()
class test_Pidfile:

    @patch('celery.platforms.Pidfile')
    def test_create_pidlock(self, Pidfile):
        p = Pidfile.return_value = Mock()
        p.is_locked.return_value = True
        p.remove_if_stale.return_value = False
        with mock.stdouts() as (_, err):
            with pytest.raises(SystemExit):
                create_pidlock('/var/pid')
            assert 'already exists' in err.getvalue()
        p.remove_if_stale.return_value = True
        ret = create_pidlock('/var/pid')
        assert ret is p

    def test_context(self):
        p = Pidfile('/var/pid')
        p.write_pid = Mock()
        p.remove = Mock()
        with p as _p:
            assert _p is p
        p.write_pid.assert_called_with()
        p.remove.assert_called_with()

    def test_acquire_raises_LockFailed(self):
        p = Pidfile('/var/pid')
        p.write_pid = Mock()
        p.write_pid.side_effect = OSError()
        with pytest.raises(LockFailed):
            with p:
                pass

    @patch('os.path.exists')
    def test_is_locked(self, exists):
        p = Pidfile('/var/pid')
        exists.return_value = True
        assert p.is_locked()
        exists.return_value = False
        assert not p.is_locked()

    def test_read_pid(self):
        with mock.open() as s:
            s.write('1816\n')
            s.seek(0)
            p = Pidfile('/var/pid')
            assert p.read_pid() == 1816

    def test_read_pid_partially_written(self):
        with mock.open() as s:
            s.write('1816')
            s.seek(0)
            p = Pidfile('/var/pid')
            with pytest.raises(ValueError):
                p.read_pid()

    def test_read_pid_raises_ENOENT(self):
        exc = IOError()
        exc.errno = errno.ENOENT
        with mock.open(side_effect=exc):
            p = Pidfile('/var/pid')
            assert p.read_pid() is None

    def test_read_pid_raises_IOError(self):
        exc = IOError()
        exc.errno = errno.EAGAIN
        with mock.open(side_effect=exc):
            p = Pidfile('/var/pid')
            with pytest.raises(IOError):
                p.read_pid()

    def test_read_pid_bogus_pidfile(self):
        with mock.open() as s:
            s.write('eighteensixteen\n')
            s.seek(0)
            p = Pidfile('/var/pid')
            with pytest.raises(ValueError):
                p.read_pid()

    @patch('os.unlink')
    def test_remove(self, unlink):
        unlink.return_value = True
        p = Pidfile('/var/pid')
        p.remove()
        unlink.assert_called_with(p.path)

    @patch('os.unlink')
    def test_remove_ENOENT(self, unlink):
        exc = OSError()
        exc.errno = errno.ENOENT
        unlink.side_effect = exc
        p = Pidfile('/var/pid')
        p.remove()
        unlink.assert_called_with(p.path)

    @patch('os.unlink')
    def test_remove_EACCES(self, unlink):
        exc = OSError()
        exc.errno = errno.EACCES
        unlink.side_effect = exc
        p = Pidfile('/var/pid')
        p.remove()
        unlink.assert_called_with(p.path)

    @patch('os.unlink')
    def test_remove_OSError(self, unlink):
        exc = OSError()
        exc.errno = errno.EAGAIN
        unlink.side_effect = exc
        p = Pidfile('/var/pid')
        with pytest.raises(OSError):
            p.remove()
        unlink.assert_called_with(p.path)

    @patch('os.kill')
    def test_remove_if_stale_process_alive(self, kill):
        p = Pidfile('/var/pid')
        p.read_pid = Mock()
        p.read_pid.return_value = 1816
        kill.return_value = 0
        assert not p.remove_if_stale()
        kill.assert_called_with(1816, 0)
        p.read_pid.assert_called_with()

        kill.side_effect = OSError()
        kill.side_effect.errno = errno.ENOENT
        assert not p.remove_if_stale()

    @patch('os.kill')
    def test_remove_if_stale_process_dead(self, kill):
        with mock.stdouts():
            p = Pidfile('/var/pid')
            p.read_pid = Mock()
            p.read_pid.return_value = 1816
            p.remove = Mock()
            exc = OSError()
            exc.errno = errno.ESRCH
            kill.side_effect = exc
            assert p.remove_if_stale()
            kill.assert_called_with(1816, 0)
            p.remove.assert_called_with()

    def test_remove_if_stale_broken_pid(self):
        with mock.stdouts():
            p = Pidfile('/var/pid')
            p.read_pid = Mock()
            p.read_pid.side_effect = ValueError()
            p.remove = Mock()

            assert p.remove_if_stale()
            p.remove.assert_called_with()

    def test_remove_if_stale_no_pidfile(self):
        p = Pidfile('/var/pid')
        p.read_pid = Mock()
        p.read_pid.return_value = None
        p.remove = Mock()

        assert p.remove_if_stale()
        p.remove.assert_called_with()

    @patch('os.fsync')
    @patch('os.getpid')
    @patch('os.open')
    @patch('os.fdopen')
    @patch(mock.open_fqdn)
    def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
        getpid.return_value = 1816
        osopen.return_value = 13
        w = fdopen.return_value = WhateverIO()
        w.close = Mock()
        r = open_.return_value = WhateverIO()
        r.write('1816\n')
        r.seek(0)

        p = Pidfile('/var/pid')
        p.write_pid()
        w.seek(0)
        assert w.readline() == '1816\n'
        w.close.assert_called()
        getpid.assert_called_with()
        osopen.assert_called_with(
            p.path, platforms.PIDFILE_FLAGS, platforms.PIDFILE_MODE,
        )
        fdopen.assert_called_with(13, 'w')
        fsync.assert_called_with(13)
        open_.assert_called_with(p.path)

    @patch('os.fsync')
    @patch('os.getpid')
    @patch('os.open')
    @patch('os.fdopen')
    @patch(mock.open_fqdn)
    def test_write_reread_fails(self, open_, fdopen,
                                osopen, getpid, fsync):
        getpid.return_value = 1816
        osopen.return_value = 13
        w = fdopen.return_value = WhateverIO()
        w.close = Mock()
        r = open_.return_value = WhateverIO()
        r.write('11816\n')
        r.seek(0)

        p = Pidfile('/var/pid')
        with pytest.raises(LockFailed):
            p.write_pid()


class test_setgroups:

    @patch('os.setgroups', create=True)
    def test_setgroups_hack_ValueError(self, setgroups):

        def on_setgroups(groups):
            if len(groups) <= 200:
                setgroups.return_value = True
                return
            raise ValueError()
        setgroups.side_effect = on_setgroups
        _setgroups_hack(list(range(400)))

        setgroups.side_effect = ValueError()
        with pytest.raises(ValueError):
            _setgroups_hack(list(range(400)))

    @patch('os.setgroups', create=True)
    def test_setgroups_hack_OSError(self, setgroups):
        exc = OSError()
        exc.errno = errno.EINVAL

        def on_setgroups(groups):
            if len(groups) <= 200:
                setgroups.return_value = True
                return
            raise exc
        setgroups.side_effect = on_setgroups

        _setgroups_hack(list(range(400)))

        setgroups.side_effect = exc
        with pytest.raises(OSError):
            _setgroups_hack(list(range(400)))

        exc2 = OSError()
        exc2.errno = errno.ESRCH
        setgroups.side_effect = exc2
        with pytest.raises(OSError):
            _setgroups_hack(list(range(400)))

    @skip.if_win32()
    @patch('celery.platforms._setgroups_hack')
    def test_setgroups(self, hack):
        with patch('os.sysconf') as sysconf:
sysconf.return_value = 100 setgroups(list(range(400))) hack.assert_called_with(list(range(100))) @skip.if_win32() @patch('celery.platforms._setgroups_hack') def test_setgroups_sysconf_raises(self, hack): with patch('os.sysconf') as sysconf: sysconf.side_effect = ValueError() setgroups(list(range(400))) hack.assert_called_with(list(range(400))) @skip.if_win32() @patch('os.getgroups') @patch('celery.platforms._setgroups_hack') def test_setgroups_raises_ESRCH(self, hack, getgroups): with patch('os.sysconf') as sysconf: sysconf.side_effect = ValueError() esrch = OSError() esrch.errno = errno.ESRCH hack.side_effect = esrch with pytest.raises(OSError): setgroups(list(range(400))) @skip.if_win32() @patch('os.getgroups') @patch('celery.platforms._setgroups_hack') def test_setgroups_raises_EPERM(self, hack, getgroups): with patch('os.sysconf') as sysconf: sysconf.side_effect = ValueError() eperm = OSError() eperm.errno = errno.EPERM hack.side_effect = eperm getgroups.return_value = list(range(400)) setgroups(list(range(400))) getgroups.assert_called_with() getgroups.return_value = [1000] with pytest.raises(OSError): setgroups(list(range(400))) getgroups.assert_called_with() def test_check_privileges(): class Obj(object): fchown = 13 prev, platforms.os = platforms.os, Obj() try: with pytest.raises(SecurityError): check_privileges({'pickle'}) finally: platforms.os = prev prev, platforms.os = platforms.os, object() try: check_privileges({'pickle'}) finally: platforms.os = prev celery-4.1.0/t/unit/utils/test_objects.py0000644000175000017500000000034513130607475020361 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery.utils.objects import Bunch class test_Bunch: def test(self): x = Bunch(foo='foo', bar=2) assert x.foo == 'foo' assert x.bar == 2 celery-4.1.0/t/unit/utils/test_threads.py0000644000175000017500000000465013130607475020365 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import mock, patch from celery.utils.threads import ( _LocalStack, _FastLocalStack, LocalManager, Local, bgThread, ) class test_bgThread: def test_crash(self): class T(bgThread): def body(self): raise KeyError() with patch('os._exit') as _exit: with mock.stdouts(): _exit.side_effect = ValueError() t = T() with pytest.raises(ValueError): t.run() _exit.assert_called_with(1) def test_interface(self): x = bgThread() with pytest.raises(NotImplementedError): x.body() class test_Local: def test_iter(self): x = Local() x.foo = 'bar' ident = x.__ident_func__() assert (ident, {'foo': 'bar'}) in list(iter(x)) delattr(x, 'foo') assert (ident, {'foo': 'bar'}) not in list(iter(x)) with pytest.raises(AttributeError): delattr(x, 'foo') assert x(lambda: 'foo') is not None class test_LocalStack: def test_stack(self): x = _LocalStack() assert x.pop() is None x.__release_local__() ident = x.__ident_func__ x.__ident_func__ = ident with pytest.raises(RuntimeError): x()[0] x.push(['foo']) assert x()[0] == 'foo' x.pop() with pytest.raises(RuntimeError): x()[0] class test_FastLocalStack: def test_stack(self): x = _FastLocalStack() x.push(['foo']) x.push(['bar']) assert x.top == ['bar'] assert len(x) == 2 x.pop() assert x.top == ['foo'] x.pop() assert x.top is None class test_LocalManager: def test_init(self): x = LocalManager() assert x.locals == [] assert x.ident_func def ident(): return 1 loc = Local() x = LocalManager([loc], ident_func=ident) assert x.locals == [loc] x = LocalManager(loc, ident_func=ident) assert x.locals == [loc] assert 
x.ident_func is ident assert x.locals[0].__ident_func__ is ident assert x.get_ident() == 1 with patch('celery.utils.threads.release_local') as release: x.cleanup() release.assert_called_with(loc) assert repr(x) celery-4.1.0/t/unit/utils/test_debug.py0000644000175000017500000000454313130607475020022 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from celery.utils import debug def test_on_blocking(patching): getframeinfo = patching('inspect.getframeinfo') frame = Mock(name='frame') with pytest.raises(RuntimeError): debug._on_blocking(1, frame) getframeinfo.assert_called_with(frame) def test_blockdetection(patching): signals = patching('celery.utils.debug.signals') with debug.blockdetection(10): signals.arm_alarm.assert_called_with(10) signals.__setitem__.assert_called_with('ALRM', debug._on_blocking) signals.__setitem__.assert_called_with('ALRM', signals['ALRM']) signals.reset_alarm.assert_called_with() def test_sample_mem(patching): mem_rss = patching('celery.utils.debug.mem_rss') prev, debug._mem_sample = debug._mem_sample, [] try: debug.sample_mem() assert debug._mem_sample[0] is mem_rss() finally: debug._mem_sample = prev def test_sample(): x = list(range(100)) assert list(debug.sample(x, 10)) == [ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, ] x = list(range(91)) assert list(debug.sample(x, 10)) == [ 0, 9, 18, 27, 36, 45, 54, 63, 72, 81, ] @pytest.mark.parametrize('f,precision,expected', [ (10, 5, '10'), (10.45645234234, 5, '10.456'), ]) def test_hfloat(f, precision, expected): assert str(debug.hfloat(f, precision)) == expected @pytest.mark.parametrize('byt,expected', [ (2 ** 20, '1MB'), (4 * 2 ** 20, '4MB'), (2 ** 16, '64KB'), (2 ** 16, '64KB'), (2 ** 8, '256b'), ]) def test_humanbytes(byt, expected): assert debug.humanbytes(byt) == expected def test_mem_rss(patching): humanbytes = patching('celery.utils.debug.humanbytes') ps = patching('celery.utils.debug.ps') ret = debug.mem_rss() ps.assert_called_with() ps().memory_info.assert_called_with() humanbytes.assert_called_with(ps().memory_info().rss) assert ret is humanbytes() ps.return_value = None assert debug.mem_rss() is None def test_ps(patching): Process = patching('celery.utils.debug.Process') getpid = patching('os.getpid') prev, debug._process = debug._process, None try: debug.ps() Process.assert_called_with(getpid()) assert debug._process is Process() finally: debug._process = prev celery-4.1.0/t/unit/utils/test_timer2.py0000644000175000017500000000475113130607475020137 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import sys import time from case import Mock, patch, call import celery.utils.timer2 as timer2 class test_Timer: def test_enter_after(self): t = timer2.Timer() try: done = [False] def set_done(): done[0] = True t.call_after(0.3, set_done) mss = 0 while not done[0]: if mss >= 2.0: raise Exception('test timed out') time.sleep(0.1) mss += 0.1 finally: t.stop() def test_exit_after(self): t = timer2.Timer() t.call_after = Mock() t.exit_after(0.3, priority=10) t.call_after.assert_called_with(0.3, sys.exit, 10) def test_ensure_started_not_started(self): t = timer2.Timer() t.running = True t.start = Mock() t.ensure_started() t.start.assert_not_called() t.running = False t.on_start = Mock() t.ensure_started() t.on_start.assert_called_with(t) t.start.assert_called_with() @patch('celery.utils.timer2.sleep') def test_on_tick(self, sleep): on_tick = Mock(name='on_tick') t = timer2.Timer(on_tick=on_tick) ne = t._next_entry = 
Mock(name='_next_entry') ne.return_value = 3.33 ne.on_nth_call_do(t._is_shutdown.set, 3) t.run() sleep.assert_called_with(3.33) on_tick.assert_has_calls([call(3.33), call(3.33), call(3.33)]) @patch('os._exit') def test_thread_crash(self, _exit): t = timer2.Timer() t._next_entry = Mock() t._next_entry.side_effect = OSError(131) t.run() _exit.assert_called_with(1) def test_gc_race_lost(self): t = timer2.Timer() t._is_stopped.set = Mock() t._is_stopped.set.side_effect = TypeError() t._is_shutdown.set() t.run() t._is_stopped.set.assert_called_with() def test_test_enter(self): t = timer2.Timer() t._do_enter = Mock() e = Mock() t.enter(e, 13, 0) t._do_enter.assert_called_with('enter_at', e, 13, priority=0) def test_test_enter_after(self): t = timer2.Timer() t._do_enter = Mock() t.enter_after() t._do_enter.assert_called_with('enter_after') def test_cancel(self): t = timer2.Timer() tref = Mock() t.cancel(tref) tref.cancel.assert_called_with() celery-4.1.0/t/unit/utils/test_term.py0000644000175000017500000000350013130607475017673 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pytest from case import skip from celery.utils import term from celery.utils.term import colored, fg from celery.five import text_t @skip.if_win32() class test_colored: @pytest.fixture(autouse=True) def preserve_encoding(self, patching): patching('sys.getdefaultencoding', 'utf-8') @pytest.mark.parametrize('name,color', [ ('black', term.BLACK), ('red', term.RED), ('green', term.GREEN), ('yellow', term.YELLOW), ('blue', term.BLUE), ('magenta', term.MAGENTA), ('cyan', term.CYAN), ('white', term.WHITE), ]) def test_colors(self, name, color): assert fg(30 + color) in str(colored().names[name]('foo')) @pytest.mark.parametrize('name', [ 'bold', 'underline', 'blink', 'reverse', 'bright', 'ired', 'igreen', 'iyellow', 'iblue', 'imagenta', 'icyan', 'iwhite', 'reset', ]) def test_modifiers(self, name): assert str(getattr(colored(), name)('f')) def test_unicode(self): assert text_t(colored().green('∂bar')) assert colored().red('éefoo') + colored().green('∂bar') assert colored().red('foo').no_color() == 'foo' def test_repr(self): assert repr(colored().blue('Ã¥foo')) assert "''" in repr(colored()) def test_more_unicode(self): c = colored() s = c.red('foo', c.blue('bar'), c.green('baz')) assert s.no_color() c._fold_no_color(s, 'øfoo') c._fold_no_color('fooÃ¥', s) c = colored().red('Ã¥foo') assert c._add(c, 'baræ') == '\x1b[1;31m\xe5foo\x1b[0mbar\xe6' c2 = colored().blue('Æ’Æ’z') c3 = c._add(c, c2) assert c3 == '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m' celery-4.1.0/t/unit/utils/test_utils.py0000644000175000017500000000126013130607475020065 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from celery.utils import chunks, cached_property @pytest.mark.parametrize('items,n,expected', [ (range(11), 2, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]), (range(11), 3, [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]), (range(10), 2, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]), ]) def test_chunks(items, n, expected): x = chunks(iter(list(items)), n) assert list(x) == expected def test_cached_property(): def fun(obj): return fun.value x = cached_property(fun) assert x.__get__(None) is x assert x.__set__(None, None) is x assert x.__delete__(None) is x celery-4.1.0/t/unit/utils/test_collections.py0000644000175000017500000003136113130607475021250 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals 
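# NOTE (added commentary, not original code): the ``from collections import
# Mapping`` imports in this module use the pre-Python-3.10 alias; on current
# interpreters the ABC must be imported from ``collections.abc`` instead.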
import pickle import pytest from collections import Mapping from itertools import count from time import time from case import skip from billiard.einfo import ExceptionInfo from celery.utils.collections import ( AttributeDict, BufferMap, ConfigurationView, DictAttribute, LimitedSet, Messagebuffer, ) from celery.five import items from celery.utils.objects import Bunch class test_DictAttribute: def test_get_set_keys_values_items(self): x = DictAttribute(Bunch()) x['foo'] = 'The quick brown fox' assert x['foo'] == 'The quick brown fox' assert x['foo'] == x.obj.foo assert x.get('foo') == 'The quick brown fox' assert x.get('bar') is None with pytest.raises(KeyError): x['bar'] x.foo = 'The quick yellow fox' assert x['foo'] == 'The quick yellow fox' assert ('foo', 'The quick yellow fox') in list(x.items()) assert 'foo' in list(x.keys()) assert 'The quick yellow fox' in list(x.values()) def test_setdefault(self): x = DictAttribute(Bunch()) x.setdefault('foo', 'NEW') assert x['foo'] == 'NEW' x.setdefault('foo', 'XYZ') assert x['foo'] == 'NEW' def test_contains(self): x = DictAttribute(Bunch()) x['foo'] = 1 assert 'foo' in x assert 'bar' not in x def test_items(self): obj = Bunch(attr1=1) x = DictAttribute(obj) x['attr2'] = 2 assert x['attr1'] == 1 assert x['attr2'] == 2 class test_ConfigurationView: def setup(self): self.view = ConfigurationView( {'changed_key': 1, 'both': 2}, [ {'default_key': 1, 'both': 1}, ], ) def test_setdefault(self): self.view.setdefault('both', 36) assert self.view['both'] == 2 self.view.setdefault('new', 36) assert self.view['new'] == 36 def test_get(self): assert self.view.get('both') == 2 sp = object() assert self.view.get('nonexisting', sp) is sp def test_update(self): changes = dict(self.view.changes) self.view.update(a=1, b=2, c=3) assert self.view.changes == dict(changes, a=1, b=2, c=3) def test_contains(self): assert 'changed_key' in self.view assert 'default_key' in self.view assert 'new' not in self.view def test_repr(self): assert 'changed_key' in repr(self.view) assert 'default_key' in repr(self.view) def test_iter(self): expected = { 'changed_key': 1, 'default_key': 1, 'both': 2, } assert dict(items(self.view)) == expected assert sorted(list(iter(self.view))) == sorted(list(expected.keys())) assert sorted(list(self.view.keys())) == sorted(list(expected.keys())) assert (sorted(list(self.view.values())) == sorted(list(expected.values()))) assert 'changed_key' in list(self.view.keys()) assert 2 in list(self.view.values()) assert ('both', 2) in list(self.view.items()) def test_add_defaults_dict(self): defaults = {'foo': 10} self.view.add_defaults(defaults) assert self.view.foo == 10 def test_add_defaults_object(self): defaults = Bunch(foo=10) self.view.add_defaults(defaults) assert self.view.foo == 10 def test_clear(self): self.view.clear() assert self.view.both == 1 assert 'changed_key' not in self.view def test_bool(self): assert bool(self.view) self.view.maps[:] = [] assert not bool(self.view) def test_len(self): assert len(self.view) == 3 self.view.KEY = 33 assert len(self.view) == 4 self.view.clear() assert len(self.view) == 2 def test_isa_mapping(self): from collections import Mapping assert issubclass(ConfigurationView, Mapping) def test_isa_mutable_mapping(self): from collections import MutableMapping assert issubclass(ConfigurationView, MutableMapping) class test_ExceptionInfo: def test_exception_info(self): try: raise LookupError('The quick brown fox jumps...') except Exception: einfo = ExceptionInfo() assert str(einfo) == einfo.traceback assert 
isinstance(einfo.exception, LookupError) assert einfo.exception.args == ('The quick brown fox jumps...',) assert einfo.traceback assert repr(einfo) @skip.if_win32() class test_LimitedSet: def test_add(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('bar') for n in 'foo', 'bar': assert n in s s.add('baz') for n in 'bar', 'baz': assert n in s assert 'foo' not in s s = LimitedSet(maxlen=10) for i in range(150): s.add(i) assert len(s) <= 10 # make sure heap is not leaking: assert len(s._heap) < len(s) * ( 100. + s.max_heap_percent_overload) / 100 def test_purge(self): # purge now enforces rules # cant purge(1) now. but .purge(now=...) still works s = LimitedSet(maxlen=10) [s.add(i) for i in range(10)] s.maxlen = 2 s.purge() assert len(s) == 2 # expired s = LimitedSet(maxlen=10, expires=1) [s.add(i) for i in range(10)] s.maxlen = 2 s.purge(now=time() + 100) assert len(s) == 0 # not expired s = LimitedSet(maxlen=None, expires=1) [s.add(i) for i in range(10)] s.maxlen = 2 s.purge(now=lambda: time() - 100) assert len(s) == 2 # expired -> minsize s = LimitedSet(maxlen=10, minlen=10, expires=1) [s.add(i) for i in range(20)] s.minlen = 3 s.purge(now=time() + 3) assert s.minlen == len(s) assert len(s._heap) <= s.maxlen * ( 100. + s.max_heap_percent_overload) / 100 def test_pickleable(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('bar') assert pickle.loads(pickle.dumps(s)) == s def test_iter(self): s = LimitedSet(maxlen=3) items = ['foo', 'bar', 'baz', 'xaz'] for item in items: s.add(item) l = list(iter(s)) for item in items[1:]: assert item in l assert 'foo' not in l assert l == items[1:], 'order by insertion time' def test_repr(self): s = LimitedSet(maxlen=2) items = 'foo', 'bar' for item in items: s.add(item) assert 'LimitedSet(' in repr(s) def test_discard(self): s = LimitedSet(maxlen=2) s.add('foo') s.discard('foo') assert 'foo' not in s assert len(s._data) == 0 s.discard('foo') def test_clear(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('bar') assert len(s) == 2 s.clear() assert not s def test_update(self): s1 = LimitedSet(maxlen=2) s1.add('foo') s1.add('bar') s2 = LimitedSet(maxlen=2) s2.update(s1) assert sorted(list(s2)) == ['bar', 'foo'] s2.update(['bla']) assert sorted(list(s2)) == ['bar', 'bla'] s2.update(['do', 're']) assert sorted(list(s2)) == ['do', 're'] s1 = LimitedSet(maxlen=10, expires=None) s2 = LimitedSet(maxlen=10, expires=None) s3 = LimitedSet(maxlen=10, expires=None) s4 = LimitedSet(maxlen=10, expires=None) s5 = LimitedSet(maxlen=10, expires=None) for i in range(12): s1.add(i) s2.add(i * i) s3.update(s1) s3.update(s2) s4.update(s1.as_dict()) s4.update(s2.as_dict()) s5.update(s1._data) # revoke is using this s5.update(s2._data) assert s3 == s4 assert s3 == s5 s2.update(s4) s4.update(s2) assert s2 == s4 def test_iterable_and_ordering(self): s = LimitedSet(maxlen=35, expires=None) # we use a custom clock here, as time.time() does not have enough # precision when called quickly (can return the same value twice). 
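# itertools.count(1) (imported at the top of this module) provides those
# strictly increasing timestamps, passed explicitly via add(i, now=next(clock)).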
clock = count(1) for i in reversed(range(15)): s.add(i, now=next(clock)) j = 40 for i in s: assert i < j # each item is smaller and smaller j = i assert i == 0 # last item is zero def test_pop_and_ordering_again(self): s = LimitedSet(maxlen=5) for i in range(10): s.add(i) j = -1 for _ in range(5): i = s.pop() assert j < i i = s.pop() assert i is None def test_as_dict(self): s = LimitedSet(maxlen=2) s.add('foo') assert isinstance(s.as_dict(), Mapping) def test_add_removes_duplicate_from_small_heap(self): s = LimitedSet(maxlen=2) s.add('foo') s.add('foo') s.add('foo') assert len(s) == 1 assert len(s._data) == 1 assert len(s._heap) == 1 def test_add_removes_duplicate_from_big_heap(self): s = LimitedSet(maxlen=1000) [s.add(i) for i in range(2000)] assert len(s) == 1000 [s.add('foo') for i in range(1000)] # heap is refreshed when 15% larger than _data assert len(s._heap) < 1150 [s.add('foo') for i in range(1000)] assert len(s._heap) < 1150 class test_AttributeDict: def test_getattr__setattr(self): x = AttributeDict({'foo': 'bar'}) assert x['foo'] == 'bar' with pytest.raises(AttributeError): x.bar x.bar = 'foo' assert x['bar'] == 'foo' class test_Messagebuffer: def assert_size_and_first(self, buf, size, expected_first_item): assert len(buf) == size assert buf.take() == expected_first_item def test_append_limited(self): b = Messagebuffer(10) for i in range(20): b.put(i) self.assert_size_and_first(b, 10, 10) def test_append_unlimited(self): b = Messagebuffer(None) for i in range(20): b.put(i) self.assert_size_and_first(b, 20, 0) def test_extend_limited(self): b = Messagebuffer(10) b.extend(list(range(20))) self.assert_size_and_first(b, 10, 10) def test_extend_unlimited(self): b = Messagebuffer(None) b.extend(list(range(20))) self.assert_size_and_first(b, 20, 0) def test_extend_eviction_time_limited(self): b = Messagebuffer(3000) b.extend(range(10000)) assert len(b) > 3000 b.evict() assert len(b) == 3000 def test_pop_empty_with_default(self): b = Messagebuffer(10) sentinel = object() assert b.take(sentinel) is sentinel def test_pop_empty_no_default(self): b = Messagebuffer(10) with pytest.raises(b.Empty): b.take() def test_repr(self): assert repr(Messagebuffer(10, [1, 2, 3])) def test_iter(self): b = Messagebuffer(10, list(range(10))) assert len(b) == 10 for i, item in enumerate(b): assert item == i assert len(b) == 0 def test_contains(self): b = Messagebuffer(10, list(range(10))) assert 5 in b def test_reversed(self): assert (list(reversed(Messagebuffer(10, list(range(10))))) == list(reversed(range(10)))) def test_getitem(self): b = Messagebuffer(10, list(range(10))) for i in range(10): assert b[i] == i class test_BufferMap: def test_append_limited(self): b = BufferMap(10) for i in range(20): b.put(i, i) self.assert_size_and_first(b, 10, 10) def assert_size_and_first(self, buf, size, expected_first_item): assert buf.total == size assert buf._LRUpop() == expected_first_item def test_append_unlimited(self): b = BufferMap(None) for i in range(20): b.put(i, i) self.assert_size_and_first(b, 20, 0) def test_extend_limited(self): b = BufferMap(10) b.extend(1, list(range(20))) self.assert_size_and_first(b, 10, 10) def test_extend_unlimited(self): b = BufferMap(None) b.extend(1, list(range(20))) self.assert_size_and_first(b, 20, 0) def test_pop_empty_with_default(self): b = BufferMap(10) sentinel = object() assert b.take(1, sentinel) is sentinel def test_pop_empty_no_default(self): b = BufferMap(10) with pytest.raises(b.Empty): b.take(1) def test_repr(self): assert repr(Messagebuffer(10, [1, 2, 3])) 
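# --- Added usage sketch (not part of the original test module) ---
# The tests above pin down LimitedSet's eviction and expiry semantics; this
# minimal sketch restates them as straight-line code. It uses only names the
# tests themselves exercise (LimitedSet, add, purge, maxlen, expires); the
# function name is illustrative.
from time import time

from celery.utils.collections import LimitedSet


def limited_set_sketch():
    s = LimitedSet(maxlen=2)
    s.add('foo')
    s.add('bar')
    s.add('baz')                 # maxlen=2: evicts 'foo', the oldest member
    assert 'foo' not in s and 'baz' in s

    s = LimitedSet(maxlen=10, expires=1)
    for i in range(10):
        s.add(i)
    s.purge(now=time() + 100)    # every member is older than `expires`
    assert len(s) == 0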
celery-4.1.0/t/unit/utils/test_time.py0000644000175000017500000001605013130607475017666 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import pytz from datetime import datetime, timedelta, tzinfo from pytz import AmbiguousTimeError from case import Mock from celery.utils.time import ( delta_resolution, humanize_seconds, maybe_iso8601, maybe_timedelta, timezone, rate, remaining, make_aware, maybe_make_aware, localize, LocalTimezone, ffwd, utcoffset, ) from celery.utils.iso8601 import parse_iso8601 class test_LocalTimezone: def test_daylight(self, patching): time = patching('celery.utils.time._time') time.timezone = 3600 time.daylight = False x = LocalTimezone() assert x.STDOFFSET == timedelta(seconds=-3600) assert x.DSTOFFSET == x.STDOFFSET time.daylight = True time.altzone = 3600 y = LocalTimezone() assert y.STDOFFSET == timedelta(seconds=-3600) assert y.DSTOFFSET == timedelta(seconds=-3600) assert repr(y) y._isdst = Mock() y._isdst.return_value = True assert y.utcoffset(datetime.now()) assert not y.dst(datetime.now()) y._isdst.return_value = False assert y.utcoffset(datetime.now()) assert not y.dst(datetime.now()) assert y.tzname(datetime.now()) class test_iso8601: def test_parse_with_timezone(self): d = datetime.utcnow().replace(tzinfo=pytz.utc) assert parse_iso8601(d.isoformat()) == d # 2013-06-07T20:12:51.775877+00:00 iso = d.isoformat() iso1 = iso.replace('+00:00', '-01:00') d1 = parse_iso8601(iso1) assert d1.tzinfo._minutes == -60 iso2 = iso.replace('+00:00', '+01:00') d2 = parse_iso8601(iso2) assert d2.tzinfo._minutes == +60 iso3 = iso.replace('+00:00', 'Z') d3 = parse_iso8601(iso3) assert d3.tzinfo == pytz.UTC @pytest.mark.parametrize('delta,expected', [ (timedelta(days=2), datetime(2010, 3, 30, 0, 0)), (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), (timedelta(seconds=2), None), ]) def test_delta_resolution(delta, expected): dt = datetime(2010, 3, 30, 11, 50, 58, 41065) assert delta_resolution(dt, delta) == expected or dt @pytest.mark.parametrize('seconds,expected', [ (4 * 60 * 60 * 24, '4.00 days'), (1 * 60 * 60 * 24, '1.00 day'), (4 * 60 * 60, '4.00 hours'), (1 * 60 * 60, '1.00 hour'), (4 * 60, '4.00 minutes'), (1 * 60, '1.00 minute'), (4, '4.00 seconds'), (1, '1.00 second'), (4.3567631221, '4.36 seconds'), (0, 'now'), ]) def test_humanize_seconds(seconds, expected): assert humanize_seconds(seconds) == expected def test_humanize_seconds__prefix(): assert humanize_seconds(4, prefix='about ') == 'about 4.00 seconds' def test_maybe_iso8601_datetime(): now = datetime.now() assert maybe_iso8601(now) is now @pytest.mark.parametrize('arg,expected', [ (30, timedelta(seconds=30)), (30.6, timedelta(seconds=30.6)), (timedelta(days=2), timedelta(days=2)), ]) def test_maybe_timedelta(arg, expected): assert maybe_timedelta(arg) == expected def test_remaining_relative(): remaining(datetime.utcnow(), timedelta(hours=1), relative=True) class test_timezone: def test_get_timezone_with_pytz(self): assert timezone.get_timezone('UTC') def test_tz_or_local(self): assert timezone.tz_or_local() == timezone.local assert timezone.tz_or_local(timezone.utc) def test_to_local(self): assert timezone.to_local(make_aware(datetime.utcnow(), timezone.utc)) assert timezone.to_local(datetime.utcnow()) def test_to_local_fallback(self): assert timezone.to_local_fallback( make_aware(datetime.utcnow(), timezone.utc)) assert timezone.to_local_fallback(datetime.utcnow()) class test_make_aware: def 
test_tz_without_localize(self): tz = tzinfo() assert not hasattr(tz, 'localize') wtz = make_aware(datetime.utcnow(), tz) assert wtz.tzinfo == tz def test_when_has_localize(self): class tzz(tzinfo): raises = False def localize(self, dt, is_dst=None): self.localized = True if self.raises and is_dst is None: self.raised = True raise AmbiguousTimeError() return 1 # needed by min() in Python 3 (None not hashable) tz = tzz() make_aware(datetime.utcnow(), tz) assert tz.localized tz2 = tzz() tz2.raises = True make_aware(datetime.utcnow(), tz2) assert tz2.localized assert tz2.raised def test_maybe_make_aware(self): aware = datetime.utcnow().replace(tzinfo=timezone.utc) assert maybe_make_aware(aware) naive = datetime.utcnow() assert maybe_make_aware(naive) assert maybe_make_aware(naive).tzinfo is pytz.utc tz = pytz.timezone('US/Eastern') eastern = datetime.utcnow().replace(tzinfo=tz) assert maybe_make_aware(eastern).tzinfo is tz utcnow = datetime.utcnow() assert maybe_make_aware(utcnow, 'UTC').tzinfo is pytz.utc class test_localize: def test_tz_without_normalize(self): tz = tzinfo() assert not hasattr(tz, 'normalize') assert localize(make_aware(datetime.utcnow(), tz), tz) def test_when_has_normalize(self): class tzz(tzinfo): raises = None def normalize(self, dt, **kwargs): self.normalized = True if self.raises and kwargs and kwargs.get('is_dst') is None: self.raised = True raise self.raises return 1 # needed by min() in Python 3 (None not hashable) tz = tzz() localize(make_aware(datetime.utcnow(), tz), tz) assert tz.normalized tz2 = tzz() tz2.raises = AmbiguousTimeError() localize(make_aware(datetime.utcnow(), tz2), tz2) assert tz2.normalized assert tz2.raised tz3 = tzz() tz3.raises = TypeError() localize(make_aware(datetime.utcnow(), tz3), tz3) assert tz3.normalized assert tz3.raised @pytest.mark.parametrize('s,expected', [ (999, 999), (7.5, 7.5), ('2.5/s', 2.5), ('1456/s', 1456), ('100/m', 100 / 60.0), ('10/h', 10 / 60.0 / 60.0), (0, 0), (None, 0), ('0/m', 0), ('0/h', 0), ('0/s', 0), ('0.0/s', 0), ]) def test_rate_limit_string(s, expected): assert rate(s) == expected class test_ffwd: def test_repr(self): x = ffwd(year=2012) assert repr(x) def test_radd_with_unknown_gives_NotImplemented(self): x = ffwd(year=2012) assert x.__radd__(object()) == NotImplemented class test_utcoffset: def test_utcoffset(self, patching): _time = patching('celery.utils.time._time') _time.daylight = True assert utcoffset(time=_time) is not None _time.daylight = False assert utcoffset(time=_time) is not None celery-4.1.0/t/unit/utils/test_encoding.py0000644000175000017500000000067413130607475020523 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery.utils import encoding class test_encoding: def test_safe_str(self): assert encoding.safe_str(object()) assert encoding.safe_str('foo') def test_safe_repr(self): assert encoding.safe_repr(object()) class foo(object): def __repr__(self): raise ValueError('foo') assert encoding.safe_repr(foo()) celery-4.1.0/t/unit/utils/test_pickle.py0000644000175000017500000000265313130607475020203 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery.utils.serialization import pickle class RegularException(Exception): pass class ArgOverrideException(Exception): def __init__(self, message, status_code=10): self.status_code = status_code Exception.__init__(self, message, status_code) class test_Pickle: def test_pickle_regular_exception(self): exc = None try: raise RegularException('RegularException raised') 
except RegularException as exc_: exc = exc_ pickled = pickle.dumps({'exception': exc}) unpickled = pickle.loads(pickled) exception = unpickled.get('exception') assert exception assert isinstance(exception, RegularException) assert exception.args == ('RegularException raised',) def test_pickle_arg_override_exception(self): exc = None try: raise ArgOverrideException( 'ArgOverrideException raised', status_code=100, ) except ArgOverrideException as exc_: exc = exc_ pickled = pickle.dumps({'exception': exc}) unpickled = pickle.loads(pickled) exception = unpickled.get('exception') assert exception assert isinstance(exception, ArgOverrideException) assert exception.args == ('ArgOverrideException raised', 100) assert exception.status_code == 100 celery-4.1.0/t/unit/utils/test_nodenames.py0000644000175000017500000000040213130607475020673 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from kombu import Queue from celery.utils.nodenames import worker_direct class test_worker_direct: def test_returns_if_queue(self): q = Queue('foo') assert worker_direct(q) is q celery-4.1.0/t/unit/security/0000755000175000017500000000000013135426347016026 5ustar omeromer00000000000000celery-4.1.0/t/unit/security/case.py0000644000175000017500000000024613130607475017313 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import skip @skip.unless_module('OpenSSL.crypto', name='pyOpenSSL') class SecurityCase: pass celery-4.1.0/t/unit/security/__init__.py0000644000175000017500000000707113130607475020142 0ustar omeromer00000000000000""" Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) Generated with `extra/security/get-cert.sh` """ from __future__ import absolute_import, unicode_literals KEY1 = """-----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQC9Twh0V5q/R1Q8N+Y+CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5 dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45//IWz6/BdPFWaPm0rtYbcxZHqXDZScRp vFDLHhMysdqQWHxXVxpqIXXo4B7bnfnGvXhYwYITeEyQylV/rnH53mdV8wIDAQAB AoGBAKUJN4elr+S9nHP7D6BZNTsJ0Q6eTd0ftfrmx+jVMG8Oh3jh6ZSkG0R5e6iX 0W7I4pgrUWRyWDB98yJy1o+90CAN/D80o8SbmW/zfA2WLBteOujMfCEjNrc/Nodf 6MZ0QQ6PnPH6pp94i3kNmFD8Mlzm+ODrUjPF0dCNf474qeKhAkEA7SXj5cQPyQXM s15oGX5eb6VOk96eAPtEC72cLSh6o+VYmXyGroV1A2JPm6IzH87mTqjWXG229hjt XVvDbdY2uQJBAMxblWFaWJhhU6Y1euazaBl/OyLYlqNz4LZ0RzCulEoV/gMGYU32 PbilD5fpFsyhp5oCxnWNEsUFovYMKjKM3AsCQQCIlOcBoP76ZxWzRK8t56MaKBnu fiuAIzbYkDbPp12i4Wc61wZ2ozR2Y3u4Bh3tturb6M+04hea+1ZSC5StwM85AkAp UPLYpe13kWXaGsHoVqlbTk/kcamzDkCGYufpvcIZYGzkq6uMmZZM+II4klWbtasv BhSdu5Hp54PU/wyg/72VAkBy1/oM3/QJ35Vb6TByHBLFR4nOuORoRclmxcoCPva9 xqkQQn+UgBtOemRXpFCuKaoXonA3nLeB54SWcC6YUOcR -----END RSA PRIVATE KEY-----""" KEY2 = """-----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6 rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn 6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26 qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0 yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9 Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V 
-----END RSA PRIVATE KEY-----""" CERT1 = """-----BEGIN CERTIFICATE----- MIICVzCCAcACCQC72PP7b7H9BTANBgkqhkiG9w0BAQUFADBwMQswCQYDVQQGEwJV UzELMAkGA1UECBMCQ0ExCzAJBgNVBAcTAlNGMQ8wDQYDVQQKEwZDZWxlcnkxDzAN BgNVBAMTBkNFbGVyeTElMCMGCSqGSIb3DQEJARYWY2VydEBjZWxlcnlwcm9qZWN0 Lm9yZzAeFw0xMzA3MjQxMjExMTRaFw0xNDA3MjQxMjExMTRaMHAxCzAJBgNVBAYT AlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0YxDzANBgNVBAoTBkNlbGVyeTEP MA0GA1UEAxMGQ0VsZXJ5MSUwIwYJKoZIhvcNAQkBFhZjZXJ0QGNlbGVyeXByb2pl Y3Qub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9Twh0V5q/R1Q8N+Y+ CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45 //IWz6/BdPFWaPm0rtYbcxZHqXDZScRpvFDLHhMysdqQWHxXVxpqIXXo4B7bnfnG vXhYwYITeEyQylV/rnH53mdV8wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAKA4tD3J 94tsnQxFxHP7Frt7IvGMH+3wMqOiXFgYxPJX2tyaPvOLJ/7ERE4MkrvZO7IRC0iA yKBe0pucdrTgsJoDV8juahuyjXOjvU14+q7Wv7pj7zqddVavzK8STLX4/FMIDnbK aMGJl7wyj6V2yy6ANSbmy0uQjHikI6DrZEoK -----END CERTIFICATE-----""" CERT2 = """-----BEGIN CERTIFICATE----- MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti /G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw== -----END CERTIFICATE-----""" celery-4.1.0/t/unit/security/test_serialization.py0000644000175000017500000000434413130607475022317 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import base64 import os import pytest from kombu.serialization import registry from kombu.utils.encoding import bytes_to_str from celery.exceptions import SecurityError from celery.security.serialization import SecureSerializer, register_auth from celery.security.certificate import Certificate, CertStore from celery.security.key import PrivateKey from . 
import CERT1, CERT2, KEY1, KEY2 from .case import SecurityCase class test_SecureSerializer(SecurityCase): def _get_s(self, key, cert, certs): store = CertStore() for c in certs: store.add_cert(Certificate(c)) return SecureSerializer(PrivateKey(key), Certificate(cert), store) def test_serialize(self): s = self._get_s(KEY1, CERT1, [CERT1]) assert s.deserialize(s.serialize('foo')) == 'foo' def test_deserialize(self): s = self._get_s(KEY1, CERT1, [CERT1]) with pytest.raises(SecurityError): s.deserialize('bad data') def test_unmatched_key_cert(self): s = self._get_s(KEY1, CERT2, [CERT1, CERT2]) with pytest.raises(SecurityError): s.deserialize(s.serialize('foo')) def test_unknown_source(self): s1 = self._get_s(KEY1, CERT1, [CERT2]) s2 = self._get_s(KEY1, CERT1, []) with pytest.raises(SecurityError): s1.deserialize(s1.serialize('foo')) with pytest.raises(SecurityError): s2.deserialize(s2.serialize('foo')) def test_self_send(self): s1 = self._get_s(KEY1, CERT1, [CERT1]) s2 = self._get_s(KEY1, CERT1, [CERT1]) assert s2.deserialize(s1.serialize('foo')) == 'foo' def test_separate_ends(self): s1 = self._get_s(KEY1, CERT1, [CERT2]) s2 = self._get_s(KEY2, CERT2, [CERT1]) assert s2.deserialize(s1.serialize('foo')) == 'foo' def test_register_auth(self): register_auth(KEY1, CERT1, '') assert 'application/data' in registry._decoders def test_lots_of_sign(self): for i in range(1000): rdata = bytes_to_str(base64.urlsafe_b64encode(os.urandom(265))) s = self._get_s(KEY1, CERT1, [CERT1]) assert s.deserialize(s.serialize(rdata)) == rdata celery-4.1.0/t/unit/security/test_certificate.py0000644000175000017500000000541413130607475021723 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, mock, patch, skip from celery.exceptions import SecurityError from celery.security.certificate import Certificate, CertStore, FSCertStore from . 
import CERT1, CERT2, KEY1 from .case import SecurityCase class test_Certificate(SecurityCase): def test_valid_certificate(self): Certificate(CERT1) Certificate(CERT2) def test_invalid_certificate(self): with pytest.raises((SecurityError, TypeError)): Certificate(None) with pytest.raises(SecurityError): Certificate('') with pytest.raises(SecurityError): Certificate('foo') with pytest.raises(SecurityError): Certificate(CERT1[:20] + CERT1[21:]) with pytest.raises(SecurityError): Certificate(KEY1) @skip.todo(reason='cert expired') def test_has_expired(self): assert not Certificate(CERT1).has_expired() def test_has_expired_mock(self): x = Certificate(CERT1) x._cert = Mock(name='cert') assert x.has_expired() is x._cert.has_expired() class test_CertStore(SecurityCase): def test_itercerts(self): cert1 = Certificate(CERT1) cert2 = Certificate(CERT2) certstore = CertStore() for c in certstore.itercerts(): assert False certstore.add_cert(cert1) certstore.add_cert(cert2) for c in certstore.itercerts(): assert c in (cert1, cert2) def test_duplicate(self): cert1 = Certificate(CERT1) certstore = CertStore() certstore.add_cert(cert1) with pytest.raises(SecurityError): certstore.add_cert(cert1) class test_FSCertStore(SecurityCase): @patch('os.path.isdir') @patch('glob.glob') @patch('celery.security.certificate.Certificate') def test_init(self, Certificate, glob, isdir): cert = Certificate.return_value = Mock() cert.has_expired.return_value = False isdir.return_value = True glob.return_value = ['foo.cert'] with mock.open(): cert.get_id.return_value = 1 x = FSCertStore('/var/certs') assert 1 in x._certs glob.assert_called_with('/var/certs/*') # they both end up with the same id glob.return_value = ['foo.cert', 'bar.cert'] with pytest.raises(SecurityError): x = FSCertStore('/var/certs') glob.return_value = ['foo.cert'] cert.has_expired.return_value = True with pytest.raises(SecurityError): x = FSCertStore('/var/certs') isdir.return_value = False with pytest.raises(SecurityError): x = FSCertStore('/var/certs') celery-4.1.0/t/unit/security/test_security.py0000644000175000017500000001011713130607475021304 0ustar omeromer00000000000000"""Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) Generated with: .. 
code-block:: console $ openssl genrsa -des3 -passout pass:test -out key1.key 1024 $ openssl req -new -key key1.key -out key1.csr -passin pass:test $ cp key1.key key1.key.org $ openssl rsa -in key1.key.org -out key1.key -passin pass:test $ openssl x509 -req -days 365 -in cert1.csr \ -signkey key1.key -out cert1.crt $ rm key1.key.org cert1.csr """ from __future__ import absolute_import, unicode_literals import pytest from case import Mock, mock, patch from kombu.serialization import disable_insecure_serializers from celery.exceptions import ImproperlyConfigured, SecurityError from celery.five import builtins from celery.security import disable_untrusted_serializers, setup_security from celery.security.utils import reraise_errors from kombu.serialization import registry from .case import SecurityCase class test_security(SecurityCase): def teardown(self): registry._disabled_content_types.clear() def test_disable_insecure_serializers(self): try: disabled = registry._disabled_content_types assert disabled disable_insecure_serializers( ['application/json', 'application/x-python-serialize'], ) assert 'application/x-yaml' in disabled assert 'application/json' not in disabled assert 'application/x-python-serialize' not in disabled disabled.clear() disable_insecure_serializers(allowed=None) assert 'application/x-yaml' in disabled assert 'application/json' in disabled assert 'application/x-python-serialize' in disabled finally: disable_insecure_serializers(allowed=['json']) @patch('celery.security._disable_insecure_serializers') def test_disable_untrusted_serializers(self, disable): disable_untrusted_serializers(['foo']) disable.assert_called_with(allowed=['foo']) def test_setup_security(self): disabled = registry._disabled_content_types assert len(disabled) == 0 self.app.conf.task_serializer = 'json' self.app.setup_security() assert 'application/x-python-serialize' in disabled disabled.clear() @patch('celery.current_app') def test_setup_security__default_app(self, current_app): setup_security() @patch('celery.security.register_auth') @patch('celery.security._disable_insecure_serializers') def test_setup_registry_complete(self, dis, reg, key='KEY', cert='CERT'): calls = [0] def effect(*args): try: m = Mock() m.read.return_value = 'B' if calls[0] else 'A' return m finally: calls[0] += 1 self.app.conf.task_serializer = 'auth' with mock.open(side_effect=effect): with patch('celery.security.registry') as registry: store = Mock() self.app.setup_security(['json'], key, cert, store) dis.assert_called_with(['json']) reg.assert_called_with('A', 'B', store, 'sha1', 'json') registry._set_default_serializer.assert_called_with('auth') def test_security_conf(self): self.app.conf.task_serializer = 'auth' with pytest.raises(ImproperlyConfigured): self.app.setup_security() _import = builtins.__import__ def import_hook(name, *args, **kwargs): if name == 'OpenSSL': raise ImportError return _import(name, *args, **kwargs) builtins.__import__ = import_hook with pytest.raises(ImproperlyConfigured): self.app.setup_security() builtins.__import__ = _import def test_reraise_errors(self): with pytest.raises(SecurityError): with reraise_errors(errors=(KeyError,)): raise KeyError('foo') with pytest.raises(KeyError): with reraise_errors(errors=(ValueError,)): raise KeyError('bar') celery-4.1.0/t/unit/security/test_key.py0000644000175000017500000000177213130607475020234 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from celery.exceptions import SecurityError from celery.five 
import bytes_if_py2 from celery.security.key import PrivateKey from . import CERT1, KEY1, KEY2 from .case import SecurityCase class test_PrivateKey(SecurityCase): def test_valid_private_key(self): PrivateKey(KEY1) PrivateKey(KEY2) def test_invalid_private_key(self): with pytest.raises((SecurityError, TypeError)): PrivateKey(None) with pytest.raises(SecurityError): PrivateKey('') with pytest.raises(SecurityError): PrivateKey('foo') with pytest.raises(SecurityError): PrivateKey(KEY1[:20] + KEY1[21:]) with pytest.raises(SecurityError): PrivateKey(CERT1) def test_sign(self): pkey = PrivateKey(KEY1) pkey.sign('test', bytes_if_py2('sha1')) with pytest.raises(ValueError): pkey.sign('test', bytes_if_py2('unknown')) celery-4.1.0/t/unit/backends/0000755000175000017500000000000013135426347015731 5ustar omeromer00000000000000celery-4.1.0/t/unit/backends/test_dynamodb.py0000644000175000017500000002032113130607475021133 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from decimal import Decimal import pytest from case import MagicMock, Mock, patch, sentinel, skip from celery.backends import dynamodb as module from celery.backends.dynamodb import DynamoDBBackend from celery.exceptions import ImproperlyConfigured from celery.five import string @skip.unless_module('boto3') class test_DynamoDBBackend: def setup(self): self._static_timestamp = Decimal(1483425566.52) # noqa self.app.conf.result_backend = 'dynamodb://' @property def backend(self): """:rtype: DynamoDBBackend""" return self.app.backend def test_init_no_boto3(self): prev, module.boto3 = module.boto3, None try: with pytest.raises(ImproperlyConfigured): DynamoDBBackend(app=self.app) finally: module.boto3 = prev def test_init_aws_credentials(self): with pytest.raises(ImproperlyConfigured): DynamoDBBackend( app=self.app, url='dynamodb://a:@' ) def test_get_client_local(self): table_creation_path = \ 'celery.backends.dynamodb.DynamoDBBackend._get_or_create_table' with patch('boto3.client') as mock_boto_client, \ patch(table_creation_path): backend = DynamoDBBackend( app=self.app, url='dynamodb://@localhost:8000' ) client = backend._get_client() assert backend.client is client mock_boto_client.assert_called_once_with( 'dynamodb', endpoint_url='http://localhost:8000', region_name='us-east-1' ) assert backend.endpoint_url == 'http://localhost:8000' def test_get_client_credentials(self): table_creation_path = \ 'celery.backends.dynamodb.DynamoDBBackend._get_or_create_table' with patch('boto3.client') as mock_boto_client, \ patch(table_creation_path): backend = DynamoDBBackend( app=self.app, url='dynamodb://key:secret@test' ) client = backend._get_client() assert client is backend.client mock_boto_client.assert_called_once_with( 'dynamodb', aws_access_key_id='key', aws_secret_access_key='secret', region_name='test' ) assert backend.aws_region == 'test' def test_get_or_create_table_not_exists(self): self.backend._client = MagicMock() mock_create_table = self.backend._client.create_table = MagicMock() mock_describe_table = self.backend._client.describe_table = \ MagicMock() mock_describe_table.return_value = { 'Table': { 'TableStatus': 'ACTIVE' } } self.backend._get_or_create_table() mock_create_table.assert_called_once_with( **self.backend._get_table_schema() ) def test_get_or_create_table_already_exists(self): from botocore.exceptions import ClientError self.backend._client = MagicMock() mock_create_table = self.backend._client.create_table = MagicMock() client_error = ClientError( { 'Error': { 
'Code': 'ResourceInUseException', 'Message': 'Table already exists: {}'.format( self.backend.table_name ) } }, 'CreateTable' ) mock_create_table.side_effect = client_error mock_describe_table = self.backend._client.describe_table = \ MagicMock() mock_describe_table.return_value = { 'Table': { 'TableStatus': 'ACTIVE' } } self.backend._get_or_create_table() mock_describe_table.assert_called_once_with( TableName=self.backend.table_name ) def test_wait_for_table_status(self): self.backend._client = MagicMock() mock_describe_table = self.backend._client.describe_table = \ MagicMock() mock_describe_table.side_effect = [ {'Table': { 'TableStatus': 'CREATING' }}, {'Table': { 'TableStatus': 'SOME_STATE' }} ] self.backend._wait_for_table_status(expected='SOME_STATE') assert mock_describe_table.call_count == 2 def test_prepare_get_request(self): expected = { 'TableName': u'celery', 'Key': {u'id': {u'S': u'abcdef'}} } assert self.backend._prepare_get_request('abcdef') == expected def test_prepare_put_request(self): expected = { 'TableName': u'celery', 'Item': { u'id': {u'S': u'abcdef'}, u'result': {u'B': u'val'}, u'timestamp': { u'N': str(Decimal(self._static_timestamp)) } } } with patch('celery.backends.dynamodb.time', self._mock_time): result = self.backend._prepare_put_request('abcdef', 'val') assert result == expected def test_item_to_dict(self): boto_response = { 'Item': { 'id': { 'S': sentinel.key }, 'result': { 'B': sentinel.value }, 'timestamp': { 'N': Decimal(1) } } } converted = self.backend._item_to_dict(boto_response) assert converted == { 'id': sentinel.key, 'result': sentinel.value, 'timestamp': Decimal(1) } def test_get(self): self.backend._client = Mock(name='_client') self.backend._client.get_item = MagicMock() assert self.backend.get('1f3fab') is None self.backend.client.get_item.assert_called_once_with( Key={u'id': {u'S': u'1f3fab'}}, TableName='celery' ) def _mock_time(self): return self._static_timestamp def test_set(self): self.backend._client = MagicMock() self.backend._client.put_item = MagicMock() # should return None with patch('celery.backends.dynamodb.time', self._mock_time): assert self.backend.set(sentinel.key, sentinel.value) is None assert self.backend._client.put_item.call_count == 1 _, call_kwargs = self.backend._client.put_item.call_args expected_kwargs = dict( Item={ u'timestamp': {u'N': str(self._static_timestamp)}, u'id': {u'S': string(sentinel.key)}, u'result': {u'B': sentinel.value} }, TableName='celery' ) assert call_kwargs['Item'] == expected_kwargs['Item'] assert call_kwargs['TableName'] == 'celery' def test_delete(self): self.backend._client = Mock(name='_client') mocked_delete = self.backend._client.delete = Mock('client.delete') mocked_delete.return_value = None # should return None assert self.backend.delete('1f3fab') is None self.backend.client.delete_item.assert_called_once_with( Key={u'id': {u'S': u'1f3fab'}}, TableName='celery' ) def test_backend_by_url(self, url='dynamodb://'): from celery.app import backends from celery.backends.dynamodb import DynamoDBBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is DynamoDBBackend assert url_ == url def test_backend_params_by_url(self): self.app.conf.result_backend = \ 'dynamodb://@us-east-1/celery_results?read=10&write=20' assert self.backend.aws_region == 'us-east-1' assert self.backend.table_name == 'celery_results' assert self.backend.read_capacity_units == 10 assert self.backend.write_capacity_units == 20 assert self.backend.endpoint_url is None 
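# --- Added configuration sketch (not part of the original test module) ---
# How the dynamodb:// result-backend URL verified above decomposes into
# backend settings, per test_get_client_credentials and
# test_backend_params_by_url. The app name and credentials are placeholders.
from celery import Celery

app = Celery(
    'example',
    # access key, secret, region, table and capacity units all ride in the URL
    backend='dynamodb://key:secret@us-east-1/celery_results?read=10&write=20',
)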
celery-4.1.0/t/unit/backends/__init__.py0000644000175000017500000000000013130607475020026 0ustar omeromer00000000000000celery-4.1.0/t/unit/backends/test_riak.py0000644000175000017500000000746513130607475020302 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pytest from case import MagicMock, Mock, patch, sentinel, skip from celery.backends import riak as module from celery.backends.riak import RiakBackend from celery.exceptions import ImproperlyConfigured RIAK_BUCKET = 'riak_bucket' @skip.unless_module('riak') class test_RiakBackend: def setup(self): self.app.conf.result_backend = 'riak://' @property def backend(self): return self.app.backend def test_init_no_riak(self): prev, module.riak = module.riak, None try: with pytest.raises(ImproperlyConfigured): RiakBackend(app=self.app) finally: module.riak = prev def test_init_no_settings(self): self.app.conf.riak_backend_settings = [] with pytest.raises(ImproperlyConfigured): RiakBackend(app=self.app) def test_init_settings_is_None(self): self.app.conf.riak_backend_settings = None assert self.app.backend def test_get_client_client_exists(self): with patch('riak.client.RiakClient') as mock_connection: self.backend._client = sentinel._client mocked_is_alive = self.backend._client.is_alive = Mock() mocked_is_alive.return_value.value = True client = self.backend._get_client() assert sentinel._client == client mock_connection.assert_not_called() def test_get(self): self.app.conf.couchbase_backend_settings = {} self.backend._client = Mock(name='_client') self.backend._bucket = Mock(name='_bucket') mocked_get = self.backend._bucket.get = Mock(name='bucket.get') mocked_get.return_value.data = sentinel.retval # should return None assert self.backend.get('1f3fab') == sentinel.retval self.backend._bucket.get.assert_called_once_with('1f3fab') def test_set(self): self.app.conf.couchbase_backend_settings = None self.backend._client = MagicMock() self.backend._bucket = MagicMock() self.backend._bucket.set = MagicMock() # should return None assert self.backend.set(sentinel.key, sentinel.value) is None def test_delete(self): self.app.conf.couchbase_backend_settings = {} self.backend._client = Mock(name='_client') self.backend._bucket = Mock(name='_bucket') mocked_delete = self.backend._client.delete = Mock('client.delete') mocked_delete.return_value = None # should return None assert self.backend.delete('1f3fab') is None self.backend._bucket.delete.assert_called_once_with('1f3fab') def test_config_params(self): self.app.conf.riak_backend_settings = { 'bucket': 'mycoolbucket', 'host': 'there.host.com', 'port': '1234', } assert self.backend.bucket_name == 'mycoolbucket' assert self.backend.host == 'there.host.com' assert self.backend.port == 1234 def test_backend_by_url(self, url='riak://myhost/mycoolbucket'): from celery.app import backends from celery.backends.riak import RiakBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is RiakBackend assert url_ == url def test_backend_params_by_url(self): self.app.conf.result_backend = 'riak://myhost:123/mycoolbucket' assert self.backend.bucket_name == 'mycoolbucket' assert self.backend.host == 'myhost' assert self.backend.port == 123 def test_non_ASCII_bucket_raises(self): self.app.conf.riak_backend_settings = { 'bucket': 'héhé', 'host': 'there.host.com', 'port': '1234', } with pytest.raises(ValueError): RiakBackend(app=self.app) 
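# --- Added configuration sketch (not part of the original test module) ---
# The two equivalent ways the tests above configure the Riak backend: a
# riak:// URL or the riak_backend_settings dict. The values are the
# illustrative ones from test_backend_params_by_url and test_config_params.
from celery import Celery

app = Celery('example', backend='riak://myhost:123/mycoolbucket')
# ...or via explicit settings (bucket names must be ASCII,
# see test_non_ASCII_bucket_raises):
app.conf.riak_backend_settings = {
    'bucket': 'mycoolbucket',
    'host': 'there.host.com',
    'port': '1234',
}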
celery-4.1.0/t/unit/backends/test_elasticsearch.py0000644000175000017500000001067213130607475022160 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, sentinel, skip from celery.app import backends from celery.backends import elasticsearch as module from celery.backends.elasticsearch import ElasticsearchBackend from celery.exceptions import ImproperlyConfigured @skip.unless_module('elasticsearch') class test_ElasticsearchBackend: def setup(self): self.backend = ElasticsearchBackend(app=self.app) def test_init_no_elasticsearch(self): prev, module.elasticsearch = module.elasticsearch, None try: with pytest.raises(ImproperlyConfigured): ElasticsearchBackend(app=self.app) finally: module.elasticsearch = prev def test_get(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.get = Mock() # expected result r = dict(found=True, _source={'result': sentinel.result}) x._server.get.return_value = r dict_result = x.get(sentinel.task_id) assert dict_result == sentinel.result x._server.get.assert_called_once_with( doc_type=x.doc_type, id=sentinel.task_id, index=x.index, ) def test_get_none(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.get = Mock() x._server.get.return_value = sentinel.result none_result = x.get(sentinel.task_id) assert none_result is None x._server.get.assert_called_once_with( doc_type=x.doc_type, id=sentinel.task_id, index=x.index, ) def test_delete(self): x = ElasticsearchBackend(app=self.app) x._server = Mock() x._server.delete = Mock() x._server.delete.return_value = sentinel.result assert x.delete(sentinel.task_id) is None x._server.delete.assert_called_once_with( doc_type=x.doc_type, id=sentinel.task_id, index=x.index, ) def test_backend_by_url(self, url='elasticsearch://localhost:9200/index'): backend, url_ = backends.by_url(url, self.app.loader) assert backend is ElasticsearchBackend assert url_ == url def test_backend_params_by_url(self): url = 'elasticsearch://localhost:9200/index/doc_type' with self.Celery(backend=url) as app: x = app.backend assert x.index == 'index' assert x.doc_type == 'doc_type' assert x.scheme == 'elasticsearch' assert x.host == 'localhost' assert x.port == 9200 def test_index(self): x = ElasticsearchBackend(app=self.app) x.doc_type = 'test-doc-type' x._server = Mock() x._server.index = Mock() expected_result = dict( _id=sentinel.task_id, _source={'result': sentinel.result} ) x._server.index.return_value = expected_result body = {"field1": "value1"} x._index( id=str(sentinel.task_id).encode(), body=body, kwarg1='test1' ) x._server.index.assert_called_once_with( id=str(sentinel.task_id), doc_type=x.doc_type, index=x.index, body=body, kwarg1='test1' ) def test_index_bytes_key(self): x = ElasticsearchBackend(app=self.app) x.doc_type = 'test-doc-type' x._server = Mock() x._server.index = Mock() expected_result = dict( _id=sentinel.task_id, _source={'result': sentinel.result} ) x._server.index.return_value = expected_result body = {b"field1": "value1"} x._index( id=str(sentinel.task_id).encode(), body=body, kwarg1='test1' ) x._server.index.assert_called_once_with( id=str(sentinel.task_id), doc_type=x.doc_type, index=x.index, body={"field1": "value1"}, kwarg1='test1' ) def test_config_params(self): self.app.conf.elasticsearch_max_retries = 10 self.app.conf.elasticsearch_timeout = 20.0 self.app.conf.elasticsearch_retry_on_timeout = True self.backend = ElasticsearchBackend(app=self.app) assert self.backend.es_max_retries == 10 assert 
self.backend.es_timeout == 20.0 assert self.backend.es_retry_on_timeout is True celery-4.1.0/t/unit/backends/test_rpc.py0000644000175000017500000000535113130607475020130 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from celery import chord, group from celery.backends.rpc import RPCBackend from celery._state import _task_stack class test_RPCBackend: def setup(self): self.b = RPCBackend(app=self.app) def test_oid(self): oid = self.b.oid oid2 = self.b.oid assert oid == oid2 assert oid == self.app.oid def test_interface(self): self.b.on_reply_declare('task_id') def test_ensure_chords_allowed(self): with pytest.raises(NotImplementedError): self.b.ensure_chords_allowed() def test_apply_chord(self): with pytest.raises(NotImplementedError): self.b.apply_chord([], (), 'gid', Mock(name='body')) @pytest.mark.celery(result_backend='rpc') def test_chord_raises_error(self): with pytest.raises(NotImplementedError): chord(self.add.s(i, i) for i in range(10))(self.add.s([2])) @pytest.mark.celery(result_backend='rpc') def test_chain_with_chord_raises_error(self): with pytest.raises(NotImplementedError): (self.add.s(2, 2) | group(self.add.s(2, 2), self.add.s(5, 6)) | self.add.s()).delay() def test_destination_for(self): req = Mock(name='request') req.reply_to = 'reply_to' req.correlation_id = 'corid' assert self.b.destination_for('task_id', req) == ('reply_to', 'corid') task = Mock() _task_stack.push(task) try: task.request.reply_to = 'reply_to' task.request.correlation_id = 'corid' assert self.b.destination_for('task_id', None) == ( 'reply_to', 'corid', ) finally: _task_stack.pop() with pytest.raises(RuntimeError): self.b.destination_for('task_id', None) def test_binding(self): queue = self.b.binding assert queue.name == self.b.oid assert queue.exchange == self.b.exchange assert queue.routing_key == self.b.oid assert not queue.durable assert queue.auto_delete def test_create_binding(self): assert self.b._create_binding('id') == self.b.binding def test_on_task_call(self): with patch('celery.backends.rpc.maybe_declare') as md: with self.app.amqp.producer_pool.acquire() as prod: self.b.on_task_call(prod, 'task_id'), md.assert_called_with( self.b.binding(prod.channel), retry=True, ) def test_create_exchange(self): ex = self.b._create_exchange('name') assert isinstance(ex, self.b.Exchange) assert ex.name == '' celery-4.1.0/t/unit/backends/test_couchdb.py0000644000175000017500000000534113130607475020752 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import MagicMock, Mock, sentinel, skip from celery.app import backends from celery.backends import couchdb as module from celery.backends.couchdb import CouchBackend from celery.exceptions import ImproperlyConfigured try: import pycouchdb except ImportError: pycouchdb = None # noqa COUCHDB_CONTAINER = 'celery_container' @skip.unless_module('pycouchdb') class test_CouchBackend: def setup(self): self.Server = self.patching('pycouchdb.Server') self.backend = CouchBackend(app=self.app) def test_init_no_pycouchdb(self): """test init no pycouchdb raises""" prev, module.pycouchdb = module.pycouchdb, None try: with pytest.raises(ImproperlyConfigured): CouchBackend(app=self.app) finally: module.pycouchdb = prev def test_get_container_exists(self): self.backend._connection = sentinel._connection connection = self.backend.connection assert connection is sentinel._connection self.Server.assert_not_called() def test_get(self): 
"""test_get CouchBackend.get should return and take two params db conn to couchdb is mocked. TODO Should test on key not exists """ x = CouchBackend(app=self.app) x._connection = Mock() get = x._connection.get = MagicMock() # should return None assert x.get('1f3fab') == get.return_value['value'] x._connection.get.assert_called_once_with('1f3fab') def test_delete(self): """test_delete CouchBackend.delete should return and take two params db conn to pycouchdb is mocked. TODO Should test on key not exists """ x = CouchBackend(app=self.app) x._connection = Mock() mocked_delete = x._connection.delete = Mock() mocked_delete.return_value = None # should return None assert x.delete('1f3fab') is None x._connection.delete.assert_called_once_with('1f3fab') def test_backend_by_url(self, url='couchdb://myhost/mycoolcontainer'): from celery.backends.couchdb import CouchBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is CouchBackend assert url_ == url def test_backend_params_by_url(self): url = 'couchdb://johndoe:mysecret@myhost:123/mycoolcontainer' with self.Celery(backend=url) as app: x = app.backend assert x.container == 'mycoolcontainer' assert x.host == 'myhost' assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.port == 123 celery-4.1.0/t/unit/backends/test_redis.py0000644000175000017500000003456013130607475020456 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import ssl from datetime import timedelta from contextlib import contextmanager from pickle import loads, dumps from case import ANY, ContextMock, Mock, mock, call, patch, skip from celery import signature from celery import states from celery import uuid from celery.canvas import Signature from celery.exceptions import ( ChordError, CPendingDeprecationWarning, ImproperlyConfigured, ) from celery.utils.collections import AttributeDict def raise_on_second_call(mock, exc, *retval): def on_first_call(*args, **kwargs): mock.side_effect = exc return mock.return_value mock.side_effect = on_first_call if retval: mock.return_value, = retval class Connection(object): connected = True def disconnect(self): self.connected = False class Pipeline(object): def __init__(self, client): self.client = client self.steps = [] def __getattr__(self, attr): def add_step(*args, **kwargs): self.steps.append((getattr(self.client, attr), args, kwargs)) return self return add_step def __enter__(self): return self def __exit__(self, type, value, traceback): pass def execute(self): return [step(*a, **kw) for step, a, kw in self.steps] class Redis(mock.MockCallbacks): Connection = Connection Pipeline = Pipeline def __init__(self, host=None, port=None, db=None, password=None, **kw): self.host = host self.port = port self.db = db self.password = password self.keyspace = {} self.expiry = {} self.connection = self.Connection() def get(self, key): return self.keyspace.get(key) def setex(self, key, expires, value): self.set(key, value) self.expire(key, expires) def set(self, key, value): self.keyspace[key] = value def expire(self, key, expires): self.expiry[key] = expires return expires def delete(self, key): return bool(self.keyspace.pop(key, None)) def pipeline(self): return self.Pipeline(self) def _get_list(self, key): try: return self.keyspace[key] except KeyError: l = self.keyspace[key] = [] return l def rpush(self, key, value): self._get_list(key).append(value) def lrange(self, key, start, stop): return self._get_list(key)[start:stop] def llen(self, key): return 
len(self.keyspace.get(key) or []) class redis(object): StrictRedis = Redis class ConnectionPool(object): def __init__(self, **kwargs): pass class UnixDomainSocketConnection(object): def __init__(self, **kwargs): pass class test_RedisBackend: def get_backend(self): from celery.backends.redis import RedisBackend class _RedisBackend(RedisBackend): redis = redis return _RedisBackend def get_E_LOST(self): from celery.backends.redis import E_LOST return E_LOST def setup(self): self.Backend = self.get_backend() self.E_LOST = self.get_E_LOST() self.b = self.Backend(app=self.app) @pytest.mark.usefixtures('depends_on_current_app') @skip.unless_module('redis') def test_reduce(self): from celery.backends.redis import RedisBackend x = RedisBackend(app=self.app) assert loads(dumps(x)) def test_no_redis(self): self.Backend.redis = None with pytest.raises(ImproperlyConfigured): self.Backend(app=self.app) def test_url(self): self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'redis://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 def test_socket_url(self): self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'socket:///tmp/redis.sock?virtual_host=/3', app=self.app, ) assert x.connparams assert x.connparams['path'] == '/tmp/redis.sock' assert (x.connparams['connection_class'] is redis.UnixDomainSocketConnection) assert 'host' not in x.connparams assert 'port' not in x.connparams assert x.connparams['socket_timeout'] == 30.0 assert 'socket_connect_timeout' not in x.connparams assert x.connparams['db'] == 3 @skip.unless_module('redis') def test_backend_ssl(self): self.app.conf.redis_backend_use_ssl = { 'ssl_cert_reqs': ssl.CERT_REQUIRED, 'ssl_ca_certs': '/path/to/ca.crt', 'ssl_certfile': '/path/to/client.crt', 'ssl_keyfile': '/path/to/client.key', } self.app.conf.redis_socket_timeout = 30.0 self.app.conf.redis_socket_connect_timeout = 100.0 x = self.Backend( 'redis://:bosco@vandelay.com:123//1', app=self.app, ) assert x.connparams assert x.connparams['host'] == 'vandelay.com' assert x.connparams['db'] == 1 assert x.connparams['port'] == 123 assert x.connparams['password'] == 'bosco' assert x.connparams['socket_timeout'] == 30.0 assert x.connparams['socket_connect_timeout'] == 100.0 assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt' assert x.connparams['ssl_certfile'] == '/path/to/client.crt' assert x.connparams['ssl_keyfile'] == '/path/to/client.key' from redis.connection import SSLConnection assert x.connparams['connection_class'] is SSLConnection def test_compat_propertie(self): x = self.Backend( 'redis://:bosco@vandelay.com:123//1', app=self.app, ) with pytest.warns(CPendingDeprecationWarning): assert x.host == 'vandelay.com' with pytest.warns(CPendingDeprecationWarning): assert x.db == 1 with pytest.warns(CPendingDeprecationWarning): assert x.port == 123 with pytest.warns(CPendingDeprecationWarning): assert x.password == 'bosco' def test_conf_raises_KeyError(self): self.app.conf = AttributeDict({ 'result_serializer': 'json', 'result_cache_max': 1, 'result_expires': None, 'accept_content': ['json'], }) self.Backend(app=self.app) 
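# Note on the fakes above (added commentary, not original code): Pipeline
# buffers every call as a (bound client method, args, kwargs) step through
# __getattr__ and only replays the steps in execute(), mirroring how a real
# redis-py pipeline defers commands; the Redis stub backs get/set/setex/
# expire/delete with plain keyspace/expiry dicts so the assertions in
# test_RedisBackend need no running server.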
@patch('celery.backends.redis.logger') def test_on_connection_error(self, logger): intervals = iter([10, 20, 30]) exc = KeyError() assert self.b.on_connection_error(None, exc, intervals, 1) == 10 logger.error.assert_called_with( self.E_LOST, 1, 'Inf', 'in 10.00 seconds') assert self.b.on_connection_error(10, exc, intervals, 2) == 20 logger.error.assert_called_with(self.E_LOST, 2, 10, 'in 20.00 seconds') assert self.b.on_connection_error(10, exc, intervals, 3) == 30 logger.error.assert_called_with(self.E_LOST, 3, 10, 'in 30.00 seconds') def test_incr(self): self.b.client = Mock(name='client') self.b.incr('foo') self.b.client.incr.assert_called_with('foo') def test_expire(self): self.b.client = Mock(name='client') self.b.expire('foo', 300) self.b.client.expire.assert_called_with('foo', 300) def test_apply_chord(self): header = Mock(name='header') header.results = [Mock(name='t1'), Mock(name='t2')] self.b.apply_chord( header, (1, 2), 'gid', None, options={'max_retries': 10}, ) header.assert_called_with(1, 2, max_retries=10, task_id='gid') def test_unpack_chord_result(self): self.b.exception_to_python = Mock(name='etp') decode = Mock(name='decode') exc = KeyError() tup = decode.return_value = (1, 'id1', states.FAILURE, exc) with pytest.raises(ChordError): self.b._unpack_chord_result(tup, decode) decode.assert_called_with(tup) self.b.exception_to_python.assert_called_with(exc) exc = ValueError() tup = decode.return_value = (2, 'id2', states.RETRY, exc) ret = self.b._unpack_chord_result(tup, decode) self.b.exception_to_python.assert_called_with(exc) assert ret is self.b.exception_to_python() def test_on_chord_part_return_no_gid_or_tid(self): request = Mock(name='request') request.id = request.group = None assert self.b.on_chord_part_return(request, 'SUCCESS', 10) is None def test_ConnectionPool(self): self.b.redis = Mock(name='redis') assert self.b._ConnectionPool is None assert self.b.ConnectionPool is self.b.redis.ConnectionPool assert self.b.ConnectionPool is self.b.redis.ConnectionPool def test_expires_defaults_to_config(self): self.app.conf.result_expires = 10 b = self.Backend(expires=None, app=self.app) assert b.expires == 10 def test_expires_is_int(self): b = self.Backend(expires=48, app=self.app) assert b.expires == 48 def test_add_to_chord(self): b = self.Backend('redis://', app=self.app) gid = uuid() b.add_to_chord(gid, 'sig') b.client.incr.assert_called_with(b.get_key_for_group(gid, '.t'), 1) def test_expires_is_None(self): b = self.Backend(expires=None, app=self.app) assert b.expires == self.app.conf.result_expires.total_seconds() def test_expires_is_timedelta(self): b = self.Backend(expires=timedelta(minutes=1), app=self.app) assert b.expires == 60 def test_mget(self): assert self.b.mget(['a', 'b', 'c']) self.b.client.mget.assert_called_with(['a', 'b', 'c']) def test_set_no_expire(self): self.b.expires = None self.b.set('foo', 'bar') def create_task(self): tid = uuid() task = Mock(name='task-{0}'.format(tid)) task.name = 'foobarbaz' self.app.tasks['foobarbaz'] = task task.request.chord = signature(task) task.request.id = tid task.request.chord['chord_size'] = 10 task.request.group = 'group_id' return task @patch('celery.result.GroupResult.restore') def test_on_chord_part_return(self, restore): tasks = [self.create_task() for i in range(10)] for i in range(10): self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i) assert self.b.client.rpush.call_count self.b.client.rpush.reset_mock() assert self.b.client.lrange.call_count jkey = self.b.get_key_for_group('group_id', '.j') 
tkey = self.b.get_key_for_group('group_id', '.t') self.b.client.delete.assert_has_calls([call(jkey), call(tkey)]) self.b.client.expire.assert_has_calls([ call(jkey, 86400), call(tkey, 86400), ]) def test_on_chord_part_return__success(self): with self.chord_context(2) as (_, request, callback): self.b.on_chord_part_return(request, states.SUCCESS, 10) callback.delay.assert_not_called() self.b.on_chord_part_return(request, states.SUCCESS, 20) callback.delay.assert_called_with([10, 20]) def test_on_chord_part_return__callback_raises(self): with self.chord_context(1) as (_, request, callback): callback.delay.side_effect = KeyError(10) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__ChordError(self): with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, ChordError()) self.b.client.pipeline.return_value.rpush().llen().get().expire( ).expire().execute.return_value = (1, 1, 0, 4, 5) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) def test_on_chord_part_return__other_error(self): with self.chord_context(1) as (_, request, callback): self.b.client.pipeline = ContextMock() raise_on_second_call(self.b.client.pipeline, RuntimeError()) self.b.client.pipeline.return_value.rpush().llen().get().expire( ).expire().execute.return_value = (1, 1, 0, 4, 5) task = self.app._tasks['add'] = Mock(name='add_task') self.b.on_chord_part_return(request, states.SUCCESS, 10) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=ANY, ) @contextmanager def chord_context(self, size=1): with patch('celery.backends.redis.maybe_signature') as ms: tasks = [self.create_task() for i in range(size)] request = Mock(name='request') request.id = 'id1' request.group = 'gid1' callback = ms.return_value = Signature('add') callback.id = 'id1' callback['chord_size'] = size callback.delay = Mock(name='callback.delay') yield tasks, request, callback def test_process_cleanup(self): self.b.process_cleanup() def test_get_set_forget(self): tid = uuid() self.b.store_result(tid, 42, states.SUCCESS) assert self.b.get_state(tid) == states.SUCCESS assert self.b.get_result(tid) == 42 self.b.forget(tid) assert self.b.get_state(tid) == states.PENDING def test_set_expires(self): self.b = self.Backend(expires=512, app=self.app) tid = uuid() key = self.b.get_key_for_task(tid) self.b.store_result(tid, 42, states.SUCCESS) self.b.client.expire.assert_called_with( key, 512, ) celery-4.1.0/t/unit/backends/test_amqp.py0000644000175000017500000002127613130607475020306 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle import pytest from contextlib import contextmanager from datetime import timedelta from pickle import dumps, loads from case import Mock, mock from billiard.einfo import ExceptionInfo from celery import states from celery import uuid from celery.backends.amqp import AMQPBackend from celery.five import Empty, Queue, range from celery.result import AsyncResult class SomeClass(object): def __init__(self, data): self.data = data class test_AMQPBackend: def setup(self): self.app.conf.result_cache_max = 100 def create_backend(self, **opts): opts = dict(dict(serializer='pickle', 
persistent=True), **opts) return AMQPBackend(self.app, **opts) def test_destination_for(self): b = self.create_backend() request = Mock() assert b.destination_for('id', request) == ( b.rkey('id'), request.correlation_id, ) def test_store_result__no_routing_key(self): b = self.create_backend() b.destination_for = Mock() b.destination_for.return_value = None, None b.store_result('id', None, states.SUCCESS) def test_mark_as_done(self): tb1 = self.create_backend(max_cached_results=1) tb2 = self.create_backend(max_cached_results=1) tid = uuid() tb1.mark_as_done(tid, 42) assert tb2.get_state(tid) == states.SUCCESS assert tb2.get_result(tid) == 42 assert tb2._cache.get(tid) assert tb2.get_result(tid) == 42 @pytest.mark.usefixtures('depends_on_current_app') def test_pickleable(self): assert loads(dumps(self.create_backend())) def test_revive(self): tb = self.create_backend() tb.revive(None) def test_is_pickled(self): tb1 = self.create_backend() tb2 = self.create_backend() tid2 = uuid() result = {'foo': 'baz', 'bar': SomeClass(12345)} tb1.mark_as_done(tid2, result) # is serialized properly. rindb = tb2.get_result(tid2) assert rindb.get('foo') == 'baz' assert rindb.get('bar').data == 12345 def test_mark_as_failure(self): tb1 = self.create_backend() tb2 = self.create_backend() tid3 = uuid() try: raise KeyError('foo') except KeyError as exception: einfo = ExceptionInfo() tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback) assert tb2.get_state(tid3) == states.FAILURE assert isinstance(tb2.get_result(tid3), KeyError) assert tb2.get_traceback(tid3) == einfo.traceback def test_repair_uuid(self): from celery.backends.amqp import repair_uuid for i in range(10): tid = uuid() assert repair_uuid(tid.replace('-', '')) == tid def test_expires_is_int(self): b = self.create_backend(expires=48) q = b._create_binding('x1y2z3') assert q.expires == 48 def test_expires_is_float(self): b = self.create_backend(expires=48.3) q = b._create_binding('x1y2z3') assert q.expires == 48.3 def test_expires_is_timedelta(self): b = self.create_backend(expires=timedelta(minutes=1)) q = b._create_binding('x1y2z3') assert q.expires == 60 @mock.sleepdeprived() def test_store_result_retries(self): iterations = [0] stop_raising_at = [5] def publish(*args, **kwargs): if iterations[0] > stop_raising_at[0]: return iterations[0] += 1 raise KeyError('foo') backend = AMQPBackend(self.app) from celery.app.amqp import Producer prod, Producer.publish = Producer.publish, publish try: with pytest.raises(KeyError): backend.retry_policy['max_retries'] = None backend.store_result('foo', 'bar', 'STARTED') with pytest.raises(KeyError): backend.retry_policy['max_retries'] = 10 backend.store_result('foo', 'bar', 'STARTED') finally: Producer.publish = prod def test_poll_no_messages(self): b = self.create_backend() assert b.get_task_meta(uuid())['status'] == states.PENDING @contextmanager def _result_context(self): results = Queue() class Message(object): acked = 0 requeued = 0 def __init__(self, **merge): self.payload = dict({'status': states.STARTED, 'result': None}, **merge) self.properties = {'correlation_id': merge.get('task_id')} self.body = pickle.dumps(self.payload) self.content_type = 'application/x-python-serialize' self.content_encoding = 'binary' def ack(self, *args, **kwargs): self.acked += 1 def requeue(self, *args, **kwargs): self.requeued += 1 class MockBinding(object): def __init__(self, *args, **kwargs): self.channel = Mock() def __call__(self, *args, **kwargs): return self def declare(self): pass def get(self, no_ack=False, 
accept=None): try: m = results.get(block=False) if m: m.accept = accept return m except Empty: pass def is_bound(self): return True class MockBackend(AMQPBackend): Queue = MockBinding backend = MockBackend(self.app, max_cached_results=100) backend._republish = Mock() yield results, backend, Message def test_backlog_limit_exceeded(self): with self._result_context() as (results, backend, Message): for i in range(1001): results.put(Message(task_id='id', status=states.RECEIVED)) with pytest.raises(backend.BacklogLimitExceeded): backend.get_task_meta('id') def test_poll_result(self): with self._result_context() as (results, backend, Message): tid = uuid() # FFWD's to the latest state. state_messages = [ Message(task_id=tid, status=states.RECEIVED, seq=1), Message(task_id=tid, status=states.STARTED, seq=2), Message(task_id=tid, status=states.FAILURE, seq=3), ] for state_message in state_messages: results.put(state_message) r1 = backend.get_task_meta(tid) # FFWDs to the last state. assert r1['status'] == states.FAILURE assert r1['seq'] == 3 # Caches last known state. tid = uuid() results.put(Message(task_id=tid)) backend.get_task_meta(tid) assert tid in backend._cache  # caches last known state assert state_messages[-1].requeued # Returns cache if no new states. results.queue.clear() assert not results.qsize() backend._cache[tid] = 'hello' assert backend.get_task_meta(tid) == 'hello' def test_drain_events_decodes_exceptions_in_meta(self): tid = uuid() b = self.create_backend(serializer='json') b.store_result(tid, RuntimeError('aap'), states.FAILURE) result = AsyncResult(tid, backend=b) with pytest.raises(Exception) as excinfo: result.get() assert excinfo.value.__class__.__name__ == 'RuntimeError' assert str(excinfo.value) == 'aap' def test_no_expires(self): b = self.create_backend(expires=None) app = self.app app.conf.result_expires = None b = self.create_backend(expires=None) q = b._create_binding('foo') assert q.expires is None def test_process_cleanup(self): self.create_backend().process_cleanup() def test_reload_task_result(self): with pytest.raises(NotImplementedError): self.create_backend().reload_task_result('x') def test_reload_group_result(self): with pytest.raises(NotImplementedError): self.create_backend().reload_group_result('x') def test_save_group(self): with pytest.raises(NotImplementedError): self.create_backend().save_group('x', 'x') def test_restore_group(self): with pytest.raises(NotImplementedError): self.create_backend().restore_group('x') def test_delete_group(self): with pytest.raises(NotImplementedError): self.create_backend().delete_group('x') celery-4.1.0/t/unit/backends/test_cassandra.py0000644000175000017500000001340213130607475021277 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from pickle import loads, dumps from datetime import datetime from case import Mock, mock from celery import states from celery.exceptions import ImproperlyConfigured from celery.utils.objects import Bunch CASSANDRA_MODULES = ['cassandra', 'cassandra.auth', 'cassandra.cluster'] @mock.module(*CASSANDRA_MODULES) class test_CassandraBackend: def setup(self): self.app.conf.update( cassandra_servers=['example.com'], cassandra_keyspace='celery', cassandra_table='task_results', ) def test_init_no_cassandra(self, *modules): # should raise ImproperlyConfigured when no python-driver # installed. 
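        # The guard below swaps ``mod.cassandra`` out for None and restores
        # the real module in ``finally`` so later tests are unaffected; this
        # swap-and-restore idiom recurs throughout these backend tests.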
from celery.backends import cassandra as mod prev, mod.cassandra = mod.cassandra, None try: with pytest.raises(ImproperlyConfigured): mod.CassandraBackend(app=self.app) finally: mod.cassandra = prev def test_init_with_and_without_LOCAL_QUORUM(self, *modules): from celery.backends import cassandra as mod mod.cassandra = Mock() cons = mod.cassandra.ConsistencyLevel = Bunch( LOCAL_QUORUM='foo', ) self.app.conf.cassandra_read_consistency = 'LOCAL_FOO' self.app.conf.cassandra_write_consistency = 'LOCAL_FOO' mod.CassandraBackend(app=self.app) cons.LOCAL_FOO = 'bar' mod.CassandraBackend(app=self.app) # no servers raises ImproperlyConfigured with pytest.raises(ImproperlyConfigured): self.app.conf.cassandra_servers = None mod.CassandraBackend( app=self.app, keyspace='b', column_family='c', ) @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self, *modules): from celery.backends.cassandra import CassandraBackend assert loads(dumps(CassandraBackend(app=self.app))) def test_get_task_meta_for(self, *modules): from celery.backends import cassandra as mod mod.cassandra = Mock() x = mod.CassandraBackend(app=self.app) x._connection = True session = x._session = Mock() execute = session.execute = Mock() execute.return_value = [ [states.SUCCESS, '1', datetime.now(), b'', b''] ] x.decode = Mock() meta = x._get_task_meta_for('task_id') assert meta['status'] == states.SUCCESS x._session.execute.return_value = [] meta = x._get_task_meta_for('task_id') assert meta['status'] == states.PENDING def test_store_result(self, *modules): from celery.backends import cassandra as mod mod.cassandra = Mock() x = mod.CassandraBackend(app=self.app) x._connection = True session = x._session = Mock() session.execute = Mock() x._store_result('task_id', 'result', states.SUCCESS) def test_process_cleanup(self, *modules): from celery.backends import cassandra as mod x = mod.CassandraBackend(app=self.app) x.process_cleanup() assert x._connection is None assert x._session is None def test_timeouting_cluster(self): # Tests behavior when Cluster.connect raises # cassandra.OperationTimedOut. from celery.backends import cassandra as mod class OTOExc(Exception): pass class VeryFaultyCluster(object): def __init__(self, *args, **kwargs): pass def connect(self, *args, **kwargs): raise OTOExc() def shutdown(self): pass mod.cassandra = Mock() mod.cassandra.OperationTimedOut = OTOExc mod.cassandra.cluster = Mock() mod.cassandra.cluster.Cluster = VeryFaultyCluster x = mod.CassandraBackend(app=self.app) with pytest.raises(OTOExc): x._store_result('task_id', 'result', states.SUCCESS) assert x._connection is None assert x._session is None x.process_cleanup() # shouldn't raise def test_please_free_memory(self): # Ensure that Cluster object IS shut down. from celery.backends import cassandra as mod class RAMHoggingCluster(object): objects_alive = 0 def __init__(self, *args, **kwargs): pass def connect(self, *args, **kwargs): RAMHoggingCluster.objects_alive += 1 return Mock() def shutdown(self): RAMHoggingCluster.objects_alive -= 1 mod.cassandra = Mock() mod.cassandra.cluster = Mock() mod.cassandra.cluster.Cluster = RAMHoggingCluster for x in range(0, 10): x = mod.CassandraBackend(app=self.app) x._store_result('task_id', 'result', states.SUCCESS) x.process_cleanup() assert RAMHoggingCluster.objects_alive == 0 def test_auth_provider(self): # Ensure valid auth_provider works properly, and invalid one raises # ImproperlyConfigured exception. 
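        # The backend resolves ``cassandra_auth_provider`` by attribute name
        # on the ``cassandra.auth`` module, so the DummyAuth stand-in below
        # lets 'ValidAuthProvider' resolve while 'SpiderManAuth' does not.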
from celery.backends import cassandra as mod class DummyAuth(object): ValidAuthProvider = Mock() mod.cassandra = Mock() mod.cassandra.auth = DummyAuth # Valid auth_provider self.app.conf.cassandra_auth_provider = 'ValidAuthProvider' self.app.conf.cassandra_auth_kwargs = { 'username': 'stuff' } mod.CassandraBackend(app=self.app) # Invalid auth_provider self.app.conf.cassandra_auth_provider = 'SpiderManAuth' self.app.conf.cassandra_auth_kwargs = { 'username': 'Jack' } with pytest.raises(ImproperlyConfigured): mod.CassandraBackend(app=self.app) celery-4.1.0/t/unit/backends/test_cache.py0000644000175000017500000002360413130607475020410 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import sys import types from contextlib import contextmanager from case import Mock, mock, patch, skip from kombu.utils.encoding import str_to_bytes, ensure_bytes from celery import states from celery import group, signature, uuid from celery.backends.cache import CacheBackend, DummyClient, backends from celery.exceptions import ImproperlyConfigured from celery.five import items, bytes_if_py2, string, text_t PY3 = sys.version_info[0] == 3 class SomeClass(object): def __init__(self, data): self.data = data class test_CacheBackend: def setup(self): self.app.conf.result_serializer = 'pickle' self.tb = CacheBackend(backend='memory://', app=self.app) self.tid = uuid() self.old_get_best_memcached = backends['memcache'] backends['memcache'] = lambda: (DummyClient, ensure_bytes) def teardown(self): backends['memcache'] = self.old_get_best_memcached def test_no_backend(self): self.app.conf.cache_backend = None with pytest.raises(ImproperlyConfigured): CacheBackend(backend=None, app=self.app) def test_mark_as_done(self): assert self.tb.get_state(self.tid) == states.PENDING assert self.tb.get_result(self.tid) is None self.tb.mark_as_done(self.tid, 42) assert self.tb.get_state(self.tid) == states.SUCCESS assert self.tb.get_result(self.tid) == 42 def test_is_pickled(self): result = {'foo': 'baz', 'bar': SomeClass(12345)} self.tb.mark_as_done(self.tid, result) # is serialized properly. 
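        # With result_serializer set to 'pickle' in setup(), arbitrary
        # Python objects round-trip through the backend, so the nested
        # SomeClass instance should come back with its state intact.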
rindb = self.tb.get_result(self.tid) assert rindb.get('foo') == 'baz' assert rindb.get('bar').data == 12345 def test_mark_as_failure(self): try: raise KeyError('foo') except KeyError as exception: self.tb.mark_as_failure(self.tid, exception) assert self.tb.get_state(self.tid) == states.FAILURE assert isinstance(self.tb.get_result(self.tid), KeyError) def test_apply_chord(self): tb = CacheBackend(backend='memory://', app=self.app) gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] tb.apply_chord(group(app=self.app), (), gid, {}, result=res) @patch('celery.result.GroupResult.restore') def test_on_chord_part_return(self, restore): tb = CacheBackend(backend='memory://', app=self.app) deps = Mock() deps.__len__ = Mock() deps.__len__.return_value = 2 restore.return_value = deps task = Mock() task.name = 'foobarbaz' self.app.tasks['foobarbaz'] = task task.request.chord = signature(task) gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] task.request.group = gid tb.apply_chord(group(app=self.app), (), gid, {}, result=res) deps.join_native.assert_not_called() tb.on_chord_part_return(task.request, 'SUCCESS', 10) deps.join_native.assert_not_called() tb.on_chord_part_return(task.request, 'SUCCESS', 10) deps.join_native.assert_called_with(propagate=True, timeout=3.0) deps.delete.assert_called_with() def test_mget(self): self.tb.set('foo', 1) self.tb.set('bar', 2) assert self.tb.mget(['foo', 'bar']) == {'foo': 1, 'bar': 2} def test_forget(self): self.tb.mark_as_done(self.tid, {'foo': 'bar'}) x = self.app.AsyncResult(self.tid, backend=self.tb) x.forget() assert x.result is None def test_process_cleanup(self): self.tb.process_cleanup() def test_expires_as_int(self): tb = CacheBackend(backend='memory://', expires=10, app=self.app) assert tb.expires == 10 def test_unknown_backend_raises_ImproperlyConfigured(self): with pytest.raises(ImproperlyConfigured): CacheBackend(backend='unknown://', app=self.app) def test_as_uri_no_servers(self): assert self.tb.as_uri() == 'memory:///' def test_as_uri_one_server(self): backend = 'memcache://127.0.0.1:11211/' b = CacheBackend(backend=backend, app=self.app) assert b.as_uri() == backend def test_as_uri_multiple_servers(self): backend = 'memcache://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' b = CacheBackend(backend=backend, app=self.app) assert b.as_uri() == backend @skip.unless_module('memcached', name='python-memcached') def test_regression_worker_startup_info(self): self.app.conf.result_backend = ( 'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' ) worker = self.app.Worker() with mock.stdouts(): worker.on_start() assert worker.startup_info() class MyMemcachedStringEncodingError(Exception): pass class MemcachedClient(DummyClient): def set(self, key, value, *args, **kwargs): if PY3: key_t, must_be, not_be, cod = bytes, 'string', 'bytes', 'decode' else: key_t, must_be, not_be, cod = text_t, 'bytes', 'string', 'encode' if isinstance(key, key_t): raise MyMemcachedStringEncodingError( 'Keys must be {0}, not {1}. 
Convert your ' 'strings using mystring.{2}(charset)!'.format( must_be, not_be, cod)) return super(MemcachedClient, self).set(key, value, *args, **kwargs) class MockCacheMixin(object): @contextmanager def mock_memcache(self): memcache = types.ModuleType(bytes_if_py2('memcache')) memcache.Client = MemcachedClient memcache.Client.__module__ = memcache.__name__ prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache try: yield True finally: if prev is not None: sys.modules['memcache'] = prev @contextmanager def mock_pylibmc(self): pylibmc = types.ModuleType(bytes_if_py2('pylibmc')) pylibmc.Client = MemcachedClient pylibmc.Client.__module__ = pylibmc.__name__ prev = sys.modules.get('pylibmc') sys.modules['pylibmc'] = pylibmc try: yield True finally: if prev is not None: sys.modules['pylibmc'] = prev class test_get_best_memcache(MockCacheMixin): def test_pylibmc(self): with self.mock_pylibmc(): with mock.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] assert cache.get_best_memcache()[0].__module__ == 'pylibmc' def test_memcache(self): with self.mock_memcache(): with mock.reset_modules('celery.backends.cache'): with mock.mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] assert (cache.get_best_memcache()[0]().__module__ == 'memcache') def test_no_implementations(self): with mock.mask_modules('pylibmc', 'memcache'): with mock.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] with pytest.raises(ImproperlyConfigured): cache.get_best_memcache() def test_cached(self): with self.mock_pylibmc(): with mock.reset_modules('celery.backends.cache'): from celery.backends import cache cache._imp = [None] cache.get_best_memcache()[0](behaviors={'foo': 'bar'}) assert cache._imp[0] cache.get_best_memcache()[0]() def test_backends(self): from celery.backends.cache import backends with self.mock_memcache(): for name, fun in items(backends): assert fun() class test_memcache_key(MockCacheMixin): def test_memcache_unicode_key(self): with self.mock_memcache(): with mock.reset_modules('celery.backends.cache'): with mock.mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] task_id, result = string(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result def test_memcache_bytes_key(self): with self.mock_memcache(): with mock.reset_modules('celery.backends.cache'): with mock.mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result def test_pylibmc_unicode_key(self): with mock.reset_modules('celery.backends.cache'): with self.mock_pylibmc(): from celery.backends import cache cache._imp = [None] task_id, result = string(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result def test_pylibmc_bytes_key(self): with mock.reset_modules('celery.backends.cache'): with self.mock_pylibmc(): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, state=states.SUCCESS) assert b.get_result(task_id) == result 
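# Sketch (not part of the original suite): the four key-type tests above
# share one round-trip pattern -- install a fake client module, reset
# ``cache._imp`` so get_best_memcache() re-probes, then store and fetch a
# result under either a text or bytes task id.  A hypothetical helper
# capturing that flow, assuming the MockCacheMixin context managers:
#
#     def _roundtrip(case, app, task_id, result=42):
#         with mock.reset_modules('celery.backends.cache'):
#             with case.mock_pylibmc():
#                 from celery.backends import cache
#                 cache._imp = [None]  # force client re-selection
#                 b = cache.CacheBackend(backend='memcache', app=app)
#                 b.store_result(task_id, result, state=states.SUCCESS)
#                 assert b.get_result(task_id) == result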
celery-4.1.0/t/unit/backends/test_filesystem.py0000644000175000017500000000437413130607475021534 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import os import pytest import tempfile from case import skip from celery import uuid from celery import states from celery.backends.filesystem import FilesystemBackend from celery.exceptions import ImproperlyConfigured @skip.if_win32() class test_FilesystemBackend: def setup(self): self.directory = tempfile.mkdtemp() self.url = 'file://' + self.directory self.path = self.directory.encode('ascii') def test_a_path_is_required(self): with pytest.raises(ImproperlyConfigured): FilesystemBackend(app=self.app) def test_a_path_in_url(self): tb = FilesystemBackend(app=self.app, url=self.url) assert tb.path == self.path def test_path_is_incorrect(self): with pytest.raises(ImproperlyConfigured): FilesystemBackend(app=self.app, url=self.url + '-incorrect') def test_missing_task_is_PENDING(self): tb = FilesystemBackend(app=self.app, url=self.url) assert tb.get_state('xxx-does-not-exist') == states.PENDING def test_mark_as_done_writes_file(self): tb = FilesystemBackend(app=self.app, url=self.url) tb.mark_as_done(uuid(), 42) assert len(os.listdir(self.directory)) == 1 def test_done_task_is_SUCCESS(self): tb = FilesystemBackend(app=self.app, url=self.url) tid = uuid() tb.mark_as_done(tid, 42) assert tb.get_state(tid) == states.SUCCESS def test_correct_result(self): data = {'foo': 'bar'} tb = FilesystemBackend(app=self.app, url=self.url) tid = uuid() tb.mark_as_done(tid, data) assert tb.get_result(tid) == data def test_get_many(self): data = {uuid(): 'foo', uuid(): 'bar', uuid(): 'baz'} tb = FilesystemBackend(app=self.app, url=self.url) for key, value in data.items(): tb.mark_as_done(key, value) for key, result in tb.get_many(data.keys()): assert result['result'] == data[key] def test_forget_deletes_file(self): tb = FilesystemBackend(app=self.app, url=self.url) tid = uuid() tb.mark_as_done(tid, 42) tb.forget(tid) assert len(os.listdir(self.directory)) == 0 celery-4.1.0/t/unit/backends/test_database.py0000644000175000017500000001756213130607475021117 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from datetime import datetime from pickle import loads, dumps from case import Mock, patch, skip from celery import states from celery import uuid from celery.exceptions import ImproperlyConfigured try: import sqlalchemy # noqa except ImportError: DatabaseBackend = Task = TaskSet = retry = None # noqa SessionManager = session_cleanup = None # noqa else: from celery.backends.database import ( DatabaseBackend, retry, session_cleanup, ) from celery.backends.database import session from celery.backends.database.session import SessionManager from celery.backends.database.models import Task, TaskSet class SomeClass(object): def __init__(self, data): self.data = data @skip.unless_module('sqlalchemy') class test_session_cleanup: def test_context(self): session = Mock(name='session') with session_cleanup(session): pass session.close.assert_called_with() def test_context_raises(self): session = Mock(name='session') with pytest.raises(KeyError): with session_cleanup(session): raise KeyError() session.rollback.assert_called_with() session.close.assert_called_with() @skip.unless_module('sqlalchemy') @skip.if_pypy() @skip.if_jython() class test_DatabaseBackend: def setup(self): self.uri = 'sqlite:///test.db' self.app.conf.result_serializer = 'pickle' def 
test_retry_helper(self): from celery.backends.database import DatabaseError calls = [0] @retry def raises(): calls[0] += 1 raise DatabaseError(1, 2, 3) with pytest.raises(DatabaseError): raises(max_retries=5) assert calls[0] == 5 def test_missing_dburi_raises_ImproperlyConfigured(self): self.app.conf.database_url = None with pytest.raises(ImproperlyConfigured): DatabaseBackend(app=self.app) def test_missing_task_id_is_PENDING(self): tb = DatabaseBackend(self.uri, app=self.app) assert tb.get_state('xxx-does-not-exist') == states.PENDING def test_missing_task_meta_is_dict_with_pending(self): tb = DatabaseBackend(self.uri, app=self.app) meta = tb.get_task_meta('xxx-does-not-exist-at-all') assert meta['status'] == states.PENDING assert meta['task_id'] == 'xxx-does-not-exist-at-all' assert meta['result'] is None assert meta['traceback'] is None def test_mark_as_done(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() assert tb.get_state(tid) == states.PENDING assert tb.get_result(tid) is None tb.mark_as_done(tid, 42) assert tb.get_state(tid) == states.SUCCESS assert tb.get_result(tid) == 42 def test_is_pickled(self): tb = DatabaseBackend(self.uri, app=self.app) tid2 = uuid() result = {'foo': 'baz', 'bar': SomeClass(12345)} tb.mark_as_done(tid2, result) # is serialized properly. rindb = tb.get_result(tid2) assert rindb.get('foo') == 'baz' assert rindb.get('bar').data == 12345 def test_mark_as_started(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() tb.mark_as_started(tid) assert tb.get_state(tid) == states.STARTED def test_mark_as_revoked(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() tb.mark_as_revoked(tid) assert tb.get_state(tid) == states.REVOKED def test_mark_as_retry(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() try: raise KeyError('foo') except KeyError as exception: import traceback trace = '\n'.join(traceback.format_stack()) tb.mark_as_retry(tid, exception, traceback=trace) assert tb.get_state(tid) == states.RETRY assert isinstance(tb.get_result(tid), KeyError) assert tb.get_traceback(tid) == trace def test_mark_as_failure(self): tb = DatabaseBackend(self.uri, app=self.app) tid3 = uuid() try: raise KeyError('foo') except KeyError as exception: import traceback trace = '\n'.join(traceback.format_stack()) tb.mark_as_failure(tid3, exception, traceback=trace) assert tb.get_state(tid3) == states.FAILURE assert isinstance(tb.get_result(tid3), KeyError) assert tb.get_traceback(tid3) == trace def test_forget(self): tb = DatabaseBackend(self.uri, backend='memory://', app=self.app) tid = uuid() tb.mark_as_done(tid, {'foo': 'bar'}) tb.mark_as_done(tid, {'foo': 'bar'}) x = self.app.AsyncResult(tid, backend=tb) x.forget() assert x.result is None def test_process_cleanup(self): tb = DatabaseBackend(self.uri, app=self.app) tb.process_cleanup() @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): tb = DatabaseBackend(self.uri, app=self.app) assert loads(dumps(tb)) def test_save__restore__delete_group(self): tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() res = {'something': 'special'} assert tb.save_group(tid, res) == res res2 = tb.restore_group(tid) assert res2 == res tb.delete_group(tid) assert tb.restore_group(tid) is None assert tb.restore_group('xxx-nonexisting-id') is None def test_cleanup(self): tb = DatabaseBackend(self.uri, app=self.app) for i in range(10): tb.mark_as_done(uuid(), 42) tb.save_group(uuid(), {'foo': 'bar'}) s = tb.ResultSession() for t in s.query(Task).all(): t.date_done = 
datetime.now() - tb.expires * 2 for t in s.query(TaskSet).all(): t.date_done = datetime.now() - tb.expires * 2 s.commit() s.close() tb.cleanup() def test_Task__repr__(self): assert 'foo' in repr(Task('foo')) def test_TaskSet__repr__(self): assert 'foo' in repr(TaskSet('foo')) @skip.unless_module('sqlalchemy') class test_SessionManager: def test_after_fork(self): s = SessionManager() assert not s.forked s._after_fork() assert s.forked @patch('celery.backends.database.session.create_engine') def test_get_engine_forked(self, create_engine): s = SessionManager() s._after_fork() engine = s.get_engine('dburi', foo=1) create_engine.assert_called_with('dburi', foo=1) assert engine is create_engine() engine2 = s.get_engine('dburi', foo=1) assert engine2 is engine @patch('celery.backends.database.session.sessionmaker') def test_create_session_forked(self, sessionmaker): s = SessionManager() s.get_engine = Mock(name='get_engine') s._after_fork() engine, session = s.create_session('dburi', short_lived_sessions=True) sessionmaker.assert_called_with(bind=s.get_engine()) assert session is sessionmaker() sessionmaker.return_value = Mock(name='new') engine, session2 = s.create_session('dburi', short_lived_sessions=True) sessionmaker.assert_called_with(bind=s.get_engine()) assert session2 is not session sessionmaker.return_value = Mock(name='new2') engine, session3 = s.create_session( 'dburi', short_lived_sessions=False) sessionmaker.assert_called_with(bind=s.get_engine()) assert session3 is session2 def test_coverage_madness(self): prev, session.register_after_fork = ( session.register_after_fork, None, ) try: SessionManager() finally: session.register_after_fork = prev celery-4.1.0/t/unit/backends/test_base.py0000644000175000017500000005174313130607475020264 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import sys import types from contextlib import contextmanager from case import ANY, Mock, call, patch, skip from celery import states from celery import chord, group, uuid from celery.backends.base import ( BaseBackend, KeyValueStoreBackend, DisabledBackend, _nulldict, ) from celery.exceptions import ChordError, TimeoutError from celery.five import items, bytes_if_py2, range from celery.result import result_from_tuple from celery.utils import serialization from celery.utils.functional import pass1 from celery.utils.serialization import subclass_exception from celery.utils.serialization import find_pickleable_exception as fnpe from celery.utils.serialization import UnpickleableExceptionWrapper from celery.utils.serialization import get_pickleable_exception as gpe class wrapobject(object): def __init__(self, *args, **kwargs): self.args = args if sys.version_info[0] == 3 or getattr(sys, 'pypy_version_info', None): Oldstyle = None else: Oldstyle = types.ClassType(bytes_if_py2('Oldstyle'), (), {}) Unpickleable = subclass_exception( bytes_if_py2('Unpickleable'), KeyError, 'foo.module', ) Impossible = subclass_exception( bytes_if_py2('Impossible'), object, 'foo.module', ) Lookalike = subclass_exception( bytes_if_py2('Lookalike'), wrapobject, 'foo.module', ) class test_nulldict: def test_nulldict(self): x = _nulldict() x['foo'] = 1 x.update(foo=1, bar=2) x.setdefault('foo', 3) class test_serialization: def test_create_exception_cls(self): assert serialization.create_exception_cls('FooError', 'm') assert serialization.create_exception_cls('FooError', 'm', KeyError) class test_BaseBackend_interface: def setup(self): self.b = BaseBackend(self.app) def 
test__forget(self): with pytest.raises(NotImplementedError): self.b._forget('SOMExx-N0Nex1stant-IDxx-') def test_forget(self): with pytest.raises(NotImplementedError): self.b.forget('SOMExx-N0nex1stant-IDxx-') def test_on_chord_part_return(self): self.b.on_chord_part_return(None, None, None) def test_apply_chord(self, unlock='celery.chord_unlock'): self.app.tasks[unlock] = Mock() self.b.apply_chord( group(app=self.app), (), 'dakj221', None, result=[self.app.AsyncResult(x) for x in [1, 2, 3]], ) assert self.app.tasks[unlock].apply_async.call_count class test_exception_pickle: @skip.if_python3(reason='does not support old style classes') @skip.if_pypy() def test_oldstyle(self): assert fnpe(Oldstyle()) def test_BaseException(self): assert fnpe(Exception()) is None def test_get_pickleable_exception(self): exc = Exception('foo') assert gpe(exc) == exc def test_unpickleable(self): assert isinstance(fnpe(Unpickleable()), KeyError) assert fnpe(Impossible()) is None class test_prepare_exception: def setup(self): self.b = BaseBackend(self.app) def test_unpickleable(self): self.b.serializer = 'pickle' x = self.b.prepare_exception(Unpickleable(1, 2, 'foo')) assert isinstance(x, KeyError) y = self.b.exception_to_python(x) assert isinstance(y, KeyError) def test_impossible(self): self.b.serializer = 'pickle' x = self.b.prepare_exception(Impossible()) assert isinstance(x, UnpickleableExceptionWrapper) assert str(x) y = self.b.exception_to_python(x) assert y.__class__.__name__ == 'Impossible' if sys.version_info < (2, 5): assert y.__class__.__module__ else: assert y.__class__.__module__ == 'foo.module' def test_regular(self): self.b.serializer = 'pickle' x = self.b.prepare_exception(KeyError('baz')) assert isinstance(x, KeyError) y = self.b.exception_to_python(x) assert isinstance(y, KeyError) def test_unicode_message(self): message = u'\u03ac' x = self.b.prepare_exception(Exception(message)) assert x == {'exc_message': message, 'exc_type': 'Exception'} class KVBackend(KeyValueStoreBackend): mget_returns_dict = False def __init__(self, app, *args, **kwargs): self.db = {} super(KVBackend, self).__init__(app) def get(self, key): return self.db.get(key) def set(self, key, value): self.db[key] = value def mget(self, keys): if self.mget_returns_dict: return {key: self.get(key) for key in keys} else: return [self.get(k) for k in keys] def delete(self, key): self.db.pop(key, None) class DictBackend(BaseBackend): def __init__(self, *args, **kwargs): BaseBackend.__init__(self, *args, **kwargs) self._data = {'can-delete': {'result': 'foo'}} def _restore_group(self, group_id): if group_id == 'exists': return {'result': 'group'} def _get_task_meta_for(self, task_id): if task_id == 'task-exists': return {'result': 'task'} def _delete_group(self, group_id): self._data.pop(group_id, None) class test_BaseBackend_dict: def setup(self): self.b = DictBackend(app=self.app) def test_delete_group(self): self.b.delete_group('can-delete') assert 'can-delete' not in self.b._data def test_prepare_exception_json(self): x = DictBackend(self.app, serializer='json') e = x.prepare_exception(KeyError('foo')) assert 'exc_type' in e e = x.exception_to_python(e) assert e.__class__.__name__ == 'KeyError' assert str(e).strip('u') == "'foo'" def test_save_group(self): b = BaseBackend(self.app) b._save_group = Mock() b.save_group('foofoo', 'xxx') b._save_group.assert_called_with('foofoo', 'xxx') def test_add_to_chord_interface(self): b = BaseBackend(self.app) with pytest.raises(NotImplementedError): b.add_to_chord('group_id', 'sig') def 
test_forget_interface(self): b = BaseBackend(self.app) with pytest.raises(NotImplementedError): b.forget('foo') def test_restore_group(self): assert self.b.restore_group('missing') is None assert self.b.restore_group('missing') is None assert self.b.restore_group('exists') == 'group' assert self.b.restore_group('exists') == 'group' assert self.b.restore_group('exists', cache=False) == 'group' def test_reload_group_result(self): self.b._cache = {} self.b.reload_group_result('exists') self.b._cache['exists'] = {'result': 'group'} def test_reload_task_result(self): self.b._cache = {} self.b.reload_task_result('task-exists') self.b._cache['task-exists'] = {'result': 'task'} def test_fail_from_current_stack(self): self.b.mark_as_failure = Mock() try: raise KeyError('foo') except KeyError as exc: self.b.fail_from_current_stack('task_id') self.b.mark_as_failure.assert_called() args = self.b.mark_as_failure.call_args[0] assert args[0] == 'task_id' assert args[1] is exc assert args[2] def test_prepare_value_serializes_group_result(self): self.b.serializer = 'json' g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) v = self.b.prepare_value(g) assert isinstance(v, (list, tuple)) assert result_from_tuple(v, app=self.app) == g v2 = self.b.prepare_value(g[0]) assert isinstance(v2, (list, tuple)) assert result_from_tuple(v2, app=self.app) == g[0] self.b.serializer = 'pickle' assert isinstance(self.b.prepare_value(g), self.app.GroupResult) def test_is_cached(self): b = BaseBackend(app=self.app, max_cached_results=1) b._cache['foo'] = 1 assert b.is_cached('foo') assert not b.is_cached('false') def test_mark_as_done__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') b.on_chord_part_return = Mock() b.mark_as_done('id', 10, request=request) b.on_chord_part_return.assert_called_with(request, states.SUCCESS, 10) def test_mark_as_failure__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [] b.on_chord_part_return = Mock() exc = KeyError() b.mark_as_failure('id', exc, request=request) b.on_chord_part_return.assert_called_with(request, states.FAILURE, exc) def test_mark_as_revoked__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [] b.on_chord_part_return = Mock() b.mark_as_revoked('id', 'revoked', request=request) b.on_chord_part_return.assert_called_with(request, states.REVOKED, ANY) def test_chord_error_from_stack_raises(self): b = BaseBackend(app=self.app) exc = KeyError() callback = Mock(name='callback') callback.options = {'link_error': []} task = self.app.tasks[callback.task] = Mock() b.fail_from_current_stack = Mock() group = self.patching('celery.group') group.side_effect = exc b.chord_error_from_stack(callback, exc=ValueError()) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=exc) def test_exception_to_python_when_None(self): b = BaseBackend(app=self.app) assert b.exception_to_python(None) is None def test_wait_for__on_interval(self): self.patching('time.sleep') b = BaseBackend(app=self.app) b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = {'status': states.PENDING} callback = Mock(name='callback') with pytest.raises(TimeoutError): b.wait_for(task_id='1', on_interval=callback, timeout=1) callback.assert_called_with() b._get_task_meta_for.return_value = {'status': states.SUCCESS} b.wait_for(task_id='1', timeout=None) def test_get_children(self): b = 
BaseBackend(app=self.app) b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = {} assert b.get_children('id') is None b._get_task_meta_for.return_value = {'children': 3} assert b.get_children('id') == 3 class test_KeyValueStoreBackend: def setup(self): self.b = KVBackend(app=self.app) def test_on_chord_part_return(self): assert not self.b.implements_incr self.b.on_chord_part_return(None, None, None) def test_get_store_delete_result(self): tid = uuid() self.b.mark_as_done(tid, 'Hello world') assert self.b.get_result(tid) == 'Hello world' assert self.b.get_state(tid) == states.SUCCESS self.b.forget(tid) assert self.b.get_state(tid) == states.PENDING def test_strip_prefix(self): x = self.b.get_key_for_task('x1b34') assert self.b._strip_prefix(x) == 'x1b34' assert self.b._strip_prefix('x1b34') == 'x1b34' def test_get_many(self): for is_dict in True, False: self.b.mget_returns_dict = is_dict ids = {uuid(): i for i in range(10)} for id, i in items(ids): self.b.mark_as_done(id, i) it = self.b.get_many(list(ids), interval=0.01) for i, (got_id, got_state) in enumerate(it): assert got_state['result'] == ids[got_id] assert i == 9 assert list(self.b.get_many(list(ids), interval=0.01)) self.b._cache.clear() callback = Mock(name='callback') it = self.b.get_many( list(ids), on_message=callback, interval=0.05 ) for i, (got_id, got_state) in enumerate(it): assert got_state['result'] == ids[got_id] assert i == 9 assert list( self.b.get_many(list(ids), interval=0.01) ) callback.assert_has_calls([ call(ANY) for id in ids ]) def test_get_many_times_out(self): tasks = [uuid() for _ in range(4)] self.b._cache[tasks[1]] = {'status': 'PENDING'} with pytest.raises(self.b.TimeoutError): list(self.b.get_many(tasks, timeout=0.01, interval=0.01)) def test_chord_part_return_no_gid(self): self.b.implements_incr = True task = Mock() state = 'SUCCESS' result = 10 task.request.group = None self.b.get_key_for_chord = Mock() self.b.get_key_for_chord.side_effect = AssertionError( 'should not get here', ) assert self.b.on_chord_part_return( task.request, state, result) is None @patch('celery.backends.base.GroupResult') @patch('celery.backends.base.maybe_signature') def test_chord_part_return_restore_raises(self, maybe_signature, GroupResult): self.b.implements_incr = True GroupResult.restore.side_effect = KeyError() self.b.chord_error_from_stack = Mock() callback = Mock(name='callback') request = Mock(name='request') request.group = 'gid' maybe_signature.return_value = callback self.b.on_chord_part_return(request, states.SUCCESS, 10) self.b.chord_error_from_stack.assert_called_with( callback, ANY, ) @patch('celery.backends.base.GroupResult') @patch('celery.backends.base.maybe_signature') def test_chord_part_return_restore_empty(self, maybe_signature, GroupResult): self.b.implements_incr = True GroupResult.restore.return_value = None self.b.chord_error_from_stack = Mock() callback = Mock(name='callback') request = Mock(name='request') request.group = 'gid' maybe_signature.return_value = callback self.b.on_chord_part_return(request, states.SUCCESS, 10) self.b.chord_error_from_stack.assert_called_with( callback, ANY, ) def test_filter_ready(self): self.b.decode_result = Mock() self.b.decode_result.side_effect = pass1 assert len(list(self.b._filter_ready([ (1, {'status': states.RETRY}), (2, {'status': states.FAILURE}), (3, {'status': states.SUCCESS}), ]))) == 2 @contextmanager def _chord_part_context(self, b): @self.app.task(shared=False) def callback(result): pass b.implements_incr = True b.client = Mock() with 
patch('celery.backends.base.GroupResult') as GR: deps = GR.restore.return_value = Mock(name='DEPS') deps.__len__ = Mock() deps.__len__.return_value = 10 b.incr = Mock() b.incr.return_value = 10 b.expire = Mock() task = Mock() task.request.group = 'grid' cb = task.request.chord = callback.s() task.request.chord.freeze() callback.backend = b callback.backend.fail_from_current_stack = Mock() yield task, deps, cb def test_chord_part_return_propagate_set(self): with self._chord_part_context(self.b) as (task, deps, _): self.b.on_chord_part_return(task.request, 'SUCCESS', 10) self.b.expire.assert_not_called() deps.delete.assert_called_with() deps.join_native.assert_called_with(propagate=True, timeout=3.0) def test_chord_part_return_propagate_default(self): with self._chord_part_context(self.b) as (task, deps, _): self.b.on_chord_part_return(task.request, 'SUCCESS', 10) self.b.expire.assert_not_called() deps.delete.assert_called_with() deps.join_native.assert_called_with(propagate=True, timeout=3.0) def test_chord_part_return_join_raises_internal(self): with self._chord_part_context(self.b) as (task, deps, callback): deps._failed_join_report = lambda: iter([]) deps.join_native.side_effect = KeyError('foo') self.b.on_chord_part_return(task.request, 'SUCCESS', 10) self.b.fail_from_current_stack.assert_called() args = self.b.fail_from_current_stack.call_args exc = args[1]['exc'] assert isinstance(exc, ChordError) assert 'foo' in str(exc) def test_chord_part_return_join_raises_task(self): b = KVBackend(serializer='pickle', app=self.app) with self._chord_part_context(b) as (task, deps, callback): deps._failed_join_report = lambda: iter([ self.app.AsyncResult('culprit'), ]) deps.join_native.side_effect = KeyError('foo') b.on_chord_part_return(task.request, 'SUCCESS', 10) b.fail_from_current_stack.assert_called() args = b.fail_from_current_stack.call_args exc = args[1]['exc'] assert isinstance(exc, ChordError) assert 'Dependency culprit raised' in str(exc) def test_restore_group_from_json(self): b = KVBackend(serializer='json', app=self.app) g = self.app.GroupResult( 'group_id', [self.app.AsyncResult('a'), self.app.AsyncResult('b')], ) b._save_group(g.id, g) g2 = b._restore_group(g.id)['result'] assert g2 == g def test_restore_group_from_pickle(self): b = KVBackend(serializer='pickle', app=self.app) g = self.app.GroupResult( 'group_id', [self.app.AsyncResult('a'), self.app.AsyncResult('b')], ) b._save_group(g.id, g) g2 = b._restore_group(g.id)['result'] assert g2 == g def test_chord_apply_fallback(self): self.b.implements_incr = False self.b.fallback_chord_unlock = Mock() self.b.apply_chord( group(app=self.app), (), 'group_id', 'body', result='result', foo=1, ) self.b.fallback_chord_unlock.assert_called_with( 'group_id', 'body', result='result', foo=1, ) def test_get_missing_meta(self): assert self.b.get_result('xxx-missing') is None assert self.b.get_state('xxx-missing') == states.PENDING def test_save_restore_delete_group(self): tid = uuid() tsr = self.app.GroupResult( tid, [self.app.AsyncResult(uuid()) for _ in range(10)], ) self.b.save_group(tid, tsr) self.b.restore_group(tid) assert self.b.restore_group(tid) == tsr self.b.delete_group(tid) assert self.b.restore_group(tid) is None def test_restore_missing_group(self): assert self.b.restore_group('xxx-nonexistant') is None class test_KeyValueStoreBackend_interface: def test_get(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).get('a') def test_set(self): with pytest.raises(NotImplementedError): 
KeyValueStoreBackend(self.app).set('a', 1) def test_incr(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).incr('a') def test_cleanup(self): assert not KeyValueStoreBackend(self.app).cleanup() def test_delete(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).delete('a') def test_mget(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).mget(['a']) def test_forget(self): with pytest.raises(NotImplementedError): KeyValueStoreBackend(self.app).forget('a') class test_DisabledBackend: def test_store_result(self): DisabledBackend(self.app).store_result() def test_is_disabled(self): with pytest.raises(NotImplementedError): DisabledBackend(self.app).get_state('foo') def test_as_uri(self): assert DisabledBackend(self.app).as_uri() == 'disabled://' @pytest.mark.celery(result_backend='disabled') def test_chord_raises_error(self): with pytest.raises(NotImplementedError): chord(self.add.s(i, i) for i in range(10))(self.add.s([2])) @pytest.mark.celery(result_backend='disabled') def test_chain_with_chord_raises_error(self): with pytest.raises(NotImplementedError): (self.add.s(2, 2) | group(self.add.s(2, 2), self.add.s(5, 6)) | self.add.s()).delay() class test_as_uri: def setup(self): self.b = BaseBackend( app=self.app, url='sch://uuuu:pwpw@hostname.dom' ) def test_as_uri_include_password(self): assert self.b.as_uri(True) == self.b.url def test_as_uri_exclude_password(self): assert self.b.as_uri() == 'sch://uuuu:**@hostname.dom/' celery-4.1.0/t/unit/backends/test_couchbase.py0000644000175000017500000001043413130607475021276 0ustar omeromer00000000000000"""Tests for the CouchbaseBackend.""" from __future__ import absolute_import, unicode_literals import pytest from kombu.utils.encoding import str_t from case import MagicMock, Mock, patch, sentinel, skip from celery.app import backends from celery.backends import couchbase as module from celery.backends.couchbase import CouchbaseBackend from celery.exceptions import ImproperlyConfigured try: import couchbase except ImportError: couchbase = None # noqa COUCHBASE_BUCKET = 'celery_bucket' @skip.unless_module('couchbase') class test_CouchbaseBackend: def setup(self): self.backend = CouchbaseBackend(app=self.app) def test_init_no_couchbase(self): prev, module.Couchbase = module.Couchbase, None try: with pytest.raises(ImproperlyConfigured): CouchbaseBackend(app=self.app) finally: module.Couchbase = prev def test_init_no_settings(self): self.app.conf.couchbase_backend_settings = [] with pytest.raises(ImproperlyConfigured): CouchbaseBackend(app=self.app) def test_init_settings_is_None(self): self.app.conf.couchbase_backend_settings = None CouchbaseBackend(app=self.app) def test_get_connection_connection_exists(self): with patch('couchbase.connection.Connection') as mock_Connection: self.backend._connection = sentinel._connection connection = self.backend._get_connection() assert sentinel._connection == connection mock_Connection.assert_not_called() def test_get(self): self.app.conf.couchbase_backend_settings = {} x = CouchbaseBackend(app=self.app) x._connection = Mock() mocked_get = x._connection.get = Mock() mocked_get.return_value.value = sentinel.retval # should return None assert x.get('1f3fab') == sentinel.retval x._connection.get.assert_called_once_with('1f3fab') def test_set(self): self.app.conf.couchbase_backend_settings = None x = CouchbaseBackend(app=self.app) x._connection = MagicMock() x._connection.set = MagicMock() # should return None assert x.set(sentinel.key, 
sentinel.value) is None def test_delete(self): self.app.conf.couchbase_backend_settings = {} x = CouchbaseBackend(app=self.app) x._connection = Mock() mocked_delete = x._connection.delete = Mock() mocked_delete.return_value = None # should return None assert x.delete('1f3fab') is None x._connection.delete.assert_called_once_with('1f3fab') def test_config_params(self): self.app.conf.couchbase_backend_settings = { 'bucket': 'mycoolbucket', 'host': ['here.host.com', 'there.host.com'], 'username': 'johndoe', 'password': 'mysecret', 'port': '1234', } x = CouchbaseBackend(app=self.app) assert x.bucket == 'mycoolbucket' assert x.host == ['here.host.com', 'there.host.com'] assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.port == 1234 def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'): from celery.backends.couchbase import CouchbaseBackend backend, url_ = backends.by_url(url, self.app.loader) assert backend is CouchbaseBackend assert url_ == url def test_backend_params_by_url(self): url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' with self.Celery(backend=url) as app: x = app.backend assert x.bucket == 'mycoolbucket' assert x.host == 'myhost' assert x.username == 'johndoe' assert x.password == 'mysecret' assert x.port == 123 def test_correct_key_types(self): keys = [ self.backend.get_key_for_task('task_id', bytes('key')), self.backend.get_key_for_chord('group_id', bytes('key')), self.backend.get_key_for_group('group_id', bytes('key')), self.backend.get_key_for_task('task_id', 'key'), self.backend.get_key_for_chord('group_id', 'key'), self.backend.get_key_for_group('group_id', 'key'), ] for key in keys: assert isinstance(key, str_t) celery-4.1.0/t/unit/backends/test_mongodb.py0000644000175000017500000003733713130607475021002 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import datetime from pickle import loads, dumps from case import ANY, MagicMock, Mock, mock, patch, sentinel, skip from kombu.exceptions import EncodeError from celery import uuid from celery import states from celery.backends.mongodb import InvalidDocument, MongoBackend from celery.exceptions import ImproperlyConfigured COLLECTION = 'taskmeta_celery' TASK_ID = uuid() MONGODB_HOST = 'localhost' MONGODB_PORT = 27017 MONGODB_USER = 'mongo' MONGODB_PASSWORD = '1234' MONGODB_DATABASE = 'testing' MONGODB_COLLECTION = 'collection1' MONGODB_GROUP_COLLECTION = 'group_collection1' @skip.unless_module('pymongo') class test_MongoBackend: default_url = 'mongodb://uuuu:pwpw@hostname.dom/database' replica_set_url = ( 'mongodb://uuuu:pwpw@hostname.dom,' 'hostname.dom/database?replicaSet=rs' ) sanitized_default_url = 'mongodb://uuuu:**@hostname.dom/database' sanitized_replica_set_url = ( 'mongodb://uuuu:**@hostname.dom/,' 'hostname.dom/database?replicaSet=rs' ) def setup(self): self.patching('celery.backends.mongodb.MongoBackend.encode') self.patching('celery.backends.mongodb.MongoBackend.decode') self.patching('celery.backends.mongodb.Binary') self.patching('datetime.datetime') self.backend = MongoBackend(app=self.app, url=self.default_url) def test_init_no_mongodb(self, patching): patching('celery.backends.mongodb.pymongo', None) with pytest.raises(ImproperlyConfigured): MongoBackend(app=self.app) def test_init_no_settings(self): self.app.conf.mongodb_backend_settings = [] with pytest.raises(ImproperlyConfigured): MongoBackend(app=self.app) def test_init_settings_is_None(self): self.app.conf.mongodb_backend_settings = None 
MongoBackend(app=self.app) def test_init_with_settings(self): self.app.conf.mongodb_backend_settings = None # empty settings mb = MongoBackend(app=self.app) # uri uri = 'mongodb://localhost:27017' mb = MongoBackend(app=self.app, url=uri) assert mb.mongo_host == ['localhost:27017'] assert mb.options == mb._prepare_client_options() assert mb.database_name == 'celery' # uri with database name uri = 'mongodb://localhost:27017/celerydb' mb = MongoBackend(app=self.app, url=uri) assert mb.database_name == 'celerydb' # uri with user, password, database name, replica set uri = ('mongodb://' 'celeryuser:celerypassword@' 'mongo1.example.com:27017,' 'mongo2.example.com:27017,' 'mongo3.example.com:27017/' 'celerydatabase?replicaSet=rs0') mb = MongoBackend(app=self.app, url=uri) assert mb.mongo_host == [ 'mongo1.example.com:27017', 'mongo2.example.com:27017', 'mongo3.example.com:27017', ] assert mb.options == dict( mb._prepare_client_options(), replicaset='rs0', ) assert mb.user == 'celeryuser' assert mb.password == 'celerypassword' assert mb.database_name == 'celerydatabase' # same uri, change some parameters in backend settings self.app.conf.mongodb_backend_settings = { 'replicaset': 'rs1', 'user': 'backenduser', 'database': 'another_db', 'options': { 'socketKeepAlive': True, }, } mb = MongoBackend(app=self.app, url=uri) assert mb.mongo_host == [ 'mongo1.example.com:27017', 'mongo2.example.com:27017', 'mongo3.example.com:27017', ] assert mb.options == dict( mb._prepare_client_options(), replicaset='rs1', socketKeepAlive=True, ) assert mb.user == 'backenduser' assert mb.password == 'celerypassword' assert mb.database_name == 'another_db' mb = MongoBackend(app=self.app, url='mongodb://') @pytest.mark.usefixtures('depends_on_current_app') def test_reduce(self): x = MongoBackend(app=self.app) assert loads(dumps(x)) def test_get_connection_connection_exists(self): with patch('pymongo.MongoClient') as mock_Connection: self.backend._connection = sentinel._connection connection = self.backend._get_connection() assert sentinel._connection == connection mock_Connection.assert_not_called() def test_get_connection_no_connection_host(self): with patch('pymongo.MongoClient') as mock_Connection: self.backend._connection = None self.backend.host = MONGODB_HOST self.backend.port = MONGODB_PORT mock_Connection.return_value = sentinel.connection connection = self.backend._get_connection() mock_Connection.assert_called_once_with( host='mongodb://localhost:27017', **self.backend._prepare_client_options() ) assert sentinel.connection == connection def test_get_connection_no_connection_mongodb_uri(self): with patch('pymongo.MongoClient') as mock_Connection: mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT) self.backend._connection = None self.backend.host = mongodb_uri mock_Connection.return_value = sentinel.connection connection = self.backend._get_connection() mock_Connection.assert_called_once_with( host=mongodb_uri, **self.backend._prepare_client_options() ) assert sentinel.connection == connection @patch('celery.backends.mongodb.MongoBackend._get_connection') def test_get_database_no_existing(self, mock_get_connection): # Should really check for combinations of these two, to be complete. 
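# With user/password configured, the lazily created database handle must authenticate exactly once and be cached on the backend instance.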
self.backend.user = MONGODB_USER self.backend.password = MONGODB_PASSWORD mock_database = Mock() mock_connection = MagicMock(spec=['__getitem__']) mock_connection.__getitem__.return_value = mock_database mock_get_connection.return_value = mock_connection database = self.backend.database assert database is mock_database assert self.backend.__dict__['database'] is mock_database mock_database.authenticate.assert_called_once_with( MONGODB_USER, MONGODB_PASSWORD) @patch('celery.backends.mongodb.MongoBackend._get_connection') def test_get_database_no_existing_no_auth(self, mock_get_connection): # Should really check for combinations of these two, to be complete. self.backend.user = None self.backend.password = None mock_database = Mock() mock_connection = MagicMock(spec=['__getitem__']) mock_connection.__getitem__.return_value = mock_database mock_get_connection.return_value = mock_connection database = self.backend.database assert database is mock_database mock_database.authenticate.assert_not_called() assert self.backend.__dict__['database'] is mock_database @patch('celery.backends.mongodb.MongoBackend._get_database') def test_store_result(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._store_result( sentinel.task_id, sentinel.result, sentinel.status) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) mock_collection.save.assert_called_once_with(ANY) assert sentinel.result == ret_val mock_collection.save.side_effect = InvalidDocument() with pytest.raises(EncodeError): self.backend._store_result( sentinel.task_id, sentinel.result, sentinel.status) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_get_task_meta_for(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_collection.find_one.return_value = MagicMock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._get_task_meta_for(sentinel.task_id) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) assert list(sorted([ 'status', 'task_id', 'date_done', 'traceback', 'result', 'children', ])) == list(sorted(ret_val.keys())) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_get_task_meta_for_no_result(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_collection.find_one.return_value = None mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._get_task_meta_for(sentinel.task_id) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) assert {'status': states.PENDING, 'result': None} == ret_val @patch('celery.backends.mongodb.MongoBackend._get_database') def test_save_group(self, mock_get_database): self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database 
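# Route __getitem__ on the mocked database to the mocked collection, standing in for database[collection_name].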
mock_database.__getitem__.return_value = mock_collection res = [self.app.AsyncResult(i) for i in range(3)] ret_val = self.backend._save_group( sentinel.taskset_id, res, ) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with( MONGODB_GROUP_COLLECTION, ) mock_collection.save.assert_called_once_with(ANY) assert res == ret_val @patch('celery.backends.mongodb.MongoBackend._get_database') def test_restore_group(self, mock_get_database): self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_collection.find_one.return_value = { '_id': sentinel.taskset_id, 'result': [uuid(), uuid()], 'date_done': 1, } self.backend.decode.side_effect = lambda r: r mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection ret_val = self.backend._restore_group(sentinel.taskset_id) mock_get_database.assert_called_once_with() mock_collection.find_one.assert_called_once_with( {'_id': sentinel.taskset_id}) assert (sorted(['date_done', 'result', 'task_id']) == sorted(list(ret_val.keys()))) mock_collection.find_one.return_value = None self.backend._restore_group(sentinel.taskset_id) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_delete_group(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection self.backend._delete_group(sentinel.taskset_id) mock_get_database.assert_called_once_with() mock_collection.remove.assert_called_once_with( {'_id': sentinel.taskset_id}) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_forget(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__.return_value = mock_collection self.backend._forget(sentinel.task_id) mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with( MONGODB_COLLECTION) mock_collection.remove.assert_called_once_with( {'_id': sentinel.task_id}) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_cleanup(self, mock_get_database): self.backend.taskmeta_collection = MONGODB_COLLECTION self.backend.groupmeta_collection = MONGODB_GROUP_COLLECTION mock_database = Mock(spec=['__getitem__', '__setitem__'], name='MD') self.backend.collections = mock_collection = Mock() mock_get_database.return_value = mock_database mock_database.__getitem__ = Mock(name='MD.__getitem__') mock_database.__getitem__.return_value = mock_collection self.backend.app.now = datetime.datetime.utcnow self.backend.cleanup() mock_get_database.assert_called_once_with() mock_collection.remove.assert_called() def test_get_database_authfailure(self): x = MongoBackend(app=self.app) x._get_connection = Mock() conn = x._get_connection.return_value = {} db = conn[x.database_name] = Mock() db.authenticate.return_value = False x.user = 'jerry' x.password = 'cere4l' with pytest.raises(ImproperlyConfigured): x._get_database() db.authenticate.assert_called_with('jerry', 'cere4l') def test_prepare_client_options(self): with patch('pymongo.version_tuple', new=(3, 0, 3)): options = self.backend._prepare_client_options() assert options == { 'maxPoolSize': 
self.backend.max_pool_size } def test_as_uri_include_password(self): assert self.backend.as_uri(True) == self.default_url def test_as_uri_exclude_password(self): assert self.backend.as_uri() == self.sanitized_default_url def test_as_uri_include_password_replica_set(self): backend = MongoBackend(app=self.app, url=self.replica_set_url) assert backend.as_uri(True) == self.replica_set_url def test_as_uri_exclude_password_replica_set(self): backend = MongoBackend(app=self.app, url=self.replica_set_url) assert backend.as_uri() == self.sanitized_replica_set_url def test_regression_worker_startup_info(self): self.app.conf.result_backend = ( 'mongodb://user:password@host0.com:43437,host1.com:43437' '/work4us?replicaSet=rs&ssl=true' ) worker = self.app.Worker() with mock.stdouts(): worker.on_start() assert worker.startup_info() @skip.unless_module('pymongo') class test_MongoBackend_no_mock: def test_encode_decode(self, app): backend = MongoBackend(app=app) data = {'foo': 1} assert backend.decode(backend.encode(data)) backend.serializer = 'bson' assert backend.encode(data) == data assert backend.decode(data) == data def test_de(self, app): backend = MongoBackend(app=app) data = {'foo': 1} assert backend.encode(data) backend.serializer = 'bson' assert backend.encode(data) == data celery-4.1.0/t/unit/backends/test_consul.py0000644000175000017500000000141013130607475020637 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import Mock, skip from celery.backends.consul import ConsulBackend @skip.unless_module('consul') class test_ConsulBackend: def setup(self): self.backend = ConsulBackend( app=self.app, url='consul://localhost:800') def test_supports_autoexpire(self): assert self.backend.supports_autoexpire def test_consul_consistency(self): assert self.backend.consistency == 'consistent' def test_get(self): index = 100 data = {'Key': 'test-consul-1', 'Value': 'mypayload'} self.backend.client = Mock(name='c.client') self.backend.client.kv.get.return_value = (index, data) assert self.backend.get(data['Key']) == 'mypayload' celery-4.1.0/t/unit/contrib/0000755000175000017500000000000013135426347015617 5ustar omeromer00000000000000celery-4.1.0/t/unit/contrib/test_abortable.py0000644000175000017500000000264413130607475021167 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery.contrib.abortable import AbortableTask, AbortableAsyncResult class test_AbortableTask: def setup(self): @self.app.task(base=AbortableTask, shared=False) def abortable(): return True self.abortable = abortable def test_async_result_is_abortable(self): result = self.abortable.apply_async() tid = result.id assert isinstance( self.abortable.AsyncResult(tid), AbortableAsyncResult) def test_is_not_aborted(self): self.abortable.push_request() try: result = self.abortable.apply_async() tid = result.id assert not self.abortable.is_aborted(task_id=tid) finally: self.abortable.pop_request() def test_is_aborted_not_abort_result(self): self.abortable.AsyncResult = self.app.AsyncResult self.abortable.push_request() try: self.abortable.request.id = 'foo' assert not self.abortable.is_aborted() finally: self.abortable.pop_request() def test_abort_yields_aborted(self): self.abortable.push_request() try: result = self.abortable.apply_async() result.abort() tid = result.id assert self.abortable.is_aborted(task_id=tid) finally: self.abortable.pop_request() celery-4.1.0/t/unit/contrib/__init__.py0000644000175000017500000000000013130607475017714 0ustar 
omeromer00000000000000celery-4.1.0/t/unit/contrib/test_migrate.py0000644000175000017500000002454313130607475020666 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from contextlib import contextmanager from amqp import ChannelError from case import Mock, mock, patch from kombu import Connection, Producer, Queue, Exchange from kombu.transport.virtual import QoS from celery.contrib.migrate import ( StopFiltering, State, migrate_task, migrate_tasks, filter_callback, _maybe_queue, filter_status, move_by_taskmap, move_by_idmap, move_task_by_id, start_filter, task_id_in, task_id_eq, expand_dest, move, ) from celery.utils.encoding import bytes_t, ensure_bytes # hack to ignore error at shutdown QoS.restore_at_shutdown = False def Message(body, exchange='exchange', routing_key='rkey', compression=None, content_type='application/json', content_encoding='utf-8'): return Mock( attrs={ 'body': body, 'delivery_info': { 'exchange': exchange, 'routing_key': routing_key, }, 'headers': { 'compression': compression, }, 'content_type': content_type, 'content_encoding': content_encoding, 'properties': {} }, ) class test_State: def test_strtotal(self): x = State() assert x.strtotal == '?' x.total_apx = 100 assert x.strtotal == '100' def test_repr(self): x = State() assert repr(x) x.filtered = 'foo' assert repr(x) class test_move: @contextmanager def move_context(self, **kwargs): with patch('celery.contrib.migrate.start_filter') as start: with patch('celery.contrib.migrate.republish') as republish: pred = Mock(name='predicate') move(pred, app=self.app, connection=self.app.connection(), **kwargs) start.assert_called() callback = start.call_args[0][2] yield callback, pred, republish def msgpair(self, **kwargs): body = dict({'task': 'add', 'id': 'id'}, **kwargs) return body, Message(body) def test_move(self): with self.move_context() as (callback, pred, republish): pred.return_value = None body, message = self.msgpair() callback(body, message) message.ack.assert_not_called() republish.assert_not_called() pred.return_value = 'foo' callback(body, message) message.ack.assert_called_with() republish.assert_called() def test_move_transform(self): trans = Mock(name='transform') trans.return_value = Queue('bar') with self.move_context(transform=trans) as (callback, pred, republish): pred.return_value = 'foo' body, message = self.msgpair() with patch('celery.contrib.migrate.maybe_declare') as maybed: callback(body, message) trans.assert_called_with('foo') maybed.assert_called() republish.assert_called() def test_limit(self): with self.move_context(limit=1) as (callback, pred, republish): pred.return_value = 'foo' body, message = self.msgpair() with pytest.raises(StopFiltering): callback(body, message) republish.assert_called() def test_callback(self): cb = Mock() with self.move_context(callback=cb) as (callback, pred, republish): pred.return_value = 'foo' body, message = self.msgpair() callback(body, message) republish.assert_called() cb.assert_called() class test_start_filter: def test_start(self): with patch('celery.contrib.migrate.eventloop') as evloop: app = Mock() filt = Mock(name='filter') conn = Connection('memory://') evloop.side_effect = StopFiltering() app.amqp.queues = {'foo': Queue('foo'), 'bar': Queue('bar')} consumer = app.amqp.TaskConsumer.return_value = Mock(name='consum') consumer.queues = list(app.amqp.queues.values()) consumer.channel = conn.default_channel consumer.__enter__ = Mock(name='consumer.__enter__') consumer.__exit__ = 
Mock(name='consumer.__exit__') consumer.callbacks = [] def register_callback(x): consumer.callbacks.append(x) consumer.register_callback = register_callback start_filter(app, conn, filt, queues='foo,bar', ack_messages=True) body = {'task': 'add', 'id': 'id'} for callback in consumer.callbacks: callback(body, Message(body)) consumer.callbacks[:] = [] cb = Mock(name='callback=') start_filter(app, conn, filt, tasks='add,mul', callback=cb) for callback in consumer.callbacks: callback(body, Message(body)) cb.assert_called() on_declare_queue = Mock() start_filter(app, conn, filt, tasks='add,mul', queues='foo', on_declare_queue=on_declare_queue) on_declare_queue.assert_called() start_filter(app, conn, filt, queues=['foo', 'bar']) consumer.callbacks[:] = [] state = State() start_filter(app, conn, filt, tasks='add,mul', callback=cb, state=state, limit=1) stop_filtering_raised = False for callback in consumer.callbacks: try: callback(body, Message(body)) except StopFiltering: stop_filtering_raised = True assert state.count assert stop_filtering_raised class test_filter_callback: def test_filter(self): callback = Mock() filt = filter_callback(callback, ['add', 'mul']) t1 = {'task': 'add'} t2 = {'task': 'div'} message = Mock() filt(t2, message) callback.assert_not_called() filt(t1, message) callback.assert_called_with(t1, message) def test_task_id_in(): assert task_id_in(['A'], {'id': 'A'}, Mock()) assert not task_id_in(['A'], {'id': 'B'}, Mock()) def test_task_id_eq(): assert task_id_eq('A', {'id': 'A'}, Mock()) assert not task_id_eq('A', {'id': 'B'}, Mock()) def test_expand_dest(): assert expand_dest(None, 'foo', 'bar') == ('foo', 'bar') assert expand_dest(('b', 'x'), 'foo', 'bar') == ('b', 'x') def test_maybe_queue(): app = Mock() app.amqp.queues = {'foo': 313} assert _maybe_queue(app, 'foo') == 313 assert _maybe_queue(app, Queue('foo')) == Queue('foo') def test_filter_status(): with mock.stdouts() as (stdout, stderr): filter_status(State(), {'id': '1', 'task': 'add'}, Mock()) assert stdout.getvalue() def test_move_by_taskmap(): with patch('celery.contrib.migrate.move') as move: move_by_taskmap({'add': Queue('foo')}) move.assert_called() cb = move.call_args[0][0] assert cb({'task': 'add'}, Mock()) def test_move_by_idmap(): with patch('celery.contrib.migrate.move') as move: move_by_idmap({'123f': Queue('foo')}) move.assert_called() cb = move.call_args[0][0] assert cb({'id': '123f'}, Mock()) def test_move_task_by_id(): with patch('celery.contrib.migrate.move') as move: move_task_by_id('123f', Queue('foo')) move.assert_called() cb = move.call_args[0][0] assert cb({'id': '123f'}, Mock()) == Queue('foo') class test_migrate_task: def test_removes_compression_header(self): x = Message('foo', compression='zlib') producer = Mock() migrate_task(producer, x.body, x) producer.publish.assert_called() args, kwargs = producer.publish.call_args assert isinstance(args[0], bytes_t) assert 'compression' not in kwargs['headers'] assert kwargs['compression'] == 'zlib' assert kwargs['content_type'] == 'application/json' assert kwargs['content_encoding'] == 'utf-8' assert kwargs['exchange'] == 'exchange' assert kwargs['routing_key'] == 'rkey' class test_migrate_tasks: def test_migrate(self, app, name='testcelery'): connection_kwargs = dict( transport_options={'polling_interval': 0.01} ) x = Connection('memory://foo', **connection_kwargs) y = Connection('memory://foo', **connection_kwargs) # use separate state x.default_channel.queues = {} y.default_channel.queues = {} ex = Exchange(name, 'direct') q = Queue(name, 
exchange=ex, routing_key=name) q(x.default_channel).declare() Producer(x).publish('foo', exchange=name, routing_key=name) Producer(x).publish('bar', exchange=name, routing_key=name) Producer(x).publish('baz', exchange=name, routing_key=name) assert x.default_channel.queues assert not y.default_channel.queues migrate_tasks(x, y, accept=['text/plain'], app=app) yq = q(y.default_channel) assert yq.get().body == ensure_bytes('foo') assert yq.get().body == ensure_bytes('bar') assert yq.get().body == ensure_bytes('baz') Producer(x).publish('foo', exchange=name, routing_key=name) callback = Mock() migrate_tasks(x, y, callback=callback, accept=['text/plain'], app=app) callback.assert_called() migrate = Mock() Producer(x).publish('baz', exchange=name, routing_key=name) migrate_tasks(x, y, callback=callback, migrate=migrate, accept=['text/plain'], app=app) migrate.assert_called() with patch('kombu.transport.virtual.Channel.queue_declare') as qd: def effect(*args, **kwargs): if kwargs.get('passive'): raise ChannelError('some channel error') return 0, 3, 0 qd.side_effect = effect migrate_tasks(x, y, app=app) x = Connection('memory://', **connection_kwargs) x.default_channel.queues = {} y.default_channel.queues = {} callback = Mock() migrate_tasks(x, y, callback=callback, accept=['text/plain'], app=app) callback.assert_not_called() celery-4.1.0/t/unit/contrib/test_rdb.py0000644000175000017500000000617313130607475020004 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import errno import socket import pytest from case import Mock, patch, skip from celery.contrib.rdb import ( Rdb, debugger, set_trace, ) from celery.five import WhateverIO class SockErr(socket.error): errno = None class test_Rdb: @patch('celery.contrib.rdb.Rdb') def test_debugger(self, Rdb): x = debugger() assert x assert x is debugger() @patch('celery.contrib.rdb.debugger') @patch('celery.contrib.rdb._frame') def test_set_trace(self, _frame, debugger): assert set_trace(Mock()) assert set_trace() debugger.return_value.set_trace.assert_called() @patch('celery.contrib.rdb.Rdb.get_avail_port') @skip.if_pypy() def test_rdb(self, get_avail_port): sock = Mock() get_avail_port.return_value = (sock, 8000) sock.accept.return_value = (Mock(), ['helu']) out = WhateverIO() with Rdb(out=out) as rdb: get_avail_port.assert_called() assert 'helu' in out.getvalue() # set_quit with patch('sys.settrace') as settrace: rdb.set_quit() settrace.assert_called_with(None) # set_trace with patch('celery.contrib.rdb.Pdb.set_trace') as pset: with patch('celery.contrib.rdb._frame'): rdb.set_trace() rdb.set_trace(Mock()) pset.side_effect = SockErr pset.side_effect.errno = errno.ENOENT with pytest.raises(SockErr): rdb.set_trace() # _close_session rdb._close_session() rdb.active = True rdb._handle = None rdb._client = None rdb._sock = None rdb._close_session() # do_continue rdb.set_continue = Mock() rdb.do_continue(Mock()) rdb.set_continue.assert_called_with() # do_quit rdb.set_quit = Mock() rdb.do_quit(Mock()) rdb.set_quit.assert_called_with() @patch('socket.socket') @skip.if_pypy() def test_get_avail_port(self, sock): out = WhateverIO() sock.return_value.accept.return_value = (Mock(), ['helu']) with Rdb(out=out): pass with patch('celery.contrib.rdb.current_process') as curproc: curproc.return_value.name = 'PoolWorker-10' with Rdb(out=out): pass err = sock.return_value.bind.side_effect = SockErr() err.errno = errno.ENOENT with pytest.raises(SockErr): with Rdb(out=out): pass err.errno = errno.EADDRINUSE with pytest.raises(Exception): 
with Rdb(out=out): pass called = [0] def effect(*a, **kw): try: if called[0] > 50: return True raise err finally: called[0] += 1 sock.return_value.bind.side_effect = effect with Rdb(out=out): pass celery-4.1.0/t/unit/worker/0000755000175000017500000000000013135426347015470 5ustar omeromer00000000000000celery-4.1.0/t/unit/worker/test_strategy.py0000644000175000017500000001552213130607475020746 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from collections import defaultdict from contextlib import contextmanager from case import Mock, patch from kombu.utils.limits import TokenBucket from celery.exceptions import InvalidTaskError from celery.worker import state from celery.worker.strategy import proto1_to_proto2 from celery.utils.time import rate class test_proto1_to_proto2: def setup(self): self.message = Mock(name='message') self.body = { 'args': (1,), 'kwargs': {'foo': 'baz'}, 'utc': False, 'taskset': '123', } def test_message_without_args(self): self.body.pop('args') body, _, _, _ = proto1_to_proto2(self.message, self.body) assert body[:2] == ((), {'foo': 'baz'}) def test_message_without_kwargs(self): self.body.pop('kwargs') body, _, _, _ = proto1_to_proto2(self.message, self.body) assert body[:2] == ((1,), {}) def test_message_kwargs_not_mapping(self): self.body['kwargs'] = (2,) with pytest.raises(InvalidTaskError): proto1_to_proto2(self.message, self.body) def test_message_no_taskset_id(self): self.body.pop('taskset') assert proto1_to_proto2(self.message, self.body) def test_message(self): body, headers, decoded, utc = proto1_to_proto2(self.message, self.body) assert body == ((1,), {'foo': 'baz'}, { 'callbacks': None, 'errbacks': None, 'chord': None, 'chain': None, }) assert headers == dict(self.body, group='123') assert decoded assert not utc class test_default_strategy_proto2: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add def get_message_class(self): return self.TaskMessage def prepare_message(self, message): return message class Context(object): def __init__(self, sig, s, reserved, consumer, message): self.sig = sig self.s = s self.reserved = reserved self.consumer = consumer self.message = message def __call__(self, callbacks=[], **kwargs): return self.s( self.message, (self.message.payload if not self.message.headers.get('id') else None), self.message.ack, self.message.reject, callbacks, **kwargs ) def was_reserved(self): return self.reserved.called def was_rate_limited(self): assert not self.was_reserved() return self.consumer._limit_task.called def was_scheduled(self): assert not self.was_reserved() assert not self.was_rate_limited() return self.consumer.timer.call_at.called def event_sent(self): return self.consumer.event_dispatcher.send.call_args def get_request(self): if self.was_reserved(): return self.reserved.call_args[0][0] if self.was_rate_limited(): return self.consumer._limit_task.call_args[0][0] if self.was_scheduled(): return self.consumer.timer.call_at.call_args[0][0] raise ValueError('request not handled') @contextmanager def _context(self, sig, rate_limits=True, events=True, utc=True, limit=None): assert sig.type.Strategy reserved = Mock() consumer = Mock() consumer.task_buckets = defaultdict(lambda: None) if limit: bucket = TokenBucket(rate(limit), capacity=1) consumer.task_buckets[sig.task] = bucket consumer.controller.state.revoked = set() consumer.disable_rate_limits = not rate_limits consumer.event_dispatcher.enabled = events s = sig.type.start_strategy(self.app, 
consumer, task_reserved=reserved) assert s message = self.task_message_from_sig( self.app, sig, utc=utc, TaskMessage=self.get_message_class(), ) message = self.prepare_message(message) yield self.Context(sig, s, reserved, consumer, message) def test_when_logging_disabled(self): with patch('celery.worker.strategy.logger') as logger: logger.isEnabledFor.return_value = False with self._context(self.add.s(2, 2)) as C: C() logger.info.assert_not_called() def test_task_strategy(self): with self._context(self.add.s(2, 2)) as C: C() assert C.was_reserved() req = C.get_request() C.consumer.on_task_request.assert_called_with(req) assert C.event_sent() def test_callbacks(self): with self._context(self.add.s(2, 2)) as C: callbacks = [Mock(name='cb1'), Mock(name='cb2')] C(callbacks=callbacks) req = C.get_request() for callback in callbacks: callback.assert_called_with(req) def test_when_events_disabled(self): with self._context(self.add.s(2, 2), events=False) as C: C() assert C.was_reserved() assert not C.event_sent() def test_eta_task(self): with self._context(self.add.s(2, 2).set(countdown=10)) as C: C() assert C.was_scheduled() C.consumer.qos.increment_eventually.assert_called_with() def test_eta_task_utc_disabled(self): with self._context(self.add.s(2, 2).set(countdown=10), utc=False) as C: C() assert C.was_scheduled() C.consumer.qos.increment_eventually.assert_called_with() def test_when_rate_limited(self): task = self.add.s(2, 2) with self._context(task, rate_limits=True, limit='1/m') as C: C() assert C.was_rate_limited() def test_when_rate_limited__limits_disabled(self): task = self.add.s(2, 2) with self._context(task, rate_limits=False, limit='1/m') as C: C() assert C.was_reserved() def test_when_revoked(self): task = self.add.s(2, 2) task.freeze() try: with self._context(task) as C: C.consumer.controller.state.revoked.add(task.id) state.revoked.add(task.id) C() with pytest.raises(ValueError): C.get_request() finally: state.revoked.discard(task.id) class test_default_strategy_proto1(test_default_strategy_proto2): def get_message_class(self): return self.TaskMessage1 class test_default_strategy_proto1__no_utc(test_default_strategy_proto2): def get_message_class(self): return self.TaskMessage1 def prepare_message(self, message): message.payload['utc'] = False return message celery-4.1.0/t/unit/worker/__init__.py0000644000175000017500000000000013130607475017565 0ustar omeromer00000000000000celery-4.1.0/t/unit/worker/test_heartbeat.py0000644000175000017500000000344313130607475021042 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import Mock from celery.worker.heartbeat import Heart class MockDispatcher(object): heart = None next_iter = 0 def __init__(self): self.sent = [] self.on_enabled = set() self.on_disabled = set() self.enabled = True def send(self, msg, **_fields): self.sent.append(msg) if self.heart: if self.next_iter > 10: self.heart._shutdown.set() self.next_iter += 1 class MockTimer(object): def call_repeatedly(self, secs, fun, args=(), kwargs={}): class entry(tuple): canceled = False def cancel(self): self.canceled = True return entry((secs, fun, args, kwargs)) def cancel(self, entry): entry.cancel() class test_Heart: def test_start_stop(self): timer = MockTimer() eventer = MockDispatcher() h = Heart(timer, eventer, interval=1) h.start() assert h.tref h.stop() assert h.tref is None h.stop() def test_send_sends_signal(self): h = Heart(MockTimer(), MockDispatcher(), interval=1) h._send_sent_signal = None h._send('worker-heartbeat') 
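# With _send_sent_signal unset, the send must be a silent no-op; then attach a mock signal and verify it fires with the heart instance as sender.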
h._send_sent_signal = Mock(name='send_sent_signal') h._send('worker') h._send_sent_signal.assert_called_with(sender=h) def test_start_when_disabled(self): timer = MockTimer() eventer = MockDispatcher() eventer.enabled = False h = Heart(timer, eventer) h.start() assert not h.tref def test_stop_when_disabled(self): timer = MockTimer() eventer = MockDispatcher() eventer.enabled = False h = Heart(timer, eventer) h.stop() celery-4.1.0/t/unit/worker/test_control.py0000644000175000017500000005602313130607475020565 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import sys import socket from collections import defaultdict from datetime import datetime, timedelta from case import Mock, call, patch from kombu import pidbox from kombu.utils.uuid import uuid from celery.five import Queue as FastQueue from celery.utils.timer2 import Timer from celery.worker import WorkController as _WC # noqa from celery.worker import consumer from celery.worker import control from celery.worker import state as worker_state from celery.worker.request import Request from celery.worker.state import revoked from celery.worker.pidbox import Pidbox, gPidbox from celery.utils.collections import AttributeDict hostname = socket.gethostname() class WorkController(object): autoscaler = None def stats(self): return {'total': worker_state.total_count} class Consumer(consumer.Consumer): def __init__(self, app): self.app = app self.buffer = FastQueue() self.timer = Timer() self.event_dispatcher = Mock() self.controller = WorkController() self.task_consumer = Mock() self.prefetch_multiplier = 1 self.initial_prefetch_count = 1 from celery.concurrency.base import BasePool self.pool = BasePool(10) self.task_buckets = defaultdict(lambda: None) self.hub = None def call_soon(self, p, *args, **kwargs): return p(*args, **kwargs) class test_Pidbox: def test_shutdown(self): with patch('celery.worker.pidbox.ignore_errors') as eig: parent = Mock() pbox = Pidbox(parent) pbox._close_channel = Mock() assert pbox.c is parent pconsumer = pbox.consumer = Mock() cancel = pconsumer.cancel pbox.shutdown(parent) eig.assert_called_with(parent, cancel) pbox._close_channel.assert_called_with(parent) class test_Pidbox_green: def test_stop(self): parent = Mock() g = gPidbox(parent) stopped = g._node_stopped = Mock() shutdown = g._node_shutdown = Mock() close_chan = g._close_channel = Mock() g.stop(parent) shutdown.set.assert_called_with() stopped.wait.assert_called_with() close_chan.assert_called_with(parent) assert g._node_stopped is None assert g._node_shutdown is None close_chan.reset() g.stop(parent) close_chan.assert_called_with(parent) def test_resets(self): parent = Mock() g = gPidbox(parent) g._resets = 100 g.reset() assert g._resets == 101 def test_loop(self): parent = Mock() conn = self.app.connection_for_read() parent.connection_for_read.return_value = conn drain = conn.drain_events = Mock() g = gPidbox(parent) parent.connection = Mock() do_reset = g._do_reset = Mock() call_count = [0] def se(*args, **kwargs): if call_count[0] > 2: g._node_shutdown.set() g.reset() call_count[0] += 1 drain.side_effect = se g.loop(parent) assert do_reset.call_count == 4 class test_ControlPanel: def setup(self): self.panel = self.create_panel(consumer=Consumer(self.app)) @self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False) def mytask(): pass self.mytask = mytask def create_state(self, **kwargs): kwargs.setdefault('app', self.app) kwargs.setdefault('hostname', hostname) kwargs.setdefault('tset', set) 
return AttributeDict(kwargs) def create_panel(self, **kwargs): return self.app.control.mailbox.Node( hostname=hostname, state=self.create_state(**kwargs), handlers=control.Panel.data, ) def test_enable_events(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) evd = consumer.event_dispatcher evd.groups = set() panel.handle('enable_events') assert not evd.groups evd.groups = {'worker'} panel.handle('enable_events') assert 'task' in evd.groups evd.groups = {'task'} assert 'already enabled' in panel.handle('enable_events')['ok'] def test_disable_events(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) evd = consumer.event_dispatcher evd.enabled = True evd.groups = {'task'} panel.handle('disable_events') assert 'task' not in evd.groups assert 'already disabled' in panel.handle('disable_events')['ok'] def test_clock(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.state.app.clock.value = 313 x = panel.handle('clock') assert x['clock'] == 313 def test_hello(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.state.app.clock.value = 313 panel.state.hostname = 'elaine@vandelay.com' worker_state.revoked.add('revoked1') try: assert panel.handle('hello', { 'from_node': 'elaine@vandelay.com', }) is None x = panel.handle('hello', { 'from_node': 'george@vandelay.com', }) assert x['clock'] == 314 # incremented x = panel.handle('hello', { 'from_node': 'george@vandelay.com', 'revoked': {'1234', '4567', '891'} }) assert 'revoked1' in x['revoked'] assert '1234' in x['revoked'] assert '4567' in x['revoked'] assert '891' in x['revoked'] assert x['clock'] == 315 # incremented finally: worker_state.revoked.discard('revoked1') def test_conf(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) panel.app = self.app panel.app.finalize() self.app.conf.some_key6 = 'hello world' x = panel.handle('dump_conf') assert 'some_key6' in x def test_election(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) consumer.gossip = Mock() panel.handle( 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, ) consumer.gossip.election.assert_called_with('id', 'topic', 'action') def test_election__no_gossip(self): consumer = Mock(name='consumer') consumer.gossip = None panel = self.create_panel(consumer=consumer) panel.handle( 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, ) def test_heartbeat(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) event_dispatcher = consumer.event_dispatcher event_dispatcher.enabled = True panel.handle('heartbeat') assert ('worker-heartbeat',) in event_dispatcher.send.call_args def test_time_limit(self): panel = self.create_panel(consumer=Mock()) r = panel.handle('time_limit', arguments=dict( task_name=self.mytask.name, hard=30, soft=10)) assert self.mytask.time_limit == 30 assert self.mytask.soft_time_limit == 10 assert 'ok' in r r = panel.handle('time_limit', arguments=dict( task_name=self.mytask.name, hard=None, soft=None)) assert self.mytask.time_limit is None assert self.mytask.soft_time_limit is None assert 'ok' in r r = panel.handle('time_limit', arguments=dict( task_name='248e8afya9s8dh921eh928', hard=30)) assert 'error' in r def test_active_queues(self): import kombu x = kombu.Consumer(self.app.connection_for_read(), [kombu.Queue('foo', kombu.Exchange('foo'), 'foo'), kombu.Queue('bar', kombu.Exchange('bar'), 'bar')], auto_declare=False) consumer = 
Mock() consumer.task_consumer = x panel = self.create_panel(consumer=consumer) r = panel.handle('active_queues') assert list(sorted(q['name'] for q in r)) == ['bar', 'foo'] def test_active_queues__empty(self): consumer = Mock(name='consumer') panel = self.create_panel(consumer=consumer) consumer.task_consumer = None assert not panel.handle('active_queues') def test_dump_tasks(self): info = '\n'.join(self.panel.handle('dump_tasks')) assert 'mytask' in info assert 'rate_limit=200' in info def test_dump_tasks2(self): prev, control.DEFAULT_TASK_INFO_ITEMS = ( control.DEFAULT_TASK_INFO_ITEMS, []) try: info = '\n'.join(self.panel.handle('dump_tasks')) assert 'mytask' in info assert 'rate_limit=200' not in info finally: control.DEFAULT_TASK_INFO_ITEMS = prev def test_stats(self): prev_count, worker_state.total_count = worker_state.total_count, 100 try: assert self.panel.handle('stats')['total'] == 100 finally: worker_state.total_count = prev_count def test_report(self): self.panel.handle('report') def test_active(self): r = Request( self.TaskMessage(self.mytask.name, 'do re mi'), app=self.app, ) worker_state.active_requests.add(r) try: assert self.panel.handle('dump_active') finally: worker_state.active_requests.discard(r) def test_pool_grow(self): class MockPool(object): def __init__(self, size=1): self.size = size def grow(self, n=1): self.size += n def shrink(self, n=1): self.size -= n @property def num_processes(self): return self.size consumer = Consumer(self.app) consumer.prefetch_multiplier = 8 consumer.qos = Mock(name='qos') consumer.pool = MockPool(1) panel = self.create_panel(consumer=consumer) panel.handle('pool_grow') assert consumer.pool.size == 2 consumer.qos.increment_eventually.assert_called_with(8) assert consumer.initial_prefetch_count == 16 panel.handle('pool_shrink') assert consumer.pool.size == 1 consumer.qos.decrement_eventually.assert_called_with(8) assert consumer.initial_prefetch_count == 8 panel.state.consumer = Mock() panel.state.consumer.controller = Mock() sc = panel.state.consumer.controller.autoscaler = Mock() panel.handle('pool_grow') sc.force_scale_up.assert_called() panel.handle('pool_shrink') sc.force_scale_down.assert_called() def test_add__cancel_consumer(self): class MockConsumer(object): queues = [] canceled = [] consuming = False hub = Mock(name='hub') def add_queue(self, queue): self.queues.append(queue.name) def consume(self): self.consuming = True def cancel_by_queue(self, queue): self.canceled.append(queue) def consuming_from(self, queue): return queue in self.queues consumer = Consumer(self.app) consumer.task_consumer = MockConsumer() panel = self.create_panel(consumer=consumer) panel.handle('add_consumer', {'queue': 'MyQueue'}) assert 'MyQueue' in consumer.task_consumer.queues assert consumer.task_consumer.consuming panel.handle('add_consumer', {'queue': 'MyQueue'}) panel.handle('cancel_consumer', {'queue': 'MyQueue'}) assert 'MyQueue' in consumer.task_consumer.canceled def test_revoked(self): worker_state.revoked.clear() worker_state.revoked.add('a1') worker_state.revoked.add('a2') try: assert sorted(self.panel.handle('dump_revoked')) == ['a1', 'a2'] finally: worker_state.revoked.clear() def test_dump_schedule(self): consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) assert not panel.handle('dump_schedule') r = Request( self.TaskMessage(self.mytask.name, 'CAFEBABE'), app=self.app, ) consumer.timer.schedule.enter_at( consumer.timer.Entry(lambda x: x, (r,)), datetime.now() + timedelta(seconds=10)) 
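# Also schedule an entry whose argument is not a Request; dump_schedule should still succeed, skipping non-request items.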
consumer.timer.schedule.enter_at( consumer.timer.Entry(lambda x: x, (object(),)), datetime.now() + timedelta(seconds=10)) assert panel.handle('dump_schedule') def test_dump_reserved(self): consumer = Consumer(self.app) req = Request( self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app, ) # ^ need to keep reference for reserved_tasks WeakSet. worker_state.task_reserved(req) try: panel = self.create_panel(consumer=consumer) response = panel.handle('dump_reserved', {'safe': True}) assert response[0]['name'] == self.mytask.name assert response[0]['hostname'] == socket.gethostname() worker_state.reserved_requests.clear() assert not panel.handle('dump_reserved') finally: worker_state.reserved_requests.clear() def test_rate_limit_invalid_rate_limit_string(self): e = self.panel.handle('rate_limit', arguments=dict( task_name='tasks.add', rate_limit='x1240301#%!')) assert 'Invalid rate limit string' in e.get('error') def test_rate_limit(self): class xConsumer(object): reset = False def reset_rate_limits(self): self.reset = True consumer = xConsumer() panel = self.create_panel(app=self.app, consumer=consumer) task = self.app.tasks[self.mytask.name] panel.handle('rate_limit', arguments=dict(task_name=task.name, rate_limit='100/m')) assert task.rate_limit == '100/m' assert consumer.reset consumer.reset = False panel.handle('rate_limit', arguments=dict( task_name=task.name, rate_limit=0, )) assert task.rate_limit == 0 assert consumer.reset def test_rate_limit_nonexistent_task(self): self.panel.handle('rate_limit', arguments={ 'task_name': 'xxxx.does.not.exist', 'rate_limit': '1000/s'}) def test_unexposed_command(self): with pytest.raises(KeyError): self.panel.handle('foo', arguments={}) def test_revoke_with_name(self): tid = uuid() m = { 'method': 'revoke', 'destination': hostname, 'arguments': { 'task_id': tid, 'task_name': self.mytask.name, }, } self.panel.handle_message(m, None) assert tid in revoked def test_revoke_with_name_not_in_registry(self): tid = uuid() m = { 'method': 'revoke', 'destination': hostname, 'arguments': { 'task_id': tid, 'task_name': 'xxxxxxxxx33333333388888', }, } self.panel.handle_message(m, None) assert tid in revoked def test_revoke(self): tid = uuid() m = { 'method': 'revoke', 'destination': hostname, 'arguments': { 'task_id': tid, }, } self.panel.handle_message(m, None) assert tid in revoked m = { 'method': 'revoke', 'destination': 'does.not.exist', 'arguments': { 'task_id': tid + 'xxx', }, } self.panel.handle_message(m, None) assert tid + 'xxx' not in revoked def test_revoke_terminate(self): request = Mock() request.id = tid = uuid() state = self.create_state() state.consumer = Mock() worker_state.task_reserved(request) try: r = control.revoke(state, tid, terminate=True) assert tid in revoked assert request.terminate.call_count assert 'terminate:' in r['ok'] # unknown task id only revokes r = control.revoke(state, uuid(), terminate=True) assert 'tasks unknown' in r['ok'] finally: worker_state.task_ready(request) def test_autoscale(self): self.panel.state.consumer = Mock() self.panel.state.consumer.controller = Mock() sc = self.panel.state.consumer.controller.autoscaler = Mock() sc.update.return_value = 10, 2 m = {'method': 'autoscale', 'destination': hostname, 'arguments': {'max': '10', 'min': '2'}} r = self.panel.handle_message(m, None) assert 'ok' in r self.panel.state.consumer.controller.autoscaler = None r = self.panel.handle_message(m, None) assert 'error' in r def test_ping(self): m = {'method': 'ping', 'destination': hostname} r = 
self.panel.handle_message(m, None) assert r == {'ok': 'pong'} def test_shutdown(self): m = {'method': 'shutdown', 'destination': hostname} with pytest.raises(SystemExit): self.panel.handle_message(m, None) def test_panel_reply(self): replies = [] class _Node(pidbox.Node): def reply(self, data, exchange, routing_key, **kwargs): replies.append(data) panel = _Node( hostname=hostname, state=self.create_state(consumer=Consumer(self.app)), handlers=control.Panel.data, mailbox=self.app.control.mailbox, ) r = panel.dispatch('ping', reply_to={ 'exchange': 'x', 'routing_key': 'x', }) assert r == {'ok': 'pong'} assert replies[0] == {panel.hostname: {'ok': 'pong'}} def test_pool_restart(self): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer consumer.controller.pool.restart = Mock() consumer.reset_rate_limits = Mock(name='reset_rate_limits()') consumer.update_strategies = Mock(name='update_strategies()') consumer.event_dispatcher = Mock(name='evd') panel = self.create_panel(consumer=consumer) assert panel.state.consumer.controller.consumer is consumer panel.app = self.app _import = panel.app.loader.import_from_cwd = Mock() _reload = Mock() with pytest.raises(ValueError): panel.handle('pool_restart', {'reloader': _reload}) self.app.conf.worker_pool_restarts = True panel.handle('pool_restart', {'reloader': _reload}) consumer.controller.pool.restart.assert_called() consumer.reset_rate_limits.assert_called_with() consumer.update_strategies.assert_called_with() _reload.assert_not_called() _import.assert_not_called() consumer.controller.pool.restart.side_effect = NotImplementedError() panel.handle('pool_restart', {'reloader': _reload}) consumer.controller.consumer = None panel.handle('pool_restart', {'reloader': _reload}) @patch('celery.worker.worker.logger.debug') def test_pool_restart_import_modules(self, _debug): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer consumer.controller.pool.restart = Mock() consumer.reset_rate_limits = Mock(name='reset_rate_limits()') consumer.update_strategies = Mock(name='update_strategies()') panel = self.create_panel(consumer=consumer) panel.app = self.app assert panel.state.consumer.controller.consumer is consumer _import = consumer.controller.app.loader.import_from_cwd = Mock() _reload = Mock() self.app.conf.worker_pool_restarts = True with patch('sys.modules'): panel.handle('pool_restart', { 'modules': ['foo', 'bar'], 'reloader': _reload, }) consumer.controller.pool.restart.assert_called() consumer.reset_rate_limits.assert_called_with() consumer.update_strategies.assert_called_with() _reload.assert_not_called() _import.assert_has_calls([call('bar'), call('foo')], any_order=True) assert _import.call_count == 2 def test_pool_restart_reload_modules(self): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer consumer.controller.pool.restart = Mock() consumer.reset_rate_limits = Mock(name='reset_rate_limits()') consumer.update_strategies = Mock(name='update_strategies()') panel = self.create_panel(consumer=consumer) panel.app = self.app _import = panel.app.loader.import_from_cwd = Mock() _reload = Mock() self.app.conf.worker_pool_restarts = True with patch.dict(sys.modules, {'foo': None}): panel.handle('pool_restart', { 'modules': ['foo'], 'reload': False, 'reloader': _reload, }) consumer.controller.pool.restart.assert_called() _reload.assert_not_called() _import.assert_not_called() 
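# Second pass: with reload=True and 'foo' present in sys.modules, pool_restart should invoke the reloader rather than re-importing the module.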
_import.reset_mock() _reload.reset_mock() consumer.controller.pool.restart.reset_mock() panel.handle('pool_restart', { 'modules': ['foo'], 'reload': True, 'reloader': _reload, }) consumer.controller.pool.restart.assert_called() _reload.assert_called() _import.assert_not_called() def test_query_task(self): consumer = Consumer(self.app) consumer.controller = _WC(app=self.app) consumer.controller.consumer = consumer panel = self.create_panel(consumer=consumer) panel.app = self.app req1 = Request( self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app, ) worker_state.task_reserved(req1) try: assert not panel.handle('query_task', {'ids': {'1daa'}}) ret = panel.handle('query_task', {'ids': {req1.id}}) assert req1.id in ret assert ret[req1.id][0] == 'reserved' worker_state.active_requests.add(req1) try: ret = panel.handle('query_task', {'ids': {req1.id}}) assert ret[req1.id][0] == 'active' finally: worker_state.active_requests.clear() ret = panel.handle('query_task', {'ids': {req1.id}}) assert ret[req1.id][0] == 'reserved' finally: worker_state.reserved_requests.clear() celery-4.1.0/t/unit/worker/test_bootsteps.py0000644000175000017500000002236013130607475021124 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from celery import bootsteps class test_StepFormatter: def test_get_prefix(self): f = bootsteps.StepFormatter() s = Mock() s.last = True assert f._get_prefix(s) == f.blueprint_prefix s2 = Mock() s2.last = False s2.conditional = True assert f._get_prefix(s2) == f.conditional_prefix s3 = Mock() s3.last = s3.conditional = False assert f._get_prefix(s3) == '' def test_node(self): f = bootsteps.StepFormatter() f.draw_node = Mock() step = Mock() step.last = False f.node(step, x=3) f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3}) step.last = True f.node(step, x=3) f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3}) def test_edge(self): f = bootsteps.StepFormatter() f.draw_edge = Mock() a, b = Mock(), Mock() a.last = True f.edge(a, b, x=6) f.draw_edge.assert_called_with(a, b, f.edge_scheme, { 'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3', }) a.last = False f.edge(a, b, x=6) f.draw_edge.assert_called_with(a, b, f.edge_scheme, { 'x': 6, }) class test_Step: class Def(bootsteps.StartStopStep): name = 'test_Step.Def' def setup(self): self.steps = [] def test_blueprint_name(self, bp='test_blueprint_name'): class X(bootsteps.Step): blueprint = bp name = 'X' assert X.name == 'X' class Y(bootsteps.Step): name = '%s.Y' % bp assert Y.name == '{0}.Y'.format(bp) def test_init(self): assert self.Def(self) def test_create(self): self.Def(self).create(self) def test_include_if(self): x = self.Def(self) x.enabled = True assert x.include_if(self) x.enabled = False assert not x.include_if(self) def test_instantiate(self): assert isinstance( self.Def(self).instantiate(self.Def, self), self.Def, ) def test_include_when_enabled(self): x = self.Def(self) x.create = Mock() x.create.return_value = 'George' assert x.include(self) assert x.obj == 'George' x.create.assert_called_with(self) def test_include_when_disabled(self): x = self.Def(self) x.enabled = False x.create = Mock() assert not x.include(self) x.create.assert_not_called() def test_repr(self): x = self.Def(self) assert repr(x) class test_ConsumerStep: def test_interface(self): step = bootsteps.ConsumerStep(self) with pytest.raises(NotImplementedError): step.get_consumers(self) def test_start_stop_shutdown(self): consumer = Mock() 
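# The mock consumer above stands in for the consumer returned by get_consumers(); start/stop/shutdown should drive consume(), cancel() and channel.close() on it in turn.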
self.connection = Mock() class Step(bootsteps.ConsumerStep): def get_consumers(self, c): return [consumer] step = Step(self) assert step.get_consumers(self) == [consumer] step.start(self) consumer.consume.assert_called_with() step.stop(self) consumer.cancel.assert_called_with() step.shutdown(self) consumer.channel.close.assert_called_with() def test_start_no_consumers(self): self.connection = Mock() class Step(bootsteps.ConsumerStep): def get_consumers(self, c): return () step = Step(self) step.start(self) def test_close_no_consumer_channel(self): step = bootsteps.ConsumerStep(Mock()) step.consumers = [Mock()] step.consumers[0].channel = None step._close(Mock()) class test_StartStopStep: class Def(bootsteps.StartStopStep): name = 'test_StartStopStep.Def' def setup(self): self.steps = [] def test_start__stop(self): x = self.Def(self) x.create = Mock() # include creates the underlying object and sets # its x.obj attribute to it, as well as appending # it to the parent.steps list. x.include(self) assert self.steps assert self.steps[0] is x x.start(self) x.obj.start.assert_called_with() x.stop(self) x.obj.stop.assert_called_with() x.obj = None assert x.start(self) is None def test_terminate__no_obj(self): x = self.Def(self) x.obj = None x.terminate(Mock()) def test_include_when_disabled(self): x = self.Def(self) x.enabled = False x.include(self) assert not self.steps def test_terminate(self): x = self.Def(self) x.create = Mock() x.include(self) delattr(x.obj, 'terminate') x.terminate(self) x.obj.stop.assert_called_with() class test_Blueprint: class Blueprint(bootsteps.Blueprint): name = 'test_Blueprint' def test_steps_added_to_unclaimed(self): class tnA(bootsteps.Step): name = 'test_Blueprint.A' class tnB(bootsteps.Step): name = 'test_Blueprint.B' class xxA(bootsteps.Step): name = 'xx.A' class Blueprint(self.Blueprint): default_steps = [tnA, tnB] blueprint = Blueprint() assert tnA in blueprint.types assert tnB in blueprint.types assert xxA not in blueprint.types def test_init(self): blueprint = self.Blueprint() assert blueprint.name == 'test_Blueprint' def test_close__on_close_is_None(self): blueprint = self.Blueprint() blueprint.on_close = None blueprint.send_all = Mock() blueprint.close(1) blueprint.send_all.assert_called_with( 1, 'close', 'closing', reverse=False, ) def test_send_all_with_None_steps(self): parent = Mock() blueprint = self.Blueprint() parent.steps = [None, None, None] blueprint.send_all(parent, 'close', 'Closing', reverse=False) def test_send_all_raises(self): parent = Mock() blueprint = self.Blueprint() parent.steps = [Mock()] parent.steps[0].foo.side_effect = KeyError() blueprint.send_all(parent, 'foo', propagate=False) with pytest.raises(KeyError): blueprint.send_all(parent, 'foo', propagate=True) def test_stop_state_in_TERMINATE(self): blueprint = self.Blueprint() blueprint.state = bootsteps.TERMINATE blueprint.stop(Mock()) def test_join_raises_IGNORE_ERRORS(self): prev, bootsteps.IGNORE_ERRORS = bootsteps.IGNORE_ERRORS, (KeyError,) try: blueprint = self.Blueprint() blueprint.shutdown_complete = Mock() blueprint.shutdown_complete.wait.side_effect = KeyError('luke') blueprint.join(timeout=10) blueprint.shutdown_complete.wait.assert_called_with(timeout=10) finally: bootsteps.IGNORE_ERRORS = prev def test_connect_with(self): class b1s1(bootsteps.Step): pass class b1s2(bootsteps.Step): last = True class b2s1(bootsteps.Step): pass class b2s2(bootsteps.Step): last = True b1 = self.Blueprint([b1s1, b1s2]) b2 = self.Blueprint([b2s1, b2s2]) b1.apply(Mock()) b2.apply(Mock()) 
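# connect_with() should graft b2's dependency graph onto b1's, making b2's steps reachable from b1.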
b1.connect_with(b2) assert b1s1 in b1.graph assert b2s1 in b1.graph assert b2s2 in b1.graph assert repr(b1s1) assert str(b1s1) def test_topsort_raises_KeyError(self): class Step(bootsteps.Step): requires = ('xyxxx.fsdasewe.Unknown',) b = self.Blueprint([Step]) b.steps = b.claim_steps() with pytest.raises(ImportError): b._finalize_steps(b.steps) Step.requires = () b.steps = b.claim_steps() b._finalize_steps(b.steps) with patch('celery.bootsteps.DependencyGraph') as Dep: g = Dep.return_value = Mock() g.topsort.side_effect = KeyError('foo') with pytest.raises(KeyError): b._finalize_steps(b.steps) def test_apply(self): class MyBlueprint(bootsteps.Blueprint): name = 'test_apply' def modules(self): return ['A', 'B'] class B(bootsteps.Step): name = 'test_apply.B' class C(bootsteps.Step): name = 'test_apply.C' requires = [B] class A(bootsteps.Step): name = 'test_apply.A' requires = [C] class D(bootsteps.Step): name = 'test_apply.D' last = True x = MyBlueprint([A, D]) x.apply(self) assert isinstance(x.order[0], B) assert isinstance(x.order[1], C) assert isinstance(x.order[2], A) assert isinstance(x.order[3], D) assert A in x.types assert x[A.name] is x.order[2] def test_find_last_but_no_steps(self): class MyBlueprint(bootsteps.Blueprint): name = 'qwejwioqjewoqiej' x = MyBlueprint() x.apply(self) assert x._find_last() is None celery-4.1.0/t/unit/worker/test_revoke.py0000644000175000017500000000044713130607475020377 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery.worker import state class test_revoked: def test_is_working(self): state.revoked.add('foo') assert 'foo' in state.revoked state.revoked.pop_value('foo') assert 'foo' not in state.revoked celery-4.1.0/t/unit/worker/test_loops.py0000644000175000017500000003664313130607475020247 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import errno import socket import pytest from case import Mock from kombu.async import Hub, READ, WRITE, ERR from celery.bootsteps import CLOSE, RUN from celery.exceptions import ( InvalidTaskError, WorkerLostError, WorkerShutdown, WorkerTerminate, ) from celery.five import Empty, python_2_unicode_compatible from celery.platforms import EX_FAILURE from celery.worker import state from celery.worker.consumer import Consumer from celery.worker.loops import _quick_drain, asynloop, synloop @python_2_unicode_compatible class PromiseEqual(object): def __init__(self, fun, *args, **kwargs): self.fun = fun self.args = args self.kwargs = kwargs def __eq__(self, other): return (other.fun == self.fun and other.args == self.args and other.kwargs == self.kwargs) def __repr__(self): return ''.format(self) class X(object): def __init__(self, app, heartbeat=None, on_task_message=None, transport_driver_type=None): hub = Hub() ( self.obj, self.connection, self.consumer, self.blueprint, self.hub, self.qos, self.heartbeat, self.clock, ) = self.args = [Mock(name='obj'), Mock(name='connection'), Mock(name='consumer'), Mock(name='blueprint'), hub, Mock(name='qos'), heartbeat, Mock(name='clock')] self.connection.supports_heartbeats = True self.connection.get_heartbeat_interval.side_effect = ( lambda: self.heartbeat ) self.consumer.callbacks = [] self.obj.strategies = {} self.connection.connection_errors = (socket.error,) if transport_driver_type: self.connection.transport.driver_type = transport_driver_type self.hub.readers = {} self.hub.timer = Mock(name='hub.timer') self.hub.timer._queue = [Mock()] self.hub.fire_timers = Mock(name='hub.fire_timers') 
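# fire_timers() reports the time until the next timer fires; the loop uses that value as the poll timeout (see test_poll_empty below).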
self.hub.fire_timers.return_value = 1.7 self.hub.poller = Mock(name='hub.poller') self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close self.Hub = self.hub self.blueprint.state = RUN # need this for create_task_handler self._consumer = _consumer = Consumer( Mock(), timer=Mock(), controller=Mock(), app=app) _consumer.on_task_message = on_task_message or [] self.obj.create_task_handler = _consumer.create_task_handler self.on_unknown_message = self.obj.on_unknown_message = Mock( name='on_unknown_message', ) _consumer.on_unknown_message = self.on_unknown_message self.on_unknown_task = self.obj.on_unknown_task = Mock( name='on_unknown_task', ) _consumer.on_unknown_task = self.on_unknown_task self.on_invalid_task = self.obj.on_invalid_task = Mock( name='on_invalid_task', ) _consumer.on_invalid_task = self.on_invalid_task _consumer.strategies = self.obj.strategies def timeout_then_error(self, mock): def first(*args, **kwargs): mock.side_effect = socket.error() raise socket.timeout() mock.side_effect = first def close_then_error(self, mock=None, mod=0, exc=None): mock = Mock() if mock is None else mock def first(*args, **kwargs): if not mod or mock.call_count > mod: self.close() raise (socket.error() if exc is None else exc) mock.side_effect = first return mock def close(self, *args, **kwargs): self.blueprint.state = CLOSE def closer(self, mock=None, mod=0): mock = Mock() if mock is None else mock def closing(*args, **kwargs): if not mod or mock.call_count >= mod: self.close() mock.side_effect = closing return mock def get_task_callback(*args, **kwargs): x = X(*args, **kwargs) x.blueprint.state = CLOSE asynloop(*x.args) return x, x.consumer.on_message class test_asynloop: def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add def test_drain_after_consume(self): x, _ = get_task_callback(self.app, transport_driver_type='amqp') assert _quick_drain in [p.fun for p in x.hub._ready] def test_pool_did_not_start_at_startup(self): x = X(self.app) x.obj.restart_count = 0 x.obj.pool.did_start_ok.return_value = False with pytest.raises(WorkerLostError): asynloop(*x.args) def test_setup_heartbeat(self): x = X(self.app, heartbeat=10) x.hub.timer.call_repeatedly = Mock(name='x.hub.call_repeatedly()') x.blueprint.state = CLOSE asynloop(*x.args) x.consumer.consume.assert_called_with() x.obj.on_ready.assert_called_with() x.hub.timer.call_repeatedly.assert_called_with( 10 / 2.0, x.connection.heartbeat_check, (2.0,), ) def task_context(self, sig, **kwargs): x, on_task = get_task_callback(self.app, **kwargs) message = self.task_message_from_sig(self.app, sig) strategy = x.obj.strategies[sig.task] = Mock(name='strategy') return x, on_task, message, strategy def test_on_task_received(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) on_task(msg) strategy.assert_called_with( msg, None, PromiseEqual(x._consumer.call_soon, msg.ack_log_error), PromiseEqual(x._consumer.call_soon, msg.reject_log_error), [], ) def test_on_task_received_executes_on_task_message(self): cbs = [Mock(), Mock(), Mock()] x, on_task, msg, strategy = self.task_context( self.add.s(2, 2), on_task_message=cbs, ) on_task(msg) strategy.assert_called_with( msg, None, PromiseEqual(x._consumer.call_soon, msg.ack_log_error), PromiseEqual(x._consumer.call_soon, msg.reject_log_error), cbs, ) def test_on_task_message_missing_name(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) msg.headers.pop('task') on_task(msg) x.on_unknown_message.assert_called_with(msg.decode(), msg) 
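    # --- Illustrative sketch (editor's addition, not part of the original
    # suite): the tests above pin down the argument list the asynloop
    # on_task callback hands to a registered strategy -- the raw message, a
    # None body, ack/reject promises bound to the consumer's call_soon (the
    # PromiseEqual matchers), and the on_task_message callbacks.  The stub
    # below is hypothetical; it only mirrors that call signature as a
    # reading aid for the assertions around it.
    @staticmethod
    def _strategy_signature_sketch(message, body, ack, reject, callbacks):
        # A real strategy decodes the message, schedules execution, and
        # fulfils exactly one of the two promises; this sketch simply acks.
        ack()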
def test_on_task_pool_raises(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) strategy.side_effect = ValueError() with pytest.raises(ValueError): on_task(msg) def test_on_task_InvalidTaskError(self): x, on_task, msg, strategy = self.task_context(self.add.s(2, 2)) exc = strategy.side_effect = InvalidTaskError() on_task(msg) x.on_invalid_task.assert_called_with(None, msg, exc) def test_should_terminate(self): x = X(self.app) # XXX why aren't the errors propagated?!? state.should_terminate = True try: with pytest.raises(WorkerTerminate): asynloop(*x.args) finally: state.should_terminate = None def test_should_terminate_hub_close_raises(self): x = X(self.app) # XXX why aren't the errors propagated?!? state.should_terminate = EX_FAILURE x.hub.close.side_effect = MemoryError() try: with pytest.raises(WorkerTerminate): asynloop(*x.args) finally: state.should_terminate = None def test_should_stop(self): x = X(self.app) state.should_stop = 303 try: with pytest.raises(WorkerShutdown): asynloop(*x.args) finally: state.should_stop = None def test_updates_qos(self): x = X(self.app) x.qos.prev = 3 x.qos.value = 3 x.hub.on_tick.add(x.closer(mod=2)) x.hub.timer._queue = [1] asynloop(*x.args) x.qos.update.assert_not_called() x = X(self.app) x.qos.prev = 1 x.qos.value = 6 x.hub.on_tick.add(x.closer(mod=2)) asynloop(*x.args) x.qos.update.assert_called_with() x.hub.fire_timers.assert_called_with(propagate=(socket.error,)) def test_poll_empty(self): x = X(self.app) x.hub.readers = {6: Mock()} x.hub.timer._queue = [1] x.close_then_error(x.hub.poller.poll) x.hub.fire_timers.return_value = 33.37 poller = x.hub.poller poller.poll.return_value = [] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called_with(33.37) def test_poll_readable(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) poller = x.hub.poller poller.poll.return_value = [(6, READ)] with pytest.raises(socket.error): asynloop(*x.args) reader.assert_called_with(6) poller.poll.assert_called() def test_poll_readable_raises_Empty(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(6, READ)] reader.side_effect = Empty() with pytest.raises(socket.error): asynloop(*x.args) reader.assert_called_with(6) poller.poll.assert_called() def test_poll_writable(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(6, WRITE)] with pytest.raises(socket.error): asynloop(*x.args) writer.assert_called_with(6) poller.poll.assert_called() def test_poll_writable_none_registered(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(7, WRITE)] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called() def test_poll_unknown_event(self): x = X(self.app) writer = Mock(name='reader') x.hub.add_writer(6, writer, 6) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) poller = x.hub.poller poller.poll.return_value = [(6, 0)] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called() def test_poll_keep_draining_disabled(self): x = X(self.app) x.hub.writers = {6: Mock()} poll = 
x.hub.poller.poll def se(*args, **kwargs): poll.side_effect = socket.error() poll.side_effect = se poller = x.hub.poller poll.return_value = [(6, 0)] with pytest.raises(socket.error): asynloop(*x.args) poller.poll.assert_called() def test_poll_err_writable(self): x = X(self.app) writer = Mock(name='writer') x.hub.add_writer(6, writer, 6, 48) x.hub.on_tick.add(x.close_then_error(Mock(), 2)) poller = x.hub.poller poller.poll.return_value = [(6, ERR)] with pytest.raises(socket.error): asynloop(*x.args) writer.assert_called_with(6, 48) poller.poll.assert_called() def test_poll_write_generator(self): x = X(self.app) x.hub.remove = Mock(name='hub.remove()') def Gen(): yield 1 yield 2 gen = Gen() x.hub.add_writer(6, gen) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) x.hub.poller.poll.return_value = [(6, WRITE)] with pytest.raises(socket.error): asynloop(*x.args) assert gen.gi_frame.f_lasti != -1 x.hub.remove.assert_not_called() def test_poll_write_generator_stopped(self): x = X(self.app) def Gen(): raise StopIteration() yield gen = Gen() x.hub.add_writer(6, gen) x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) x.hub.poller.poll.return_value = [(6, WRITE)] x.hub.remove = Mock(name='hub.remove()') with pytest.raises(socket.error): asynloop(*x.args) assert gen.gi_frame is None def test_poll_write_generator_raises(self): x = X(self.app) def Gen(): raise ValueError('foo') yield gen = Gen() x.hub.add_writer(6, gen) x.hub.remove = Mock(name='hub.remove()') x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) x.hub.poller.poll.return_value = [(6, WRITE)] with pytest.raises(ValueError): asynloop(*x.args) assert gen.gi_frame is None x.hub.remove.assert_called_with(6) def test_poll_err_readable(self): x = X(self.app) reader = Mock(name='reader') x.hub.add_reader(6, reader, 6, 24) x.hub.on_tick.add(x.close_then_error(Mock(), 2)) poller = x.hub.poller poller.poll.return_value = [(6, ERR)] with pytest.raises(socket.error): asynloop(*x.args) reader.assert_called_with(6, 24) poller.poll.assert_called() def test_poll_raises_ValueError(self): x = X(self.app) x.hub.readers = {6: Mock()} poller = x.hub.poller x.close_then_error(poller.poll, exc=ValueError) asynloop(*x.args) poller.poll.assert_called() class test_synloop: def test_timeout_ignored(self): x = X(self.app) x.timeout_then_error(x.connection.drain_events) with pytest.raises(socket.error): synloop(*x.args) assert x.connection.drain_events.call_count == 2 def test_updates_qos_when_changed(self): x = X(self.app) x.qos.prev = 2 x.qos.value = 2 x.timeout_then_error(x.connection.drain_events) with pytest.raises(socket.error): synloop(*x.args) x.qos.update.assert_not_called() x.qos.value = 4 x.timeout_then_error(x.connection.drain_events) with pytest.raises(socket.error): synloop(*x.args) x.qos.update.assert_called_with() def test_ignores_socket_errors_when_closed(self): x = X(self.app) x.close_then_error(x.connection.drain_events) assert synloop(*x.args) is None class test_quick_drain: def setup(self): self.connection = Mock(name='connection') def test_drain(self): _quick_drain(self.connection, timeout=33.3) self.connection.drain_events.assert_called_with(timeout=33.3) def test_drain_error(self): exc = KeyError() exc.errno = 313 self.connection.drain_events.side_effect = exc with pytest.raises(KeyError): _quick_drain(self.connection, timeout=33.3) def test_drain_error_EAGAIN(self): exc = KeyError() exc.errno = errno.EAGAIN self.connection.drain_events.side_effect = exc _quick_drain(self.connection, timeout=33.3) 
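# --- Illustrative sketch (editor's addition, not part of the original
# suite): ``test_quick_drain`` above fixes the contract of the quick-drain
# helper -- drain pending events with a timeout, swallow only exceptions
# whose ``errno`` is EAGAIN ("try again"), and let every other error
# propagate, whatever its type.  A minimal stand-alone model of that
# contract, assuming nothing beyond what the three tests assert:

import errno as _errno


def _quick_drain_sketch(connection, timeout=0.1):
    try:
        connection.drain_events(timeout=timeout)
    except Exception as exc:
        # Only a "nothing to read right now" error is ignored; note the
        # tests use a KeyError with errno set, so the exception *type* is
        # irrelevant -- only its errno attribute is inspected.
        if getattr(exc, 'errno', None) != _errno.EAGAIN:
            raise


if __name__ == '__main__':
    class _Conn(object):
        def drain_events(self, timeout=None):
            err = OSError('resource temporarily unavailable')
            err.errno = _errno.EAGAIN
            raise err

    _quick_drain_sketch(_Conn())  # EAGAIN is swallowed silently
    print('EAGAIN ignored, as test_drain_error_EAGAIN expects')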
celery-4.1.0/t/unit/worker/test_request.py0000644000175000017500000010207313130607475020572 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import numbers import os import pytest import signal import socket import sys from datetime import datetime, timedelta from case import Mock, patch from billiard.einfo import ExceptionInfo from kombu.utils.encoding import default_encode, from_utf8, safe_str, safe_repr from kombu.utils.uuid import uuid from celery import states from celery.app.trace import ( trace_task, _trace_task_ret, TraceInfo, mro_lookup, build_tracer, setup_worker_optimizations, reset_worker_optimizations, ) from celery.exceptions import ( Ignore, InvalidTaskError, Reject, Retry, TaskRevokedError, Terminated, WorkerLostError, ) from celery.five import monotonic from celery.signals import task_revoked from celery.worker import request as module from celery.worker.request import ( Request, create_request_cls, logger as req_logger, ) from celery.worker.state import revoked class RequestCase: def setup(self): self.app.conf.result_serializer = 'pickle' @self.app.task(shared=False) def add(x, y, **kw_): return x + y self.add = add @self.app.task(shared=False) def mytask(i, **kwargs): return i ** i self.mytask = mytask @self.app.task(shared=False) def mytask_raising(i): raise KeyError(i) self.mytask_raising = mytask_raising def xRequest(self, name=None, id=None, args=None, kwargs=None, on_ack=None, on_reject=None, Request=Request, **head): args = [1] if args is None else args kwargs = {'f': 'x'} if kwargs is None else kwargs on_ack = on_ack or Mock(name='on_ack') on_reject = on_reject or Mock(name='on_reject') message = self.TaskMessage( name or self.mytask.name, id, args=args, kwargs=kwargs, **head ) return Request(message, app=self.app, on_ack=on_ack, on_reject=on_reject) class test_mro_lookup: def test_order(self): class A(object): pass class B(A): pass class C(B): pass class D(C): @classmethod def mro(cls): return () A.x = 10 assert mro_lookup(C, 'x') == A assert mro_lookup(C, 'x', stop={A}) is None B.x = 10 assert mro_lookup(C, 'x') == B C.x = 10 assert mro_lookup(C, 'x') == C assert mro_lookup(D, 'x') is None def jail(app, task_id, name, args, kwargs): request = {'id': task_id} task = app.tasks[name] task.__trace__ = None # rebuild return trace_task( task, task_id, args, kwargs, request=request, eager=False, app=app, ).retval @pytest.mark.skipif(sys.version_info[0] > 3, reason='Py2 only') class test_default_encode: def test_jython(self): prev, sys.platform = sys.platform, 'java 1.6.1' try: assert default_encode(b'foo') == b'foo' finally: sys.platform = prev def test_cpython(self): prev, sys.platform = sys.platform, 'darwin' gfe, sys.getfilesystemencoding = ( sys.getfilesystemencoding, lambda: 'utf-8', ) try: assert default_encode(b'foo') == b'foo' finally: sys.platform = prev sys.getfilesystemencoding = gfe class test_Retry: def test_retry_semipredicate(self): try: raise Exception('foo') except Exception as exc: ret = Retry('Retrying task', exc) assert ret.exc == exc class test_trace_task(RequestCase): def test_process_cleanup_fails(self, patching): _logger = patching('celery.app.trace.logger') self.mytask.backend = Mock() self.mytask.backend.process_cleanup = Mock(side_effect=KeyError()) tid = uuid() ret = jail(self.app, tid, self.mytask.name, [2], {}) assert ret == 4 self.mytask.backend.mark_as_done.assert_called() assert 'Process cleanup failed' in _logger.error.call_args[0][0] def 
test_process_cleanup_BaseException(self): self.mytask.backend = Mock() self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) with pytest.raises(SystemExit): jail(self.app, uuid(), self.mytask.name, [2], {}) def test_execute_jail_success(self): ret = jail(self.app, uuid(), self.mytask.name, [2], {}) assert ret == 4 def test_marked_as_started(self): _started = [] def store_result(tid, meta, state, **kwargs): if state == states.STARTED: _started.append(tid) self.mytask.backend.store_result = Mock(name='store_result') self.mytask.backend.store_result.side_effect = store_result self.mytask.track_started = True tid = uuid() jail(self.app, tid, self.mytask.name, [2], {}) assert tid in _started self.mytask.ignore_result = True tid = uuid() jail(self.app, tid, self.mytask.name, [2], {}) assert tid not in _started def test_execute_jail_failure(self): ret = jail( self.app, uuid(), self.mytask_raising.name, [4], {}, ) assert isinstance(ret, ExceptionInfo) assert ret.exception.args == (4,) def test_execute_ignore_result(self): @self.app.task(shared=False, ignore_result=True) def ignores_result(i): return i ** i task_id = uuid() ret = jail(self.app, task_id, ignores_result.name, [4], {}) assert ret == 256 assert not self.app.AsyncResult(task_id).ready() class test_Request(RequestCase): def get_request(self, sig, Request=Request, **kwargs): return Request( self.task_message_from_sig(self.app, sig), on_ack=Mock(name='on_ack'), on_reject=Mock(name='on_reject'), eventer=Mock(name='eventer'), app=self.app, connection_errors=(socket.error,), task=sig.type, **kwargs ) def test_shadow(self): assert self.get_request( self.add.s(2, 2).set(shadow='fooxyz')).name == 'fooxyz' def test_invalid_eta_raises_InvalidTaskError(self): with pytest.raises(InvalidTaskError): self.get_request(self.add.s(2, 2).set(eta='12345')) def test_invalid_expires_raises_InvalidTaskError(self): with pytest.raises(InvalidTaskError): self.get_request(self.add.s(2, 2).set(expires='12345')) def test_valid_expires_with_utc_makes_aware(self): with patch('celery.worker.request.maybe_make_aware') as mma: self.get_request(self.add.s(2, 2).set(expires=10), maybe_make_aware=mma) mma.assert_called() def test_maybe_expire_when_expires_is_None(self): req = self.get_request(self.add.s(2, 2)) assert not req.maybe_expire() def test_on_retry_acks_if_late(self): self.add.acks_late = True req = self.get_request(self.add.s(2, 2)) req.on_retry(Mock()) req.on_ack.assert_called_with(req_logger, req.connection_errors) def test_on_failure_Termianted(self): einfo = None try: raise Terminated('9') except Terminated: einfo = ExceptionInfo() assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.eventer.send.assert_called_with( 'task-revoked', uuid=req.id, terminated=True, signum='9', expired=False, ) def test_on_failure_propagates_MemoryError(self): einfo = None try: raise MemoryError() except MemoryError: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) with pytest.raises(MemoryError): req.on_failure(einfo) def test_on_failure_Ignore_acknowledges(self): einfo = None try: raise Ignore() except Ignore: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.on_ack.assert_called_with(req_logger, req.connection_errors) def test_on_failure_Reject_rejects(self): einfo = None try: raise Reject() except Reject: einfo = ExceptionInfo(internal=True) assert einfo is not None req = 
self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, False, ) def test_on_failure_Reject_rejects_with_requeue(self): einfo = None try: raise Reject(requeue=True) except Reject: einfo = ExceptionInfo(internal=True) assert einfo is not None req = self.get_request(self.add.s(2, 2)) req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, True, ) def test_on_failure_WorkerLostError_rejects_with_requeue(self): einfo = None try: raise WorkerLostError() except: einfo = ExceptionInfo(internal=True) req = self.get_request(self.add.s(2, 2)) req.task.acks_late = True req.task.reject_on_worker_lost = True req.delivery_info['redelivered'] = False req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, True) def test_on_failure_WorkerLostError_redelivered_None(self): einfo = None try: raise WorkerLostError() except: einfo = ExceptionInfo(internal=True) req = self.get_request(self.add.s(2, 2)) req.task.acks_late = True req.task.reject_on_worker_lost = True req.delivery_info['redelivered'] = None req.on_failure(einfo) req.on_reject.assert_called_with( req_logger, req.connection_errors, True) def test_tzlocal_is_cached(self): req = self.get_request(self.add.s(2, 2)) req._tzlocal = 'foo' assert req.tzlocal == 'foo' def test_task_wrapper_repr(self): assert repr(self.xRequest()) def test_sets_store_errors(self): self.mytask.ignore_result = True job = self.xRequest() assert not job.store_errors self.mytask.store_errors_even_if_ignored = True job = self.xRequest() assert job.store_errors def test_send_event(self): job = self.xRequest() job.eventer = Mock(name='.eventer') job.send_event('task-frobulated') job.eventer.send.assert_called_with('task-frobulated', uuid=job.id) def test_send_events__disabled_at_task_level(self): job = self.xRequest() job.task.send_events = False job.eventer = Mock(name='.eventer') job.send_event('task-frobulated') job.eventer.send.assert_not_called() def test_on_retry(self): job = self.get_request(self.mytask.s(1, f='x')) job.eventer = Mock(name='.eventer') try: raise Retry('foo', KeyError('moofoobar')) except: einfo = ExceptionInfo() job.on_failure(einfo) job.eventer.send.assert_called_with( 'task-retried', uuid=job.id, exception=safe_repr(einfo.exception.exc), traceback=safe_str(einfo.traceback), ) prev, module._does_info = module._does_info, False try: job.on_failure(einfo) finally: module._does_info = prev einfo.internal = True job.on_failure(einfo) def test_compat_properties(self): job = self.xRequest() assert job.task_id == job.id assert job.task_name == job.name job.task_id = 'ID' assert job.id == 'ID' job.task_name = 'NAME' assert job.name == 'NAME' def test_terminate__pool_ref(self): pool = Mock() signum = signal.SIGTERM job = self.get_request(self.mytask.s(1, f='x')) job._apply_result = Mock(name='_apply_result') with self.assert_signal_called( task_revoked, sender=job.task, request=job, terminated=True, expired=False, signum=signum): job.time_start = monotonic() job.worker_pid = 314 job.terminate(pool, signal='TERM') job._apply_result().terminate.assert_called_with(signum) job._apply_result = Mock(name='_apply_result2') job._apply_result.return_value = None job.terminate(pool, signal='TERM') def test_terminate__task_started(self): pool = Mock() signum = signal.SIGTERM job = self.get_request(self.mytask.s(1, f='x')) with self.assert_signal_called( task_revoked, sender=job.task, request=job, terminated=True, expired=False, 
signum=signum): job.time_start = monotonic() job.worker_pid = 313 job.terminate(pool, signal='TERM') pool.terminate_job.assert_called_with(job.worker_pid, signum) def test_terminate__task_reserved(self): pool = Mock() job = self.get_request(self.mytask.s(1, f='x')) job.time_start = None job.terminate(pool, signal='TERM') pool.terminate_job.assert_not_called() assert job._terminate_on_ack == (pool, 15) job.terminate(pool, signal='TERM') def test_revoked_expires_expired(self): job = self.get_request(self.mytask.s(1, f='x').set( expires=datetime.utcnow() - timedelta(days=1) )) with self.assert_signal_called( task_revoked, sender=job.task, request=job, terminated=False, expired=True, signum=None): job.revoked() assert job.id in revoked assert self.mytask.backend.get_status(job.id) == states.REVOKED def test_revoked_expires_not_expired(self): job = self.xRequest( expires=datetime.utcnow() + timedelta(days=1), ) job.revoked() assert job.id not in revoked assert self.mytask.backend.get_status(job.id) != states.REVOKED def test_revoked_expires_ignore_result(self): self.mytask.ignore_result = True job = self.xRequest( expires=datetime.utcnow() - timedelta(days=1), ) job.revoked() assert job.id in revoked assert self.mytask.backend.get_status(job.id) != states.REVOKED def test_already_revoked(self): job = self.xRequest() job._already_revoked = True assert job.revoked() def test_revoked(self): job = self.xRequest() with self.assert_signal_called( task_revoked, sender=job.task, request=job, terminated=False, expired=False, signum=None): revoked.add(job.id) assert job.revoked() assert job._already_revoked assert job.acknowledged def test_execute_does_not_execute_revoked(self): job = self.xRequest() revoked.add(job.id) job.execute() def test_execute_acks_late(self): self.mytask_raising.acks_late = True job = self.xRequest( name=self.mytask_raising.name, kwargs={}, ) job.execute() assert job.acknowledged job.execute() def test_execute_using_pool_does_not_execute_revoked(self): job = self.xRequest() revoked.add(job.id) with pytest.raises(TaskRevokedError): job.execute_using_pool(None) def test_on_accepted_acks_early(self): job = self.xRequest() job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) assert job.acknowledged prev, module._does_debug = module._does_debug, False try: job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) finally: module._does_debug = prev def test_on_accepted_acks_late(self): job = self.xRequest() self.mytask.acks_late = True job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) assert not job.acknowledged def test_on_accepted_terminates(self): signum = signal.SIGTERM pool = Mock() job = self.xRequest() with self.assert_signal_called( task_revoked, sender=job.task, request=job, terminated=True, expired=False, signum=signum): job.terminate(pool, signal='TERM') assert not pool.terminate_job.call_count job.on_accepted(pid=314, time_accepted=monotonic()) pool.terminate_job.assert_called_with(314, signum) def test_on_success_acks_early(self): job = self.xRequest() job.time_start = 1 job.on_success((0, 42, 0.001)) prev, module._does_info = module._does_info, False try: job.on_success((0, 42, 0.001)) assert not job.acknowledged finally: module._does_info = prev def test_on_success_BaseException(self): job = self.xRequest() job.time_start = 1 with pytest.raises(SystemExit): try: raise SystemExit() except SystemExit: job.on_success((1, ExceptionInfo(), 0.01)) else: assert False def test_on_success_eventer(self): job = self.xRequest() job.time_start = 1 job.eventer = 
Mock() job.eventer.send = Mock() job.on_success((0, 42, 0.001)) job.eventer.send.assert_called() def test_on_success_when_failure(self): job = self.xRequest() job.time_start = 1 job.on_failure = Mock() try: raise KeyError('foo') except Exception: job.on_success((1, ExceptionInfo(), 0.001)) job.on_failure.assert_called() def test_on_success_acks_late(self): job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True job.on_success((0, 42, 0.001)) assert job.acknowledged def test_on_failure_WorkerLostError(self): def get_ei(): try: raise WorkerLostError('do re mi') except WorkerLostError: return ExceptionInfo() job = self.xRequest() exc_info = get_ei() job.on_failure(exc_info) assert self.mytask.backend.get_status(job.id) == states.FAILURE self.mytask.ignore_result = True exc_info = get_ei() job = self.xRequest() job.on_failure(exc_info) assert self.mytask.backend.get_status(job.id) == states.PENDING def test_on_failure_acks_late(self): job = self.xRequest() job.time_start = 1 self.mytask.acks_late = True try: raise KeyError('foo') except KeyError: exc_info = ExceptionInfo() job.on_failure(exc_info) assert job.acknowledged def test_from_message_invalid_kwargs(self): m = self.TaskMessage(self.mytask.name, args=(), kwargs='foo') req = Request(m, app=self.app) with pytest.raises(InvalidTaskError): raise req.execute().exception def test_on_timeout(self, patching): warn = patching('celery.worker.request.warn') error = patching('celery.worker.request.error') job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = True job.on_timeout(soft=True, timeout=1337) assert 'Soft time limit' in warn.call_args[0][0] job.on_timeout(soft=False, timeout=1337) assert 'Hard time limit' in error.call_args[0][0] assert self.mytask.backend.get_status(job.id) == states.FAILURE job.acknowledge.assert_called_with() self.mytask.ignore_result = True job = self.xRequest() job.on_timeout(soft=True, timeout=1336) assert self.mytask.backend.get_status(job.id) == states.PENDING job = self.xRequest() job.acknowledge = Mock(name='ack') job.task.acks_late = False job.on_timeout(soft=True, timeout=1335) job.acknowledge.assert_not_called() def test_fast_trace_task(self): from celery.app import trace setup_worker_optimizations(self.app) assert trace.trace_task_ret is trace._fast_trace_task tid = uuid() message = self.TaskMessage(self.mytask.name, tid, args=[4]) assert len(message.payload) == 3 try: self.mytask.__trace__ = build_tracer( self.mytask.name, self.mytask, self.app.loader, 'test', app=self.app, ) failed, res, runtime = trace.trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding) assert not failed assert res == repr(4 ** 4) assert runtime is not None assert isinstance(runtime, numbers.Real) finally: reset_worker_optimizations() assert trace.trace_task_ret is trace._trace_task_ret delattr(self.mytask, '__trace__') failed, res, runtime = trace.trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding, app=self.app, ) assert not failed assert res == repr(4 ** 4) assert runtime is not None assert isinstance(runtime, numbers.Real) def test_trace_task_ret(self): self.mytask.__trace__ = build_tracer( self.mytask.name, self.mytask, self.app.loader, 'test', app=self.app, ) tid = uuid() message = self.TaskMessage(self.mytask.name, tid, args=[4]) _, R, _ = _trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding, 
app=self.app, ) assert R == repr(4 ** 4) def test_trace_task_ret__no_trace(self): try: delattr(self.mytask, '__trace__') except AttributeError: pass tid = uuid() message = self.TaskMessage(self.mytask.name, tid, args=[4]) _, R, _ = _trace_task_ret( self.mytask.name, tid, message.headers, message.body, message.content_type, message.content_encoding, app=self.app, ) assert R == repr(4 ** 4) def test_trace_catches_exception(self): @self.app.task(request=None, shared=False) def raising(): raise KeyError('baz') with pytest.warns(RuntimeWarning): res = trace_task(raising, uuid(), [], {}, app=self.app)[0] assert isinstance(res, ExceptionInfo) def test_worker_task_trace_handle_retry(self): tid = uuid() self.mytask.push_request(id=tid) try: raise ValueError('foo') except Exception as exc: try: raise Retry(str(exc), exc=exc) except Retry as exc: w = TraceInfo(states.RETRY, exc) w.handle_retry( self.mytask, self.mytask.request, store_errors=False, ) assert self.mytask.backend.get_status(tid) == states.PENDING w.handle_retry( self.mytask, self.mytask.request, store_errors=True, ) assert self.mytask.backend.get_status(tid) == states.RETRY finally: self.mytask.pop_request() def test_worker_task_trace_handle_failure(self): tid = uuid() self.mytask.push_request() try: self.mytask.request.id = tid try: raise ValueError('foo') except Exception as exc: w = TraceInfo(states.FAILURE, exc) w.handle_failure( self.mytask, self.mytask.request, store_errors=False, ) assert self.mytask.backend.get_status(tid) == states.PENDING w.handle_failure( self.mytask, self.mytask.request, store_errors=True, ) assert self.mytask.backend.get_status(tid) == states.FAILURE finally: self.mytask.pop_request() def test_from_message(self): us = 'æØåveéðƒeæ' tid = uuid() m = self.TaskMessage( self.mytask.name, tid, args=[2], kwargs={us: 'bar'}, ) job = Request(m, app=self.app) assert isinstance(job, Request) assert job.name == self.mytask.name assert job.id == tid assert job.message is m def test_from_message_empty_args(self): tid = uuid() m = self.TaskMessage(self.mytask.name, tid, args=[], kwargs={}) job = Request(m, app=self.app) assert isinstance(job, Request) def test_from_message_missing_required_fields(self): m = self.TaskMessage(self.mytask.name) m.headers.clear() with pytest.raises(KeyError): Request(m, app=self.app) def test_from_message_nonexistant_task(self): m = self.TaskMessage( 'cu.mytask.doesnotexist', args=[2], kwargs={'æØåveéðƒeæ': 'bar'}, ) with pytest.raises(KeyError): Request(m, app=self.app) def test_execute(self): tid = uuid() job = self.xRequest(id=tid, args=[4], kwargs={}) assert job.execute() == 256 meta = self.mytask.backend.get_task_meta(tid) assert meta['status'] == states.SUCCESS assert meta['result'] == 256 def test_execute_success_no_kwargs(self): @self.app.task # traverses coverage for decorator without parens def mytask_no_kwargs(i): return i ** i tid = uuid() job = self.xRequest( name=mytask_no_kwargs.name, id=tid, args=[4], kwargs={}, ) assert job.execute() == 256 meta = mytask_no_kwargs.backend.get_task_meta(tid) assert meta['result'] == 256 assert meta['status'] == states.SUCCESS def test_execute_ack(self): scratch = {'ACK': False} def on_ack(*args, **kwargs): scratch['ACK'] = True tid = uuid() job = self.xRequest(id=tid, args=[4], on_ack=on_ack) assert job.execute() == 256 meta = self.mytask.backend.get_task_meta(tid) assert scratch['ACK'] assert meta['result'] == 256 assert meta['status'] == states.SUCCESS def test_execute_fail(self): tid = uuid() job = self.xRequest( 
name=self.mytask_raising.name, id=tid, args=[4], kwargs={}, ) assert isinstance(job.execute(), ExceptionInfo) assert self.mytask_raising.backend.serializer == 'pickle' meta = self.mytask_raising.backend.get_task_meta(tid) assert meta['status'] == states.FAILURE assert isinstance(meta['result'], KeyError) def test_execute_using_pool(self): tid = uuid() job = self.xRequest(id=tid, args=[4]) p = Mock() job.execute_using_pool(p) p.apply_async.assert_called_once() args = p.apply_async.call_args[1]['args'] assert args[0] == self.mytask.name assert args[1] == tid assert args[2] == job.request_dict assert args[3] == job.message.body def _test_on_failure(self, exception, **kwargs): tid = uuid() job = self.xRequest(id=tid, args=[4]) job.send_event = Mock(name='send_event') job.task.backend.mark_as_failure = Mock(name='mark_as_failure') try: raise exception except type(exception): exc_info = ExceptionInfo() job.on_failure(exc_info, **kwargs) job.send_event.assert_called() return job def test_on_failure(self): self._test_on_failure(Exception('Inside unit tests')) def test_on_failure__unicode_exception(self): self._test_on_failure(Exception('Бобры атакуют')) def test_on_failure__utf8_exception(self): self._test_on_failure(Exception( from_utf8('Бобры атакуют'))) def test_on_failure__WorkerLostError(self): exc = WorkerLostError() job = self._test_on_failure(exc) job.task.backend.mark_as_failure.assert_called_with( job.id, exc, request=job, store_result=True, ) def test_on_failure__return_ok(self): self._test_on_failure(KeyError(), return_ok=True) def test_reject(self): job = self.xRequest(id=uuid()) job.on_reject = Mock(name='on_reject') job.reject(requeue=True) job.on_reject.assert_called_with( req_logger, job.connection_errors, True, ) assert job.acknowledged job.on_reject.reset_mock() job.reject(requeue=True) job.on_reject.assert_not_called() def test_group(self): gid = uuid() job = self.xRequest(id=uuid(), group=gid) assert job.group == gid class test_create_request_class(RequestCase): def setup(self): self.task = Mock(name='task') self.pool = Mock(name='pool') self.eventer = Mock(name='eventer') RequestCase.setup(self) def create_request_cls(self, **kwargs): return create_request_cls( Request, self.task, self.pool, 'foo', self.eventer, **kwargs ) def zRequest(self, Request=None, revoked_tasks=None, ref=None, **kwargs): return self.xRequest( Request=Request or self.create_request_cls( ref=ref, revoked_tasks=revoked_tasks, ), **kwargs) def test_on_success(self): self.zRequest(id=uuid()).on_success((False, 'hey', 3.1222)) def test_on_success__SystemExit(self, errors=(SystemExit, KeyboardInterrupt)): for exc in errors: einfo = None try: raise exc() except exc: einfo = ExceptionInfo() with pytest.raises(exc): self.zRequest(id=uuid()).on_success((True, einfo, 1.0)) def test_on_success__calls_failure(self): job = self.zRequest(id=uuid()) einfo = Mock(name='einfo') job.on_failure = Mock(name='on_failure') job.on_success((True, einfo, 1.0)) job.on_failure.assert_called_with(einfo, return_ok=True) def test_on_success__acks_late_enabled(self): self.task.acks_late = True job = self.zRequest(id=uuid()) job.acknowledge = Mock(name='ack') job.on_success((False, 'foo', 1.0)) job.acknowledge.assert_called_with() def test_on_success__acks_late_disabled(self): self.task.acks_late = False job = self.zRequest(id=uuid()) job.acknowledge = Mock(name='ack') job.on_success((False, 'foo', 1.0)) job.acknowledge.assert_not_called() def test_on_success__no_events(self): self.eventer = None job = self.zRequest(id=uuid()) 
job.send_event = Mock(name='send_event') job.on_success((False, 'foo', 1.0)) job.send_event.assert_not_called() def test_on_success__with_events(self): job = self.zRequest(id=uuid()) job.send_event = Mock(name='send_event') job.on_success((False, 'foo', 1.0)) job.send_event.assert_called_with( 'task-succeeded', result='foo', runtime=1.0, ) def test_execute_using_pool__revoked(self): tid = uuid() job = self.zRequest(id=tid, revoked_tasks={tid}) job.revoked = Mock() job.revoked.return_value = True with pytest.raises(TaskRevokedError): job.execute_using_pool(self.pool) def test_execute_using_pool__expired(self): tid = uuid() job = self.zRequest(id=tid, revoked_tasks=set()) job.expires = 1232133 job.revoked = Mock() job.revoked.return_value = True with pytest.raises(TaskRevokedError): job.execute_using_pool(self.pool) def test_execute_using_pool(self): from celery.app.trace import trace_task_ret as trace weakref_ref = Mock(name='weakref.ref') job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref) job.execute_using_pool(self.pool) self.pool.apply_async.assert_called_with( trace, args=(job.type, job.id, job.request_dict, job.body, job.content_type, job.content_encoding), accept_callback=job.on_accepted, timeout_callback=job.on_timeout, callback=job.on_success, error_callback=job.on_failure, soft_timeout=self.task.soft_time_limit, timeout=self.task.time_limit, correlation_id=job.id, ) assert job._apply_result weakref_ref.assert_called_with(self.pool.apply_async()) assert job._apply_result is weakref_ref() celery-4.1.0/t/unit/worker/test_components.py0000644000175000017500000000467313130607475021276 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch, skip from celery.exceptions import ImproperlyConfigured from celery.worker.components import Beat, Hub, Pool, Timer # some of these are tested in test_worker, so I've only written tests # here to complete coverage. 
Should move everything to this module at some # point [-ask] class test_Timer: def test_create__eventloop(self): w = Mock(name='w') w.use_eventloop = True Timer(w).create(w) assert not w.timer.queue class test_Hub: def setup(self): self.w = Mock(name='w') self.hub = Hub(self.w) self.w.hub = Mock(name='w.hub') @patch('celery.worker.components.set_event_loop') @patch('celery.worker.components.get_event_loop') def test_create(self, get_event_loop, set_event_loop): self.hub._patch_thread_primitives = Mock(name='ptp') assert self.hub.create(self.w) is self.hub self.hub._patch_thread_primitives.assert_called_with(self.w) def test_start(self): self.hub.start(self.w) def test_stop(self): self.hub.stop(self.w) self.w.hub.close.assert_called_with() def test_terminate(self): self.hub.terminate(self.w) self.w.hub.close.assert_called_with() class test_Pool: def test_close_terminate(self): w = Mock() comp = Pool(w) pool = w.pool = Mock() comp.close(w) pool.close.assert_called_with() comp.terminate(w) pool.terminate.assert_called_with() w.pool = None comp.close(w) comp.terminate(w) @skip.if_win32() def test_create_when_eventloop(self): w = Mock() w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True comp = Pool(w) w.pool = Mock() comp.create(w) assert w.process_task is w._process_task_sem def test_create_calls_instantiate_with_max_memory(self): w = Mock() w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True comp = Pool(w) comp.instantiate = Mock() w.max_memory_per_child = 32 comp.create(w) assert comp.instantiate.call_args[1]['max_memory_per_child'] == 32 class test_Beat: def test_create__green(self): w = Mock(name='w') w.pool_cls.__module__ = 'foo_gevent' with pytest.raises(ImproperlyConfigured): Beat(w).create(w) celery-4.1.0/t/unit/worker/test_state.py0000644000175000017500000001214613130607475020223 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle import pytest from time import time from case import Mock, patch from celery import uuid from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.worker import state from celery.utils.collections import LimitedSet @pytest.fixture def reset_state(): yield state.active_requests.clear() state.revoked.clear() state.total_count.clear() class MockShelve(dict): filename = None in_sync = False closed = False def open(self, filename, **kwargs): self.filename = filename return self def sync(self): self.in_sync = True def close(self): self.closed = True class MyPersistent(state.Persistent): storage = MockShelve() class test_maybe_shutdown: def teardown(self): state.should_stop = None state.should_terminate = None def test_should_stop(self): state.should_stop = True with pytest.raises(WorkerShutdown): state.maybe_shutdown() state.should_stop = 0 with pytest.raises(WorkerShutdown): state.maybe_shutdown() state.should_stop = False try: state.maybe_shutdown() except SystemExit: raise RuntimeError('should not have exited') state.should_stop = None try: state.maybe_shutdown() except SystemExit: raise RuntimeError('should not have exited') state.should_stop = 0 try: state.maybe_shutdown() except SystemExit as exc: assert exc.code == 0 else: raise RuntimeError('should have exited') state.should_stop = 303 try: state.maybe_shutdown() except SystemExit as exc: assert exc.code == 303 else: raise RuntimeError('should have exited') def test_should_terminate(self): state.should_terminate = True with pytest.raises(WorkerTerminate): state.maybe_shutdown()
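# --- Illustrative sketch (editor's addition, not part of the original
# suite): ``test_maybe_shutdown`` above fixes the semantics of the two
# module-level flags -- ``should_stop`` requests a warm shutdown
# (WorkerShutdown), ``should_terminate`` a cold one (WorkerTerminate), any
# value other than None/False triggers the raise, and the flag's value
# travels as the process exit code (so 0 still exits, with code 0).  A
# self-contained model of just that contract; the class names below are
# hypothetical stand-ins for the real celery.exceptions types:

class _WarmShutdown(SystemExit):
    pass


class _ColdShutdown(SystemExit):
    pass


def _maybe_shutdown_sketch(should_stop=None, should_terminate=None):
    # ``is not False`` (rather than truthiness) is what lets 0 request a
    # shutdown with exit code 0, exactly as asserted above.
    if should_stop is not None and should_stop is not False:
        raise _WarmShutdown(should_stop)
    if should_terminate is not None and should_terminate is not False:
        raise _ColdShutdown(should_terminate)


if __name__ == '__main__':
    try:
        _maybe_shutdown_sketch(should_stop=303)
    except SystemExit as exc:
        assert exc.code == 303  # the flag value becomes the exit code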
@pytest.mark.usefixtures('reset_state') class test_Persistent: @pytest.fixture def p(self): return MyPersistent(state, filename='celery-state') def test_close_twice(self, p): p._is_open = False p.close() def test_constructor(self, p): assert p.db == {} assert p.db.filename == p.filename def test_save(self, p): p.db['foo'] = 'bar' p.save() assert p.db.in_sync assert p.db.closed def add_revoked(self, p, *ids): for id in ids: p.db.setdefault(str('revoked'), LimitedSet()).add(id) def test_merge(self, p, data=['foo', 'bar', 'baz']): state.revoked.update(data) p.merge() for item in data: assert item in state.revoked def test_merge_dict(self, p): p.clock = Mock() p.clock.adjust.return_value = 626 d = {str('revoked'): {str('abc'): time()}, str('clock'): 313} p._merge_with(d) p.clock.adjust.assert_called_with(313) assert d[str('clock')] == 626 assert str('abc') in state.revoked def test_sync_clock_and_purge(self, p): passthrough = Mock() passthrough.side_effect = lambda x: x with patch('celery.worker.state.revoked') as revoked: d = {str('clock'): 0} p.clock = Mock() p.clock.forward.return_value = 627 p._dumps = passthrough p.compress = passthrough p._sync_with(d) revoked.purge.assert_called_with() assert d[str('clock')] == 627 assert str('revoked') not in d assert d[str('zrevoked')] is revoked def test_sync(self, p, data1=['foo', 'bar', 'baz'], data2=['baz', 'ini', 'koz']): self.add_revoked(p, *data1) for item in data2: state.revoked.add(item) p.sync() assert p.db[str('zrevoked')] pickled = p.decompress(p.db[str('zrevoked')]) assert pickled saved = pickle.loads(pickled) for item in data2: assert item in saved class SimpleReq(object): def __init__(self, name): self.id = uuid() self.name = name @pytest.mark.usefixtures('reset_state') class test_state: def test_accepted(self, requests=[SimpleReq('foo'), SimpleReq('bar'), SimpleReq('baz'), SimpleReq('baz')]): for request in requests: state.task_accepted(request) for req in requests: assert req in state.active_requests assert state.total_count['foo'] == 1 assert state.total_count['bar'] == 1 assert state.total_count['baz'] == 2 def test_ready(self, requests=[SimpleReq('foo'), SimpleReq('bar')]): for request in requests: state.task_accepted(request) assert len(state.active_requests) == 2 for request in requests: state.task_ready(request) assert len(state.active_requests) == 0 celery-4.1.0/t/unit/worker/test_worker.py0000644000175000017500000010515113130607475020413 0ustar omeromer00000000000000from __future__ import absolute_import, print_function, unicode_literals import os import pytest import socket import sys from collections import deque from datetime import datetime, timedelta from functools import partial from threading import Event from amqp import ChannelError from case import Mock, patch, skip from kombu import Connection from kombu.common import QoS, ignore_errors from kombu.transport.base import Message from kombu.transport.memory import Transport from kombu.utils.uuid import uuid from celery.bootsteps import RUN, CLOSE, TERMINATE, StartStopStep from celery.concurrency.base import BasePool from celery.exceptions import ( WorkerShutdown, WorkerTerminate, TaskRevokedError, InvalidTaskError, ImproperlyConfigured, ) from celery.five import Empty, range, Queue as FastQueue from celery.platforms import EX_FAILURE from celery.worker import worker as worker_module from celery.worker import components from celery.worker import consumer from celery.worker import state from celery.worker.consumer import Consumer from celery.worker.pidbox import gPidbox 
from celery.worker.request import Request from celery.utils.nodenames import worker_direct from celery.utils.serialization import pickle from celery.utils.timer2 import Timer def MockStep(step=None): if step is None: step = Mock(name='step') else: step.blueprint = Mock(name='step.blueprint') step.blueprint.name = 'MockNS' step.name = 'MockStep(%s)' % (id(step),) return step def mock_event_dispatcher(): evd = Mock(name='event_dispatcher') evd.groups = ['worker'] evd._outbound_buffer = deque() return evd def find_step(obj, typ): return obj.blueprint.steps[typ.name] def create_message(channel, **data): data.setdefault('id', uuid()) m = Message(body=pickle.dumps(dict(**data)), channel=channel, content_type='application/x-python-serialize', content_encoding='binary', delivery_info={'consumer_tag': 'mock'}) m.accept = ['application/x-python-serialize'] return m class ConsumerCase: def create_task_message(self, channel, *args, **kwargs): m = self.TaskMessage(*args, **kwargs) m.channel = channel m.delivery_info = {'consumer_tag': 'mock'} return m class test_Consumer(ConsumerCase): def setup(self): self.buffer = FastQueue() self.timer = Timer() @self.app.task(shared=False) def foo_task(x, y, z): return x * y * z self.foo_task = foo_task def teardown(self): self.timer.stop() def LoopConsumer(self, buffer=None, controller=None, timer=None, app=None, without_mingle=True, without_gossip=True, without_heartbeat=True, **kwargs): if controller is None: controller = Mock(name='.controller') buffer = buffer if buffer is not None else self.buffer.put timer = timer if timer is not None else self.timer app = app if app is not None else self.app c = Consumer( buffer, timer=timer, app=app, controller=controller, without_mingle=without_mingle, without_gossip=without_gossip, without_heartbeat=without_heartbeat, **kwargs ) c.task_consumer = Mock(name='.task_consumer') c.qos = QoS(c.task_consumer.qos, 10) c.connection = Mock(name='.connection') c.controller = c.app.WorkController() c.heart = Mock(name='.heart') c.controller.consumer = c c.pool = c.controller.pool = Mock(name='.controller.pool') c.node = Mock(name='.node') c.event_dispatcher = mock_event_dispatcher() return c def NoopConsumer(self, *args, **kwargs): c = self.LoopConsumer(*args, **kwargs) c.loop = Mock(name='.loop') return c def test_info(self): c = self.NoopConsumer() c.connection.info.return_value = {'foo': 'bar'} c.controller.pool.info.return_value = [Mock(), Mock()] info = c.controller.stats() assert info['prefetch_count'] == 10 assert info['broker'] def test_start_when_closed(self): c = self.NoopConsumer() c.blueprint.state = CLOSE c.start() def test_connection(self): c = self.NoopConsumer() c.blueprint.start(c) assert isinstance(c.connection, Connection) c.blueprint.state = RUN c.event_dispatcher = None c.blueprint.restart(c) assert c.connection c.blueprint.state = RUN c.shutdown() assert c.connection is None assert c.task_consumer is None c.blueprint.start(c) assert isinstance(c.connection, Connection) c.blueprint.restart(c) c.stop() c.shutdown() assert c.connection is None assert c.task_consumer is None def test_close_connection(self): c = self.NoopConsumer() c.blueprint.state = RUN step = find_step(c, consumer.Connection) connection = c.connection step.shutdown(c) connection.close.assert_called() assert c.connection is None def test_close_connection__heart_shutdown(self): c = self.NoopConsumer() event_dispatcher = c.event_dispatcher heart = c.heart c.event_dispatcher.enabled = True c.blueprint.state = RUN Events = find_step(c, 
consumer.Events) Events.shutdown(c) Heart = find_step(c, consumer.Heart) Heart.shutdown(c) event_dispatcher.close.assert_called() heart.stop.assert_called_with() @patch('celery.worker.consumer.consumer.warn') def test_receive_message_unknown(self, warn): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() channel = Mock(name='.channeol') m = create_message(channel, unknown={'baz': '!!!'}) callback = self._get_on_message(c) callback(m) warn.assert_called() @patch('celery.worker.strategy.to_timestamp') def test_receive_message_eta_OverflowError(self, to_timestamp): to_timestamp.side_effect = OverflowError() c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() m = self.create_task_message( Mock(), self.foo_task.name, args=('2, 2'), kwargs={}, eta=datetime.now().isoformat(), ) c.update_strategies() callback = self._get_on_message(c) callback(m) assert m.acknowledged @patch('celery.worker.consumer.consumer.error') def test_receive_message_InvalidTaskError(self, error): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() m = self.create_task_message( Mock(), self.foo_task.name, args=(1, 2), kwargs='foobarbaz', id=1) c.update_strategies() strat = c.strategies[self.foo_task.name] = Mock(name='strategy') strat.side_effect = InvalidTaskError() callback = self._get_on_message(c) callback(m) error.assert_called() assert 'Received invalid task message' in error.call_args[0][0] @patch('celery.worker.consumer.consumer.crit') def test_on_decode_error(self, crit): c = self.LoopConsumer() class MockMessage(Mock): content_type = 'application/x-msgpack' content_encoding = 'binary' body = 'foobarbaz' message = MockMessage() c.on_decode_error(message, KeyError('foo')) assert message.ack.call_count assert "Can't decode message body" in crit.call_args[0][0] def _get_on_message(self, c): if c.qos is None: c.qos = Mock() c.task_consumer = Mock() c.event_dispatcher = mock_event_dispatcher() c.connection = Mock(name='.connection') c.connection.get_heartbeat_interval.return_value = 0 c.connection.drain_events.side_effect = WorkerShutdown() with pytest.raises(WorkerShutdown): c.loop(*c.loop_args()) assert c.task_consumer.on_message return c.task_consumer.on_message def test_receieve_message(self): c = self.LoopConsumer() c.blueprint.state = RUN m = self.create_task_message( Mock(), self.foo_task.name, args=[2, 4, 8], kwargs={}, ) c.update_strategies() callback = self._get_on_message(c) callback(m) in_bucket = self.buffer.get_nowait() assert isinstance(in_bucket, Request) assert in_bucket.name == self.foo_task.name assert in_bucket.execute() == 2 * 4 * 8 assert self.timer.empty() def test_start_channel_error(self): c = self.NoopConsumer(task_events=False, pool=BasePool()) c.loop.on_nth_call_do_raise(KeyError('foo'), SyntaxError('bar')) c.channel_errors = (KeyError,) try: with pytest.raises(KeyError): c.start() finally: c.timer and c.timer.stop() def test_start_connection_error(self): c = self.NoopConsumer(task_events=False, pool=BasePool()) c.loop.on_nth_call_do_raise(KeyError('foo'), SyntaxError('bar')) c.connection_errors = (KeyError,) try: with pytest.raises(SyntaxError): c.start() finally: c.timer and c.timer.stop() def test_loop_ignores_socket_timeout(self): class Connection(self.app.connection_for_read().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None raise socket.timeout(10) c = self.NoopConsumer() c.connection = Connection(self.app.conf.broker_url) c.connection.obj = c c.qos = QoS(c.task_consumer.qos, 10) c.loop(*c.loop_args()) def 
test_loop_when_socket_error(self): class Connection(self.app.connection_for_read().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None raise socket.error('foo') c = self.LoopConsumer() c.blueprint.state = RUN conn = c.connection = Connection(self.app.conf.broker_url) c.connection.obj = c c.qos = QoS(c.task_consumer.qos, 10) with pytest.raises(socket.error): c.loop(*c.loop_args()) c.blueprint.state = CLOSE c.connection = conn c.loop(*c.loop_args()) def test_loop(self): class Connection(self.app.connection_for_read().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None @property def supports_heartbeats(self): return False c = self.LoopConsumer() c.blueprint.state = RUN c.connection = Connection(self.app.conf.broker_url) c.connection.obj = c c.connection.get_heartbeat_interval = Mock(return_value=None) c.qos = QoS(c.task_consumer.qos, 10) c.loop(*c.loop_args()) c.loop(*c.loop_args()) assert c.task_consumer.consume.call_count c.task_consumer.qos.assert_called_with(prefetch_count=10) assert c.qos.value == 10 c.qos.decrement_eventually() assert c.qos.value == 9 c.qos.update() assert c.qos.value == 9 c.task_consumer.qos.assert_called_with(prefetch_count=9) def test_ignore_errors(self): c = self.NoopConsumer() c.connection_errors = (AttributeError, KeyError,) c.channel_errors = (SyntaxError,) ignore_errors(c, Mock(side_effect=AttributeError('foo'))) ignore_errors(c, Mock(side_effect=KeyError('foo'))) ignore_errors(c, Mock(side_effect=SyntaxError('foo'))) with pytest.raises(IndexError): ignore_errors(c, Mock(side_effect=IndexError('foo'))) def test_apply_eta_task(self): c = self.NoopConsumer() c.qos = QoS(None, 10) task = Mock(name='task', id='1234213') qos = c.qos.value c.apply_eta_task(task) assert task in state.reserved_requests assert c.qos.value == qos - 1 assert self.buffer.get_nowait() is task def test_receieve_message_eta_isoformat(self): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() m = self.create_task_message( Mock(), self.foo_task.name, eta=(datetime.now() + timedelta(days=1)).isoformat(), args=[2, 4, 8], kwargs={}, ) c.qos = QoS(c.task_consumer.qos, 1) current_pcount = c.qos.value c.event_dispatcher.enabled = False c.update_strategies() callback = self._get_on_message(c) callback(m) c.timer.stop() c.timer.join(1) items = [entry[2] for entry in self.timer.queue] found = 0 for item in items: if item.args[0].name == self.foo_task.name: found = True assert found assert c.qos.value > current_pcount c.timer.stop() def test_pidbox_callback(self): c = self.NoopConsumer() con = find_step(c, consumer.Control).box con.node = Mock() con.reset = Mock() con.on_message('foo', 'bar') con.node.handle_message.assert_called_with('foo', 'bar') con.node = Mock() con.node.handle_message.side_effect = KeyError('foo') con.on_message('foo', 'bar') con.node.handle_message.assert_called_with('foo', 'bar') con.node = Mock() con.node.handle_message.side_effect = ValueError('foo') con.on_message('foo', 'bar') con.node.handle_message.assert_called_with('foo', 'bar') con.reset.assert_called() def test_revoke(self): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() channel = Mock(name='channel') id = uuid() t = self.create_task_message( channel, self.foo_task.name, args=[2, 4, 8], kwargs={}, id=id, ) state.revoked.add(id) callback = self._get_on_message(c) callback(t) assert self.buffer.empty() def test_receieve_message_not_registered(self): c = self.LoopConsumer() c.blueprint.state = RUN c.steps.pop() channel = 
Mock(name='channel') m = self.create_task_message( channel, 'x.X.31x', args=[2, 4, 8], kwargs={}, ) callback = self._get_on_message(c) assert not callback(m) with pytest.raises(Empty): self.buffer.get_nowait() assert self.timer.empty() @patch('celery.worker.consumer.consumer.warn') @patch('celery.worker.consumer.consumer.logger') def test_receieve_message_ack_raises(self, logger, warn): c = self.LoopConsumer() c.blueprint.state = RUN channel = Mock(name='channel') m = self.create_task_message( channel, self.foo_task.name, args=[2, 4, 8], kwargs={}, ) m.headers = None c.update_strategies() c.connection_errors = (socket.error,) m.reject = Mock() m.reject.side_effect = socket.error('foo') callback = self._get_on_message(c) assert not callback(m) warn.assert_called() with pytest.raises(Empty): self.buffer.get_nowait() assert self.timer.empty() m.reject_log_error.assert_called_with(logger, c.connection_errors) def test_receive_message_eta(self): if os.environ.get('C_DEBUG_TEST'): pp = partial(print, file=sys.__stderr__) else: def pp(*args, **kwargs): pass pp('TEST RECEIVE MESSAGE ETA') pp('+CREATE MYKOMBUCONSUMER') c = self.LoopConsumer() pp('-CREATE MYKOMBUCONSUMER') c.steps.pop() channel = Mock(name='channel') pp('+ CREATE MESSAGE') m = self.create_task_message( channel, self.foo_task.name, args=[2, 4, 8], kwargs={}, eta=(datetime.now() + timedelta(days=1)).isoformat(), ) pp('- CREATE MESSAGE') try: pp('+ BLUEPRINT START 1') c.blueprint.start(c) pp('- BLUEPRINT START 1') p = c.app.conf.broker_connection_retry c.app.conf.broker_connection_retry = False pp('+ BLUEPRINT START 2') c.blueprint.start(c) pp('- BLUEPRINT START 2') c.app.conf.broker_connection_retry = p pp('+ BLUEPRINT RESTART') c.blueprint.restart(c) pp('- BLUEPRINT RESTART') pp('+ GET ON MESSAGE') callback = self._get_on_message(c) pp('- GET ON MESSAGE') pp('+ CALLBACK') callback(m) pp('- CALLBACK') finally: pp('+ STOP TIMER') c.timer.stop() pp('- STOP TIMER') try: pp('+ JOIN TIMER') c.timer.join() pp('- JOIN TIMER') except RuntimeError: pass in_hold = c.timer.queue[0] assert len(in_hold) == 3 eta, priority, entry = in_hold task = entry.args[0] assert isinstance(task, Request) assert task.name == self.foo_task.name assert task.execute() == 2 * 4 * 8 with pytest.raises(Empty): self.buffer.get_nowait() def test_reset_pidbox_node(self): c = self.NoopConsumer() con = find_step(c, consumer.Control).box con.node = Mock() chan = con.node.channel = Mock() chan.close.side_effect = socket.error('foo') c.connection_errors = (socket.error,) con.reset() chan.close.assert_called_with() def test_reset_pidbox_node_green(self): c = self.NoopConsumer(pool=Mock(is_green=True)) con = find_step(c, consumer.Control) assert isinstance(con.box, gPidbox) con.start(c) c.pool.spawn_n.assert_called_with(con.box.loop, c) def test_green_pidbox_node(self): pool = Mock() pool.is_green = True c = self.NoopConsumer(pool=Mock(is_green=True)) controller = find_step(c, consumer.Control) class BConsumer(Mock): def __enter__(self): self.consume() return self def __exit__(self, *exc_info): self.cancel() controller.box.node.listen = BConsumer() connections = [] class Connection(object): calls = 0 def __init__(self, obj): connections.append(self) self.obj = obj self.default_channel = self.channel() self.closed = False def __enter__(self): return self def __exit__(self, *exc_info): self.close() def channel(self): return Mock() def as_uri(self): return 'dummy://' def drain_events(self, **kwargs): if not self.calls: self.calls += 1 raise socket.timeout() self.obj.connection = 
None controller.box._node_shutdown.set() def close(self): self.closed = True c.connection_for_read = lambda: Connection(obj=c) controller = find_step(c, consumer.Control) controller.box.loop(c) controller.box.node.listen.assert_called() assert controller.box.consumer controller.box.consumer.consume.assert_called_with() assert c.connection is None assert connections[0].closed @patch('kombu.connection.Connection._establish_connection') @patch('kombu.utils.functional.sleep') def test_connect_errback(self, sleep, connect): c = self.NoopConsumer() Transport.connection_errors = (ChannelError,) connect.on_nth_call_do(ChannelError('error'), n=1) c.connect() connect.assert_called_with() def test_stop_pidbox_node(self): c = self.NoopConsumer() cont = find_step(c, consumer.Control) cont._node_stopped = Event() cont._node_shutdown = Event() cont._node_stopped.set() cont.stop(c) def test_start__loop(self): class _QoS(object): prev = 3 value = 4 def update(self): self.prev = self.value init_callback = Mock(name='init_callback') c = self.NoopConsumer(init_callback=init_callback) c.qos = _QoS() c.connection = Connection(self.app.conf.broker_url) c.connection.get_heartbeat_interval = Mock(return_value=None) c.iterations = 0 def raises_KeyError(*args, **kwargs): c.iterations += 1 if c.qos.prev != c.qos.value: c.qos.update() if c.iterations >= 2: raise KeyError('foo') c.loop = raises_KeyError with pytest.raises(KeyError): c.start() assert c.iterations == 2 assert c.qos.prev == c.qos.value init_callback.reset_mock() c = self.NoopConsumer(task_events=False, init_callback=init_callback) c.qos = _QoS() c.connection = Connection(self.app.conf.broker_url) c.connection.get_heartbeat_interval = Mock(return_value=None) c.loop = Mock(side_effect=socket.error('foo')) with pytest.raises(socket.error): c.start() c.loop.assert_called() def test_reset_connection_with_no_node(self): c = self.NoopConsumer() c.steps.pop() c.blueprint.start(c) class test_WorkController(ConsumerCase): def setup(self): self.worker = self.create_worker() self._logger = worker_module.logger self._comp_logger = components.logger self.logger = worker_module.logger = Mock() self.comp_logger = components.logger = Mock() @self.app.task(shared=False) def foo_task(x, y, z): return x * y * z self.foo_task = foo_task def teardown(self): worker_module.logger = self._logger components.logger = self._comp_logger def create_worker(self, **kw): worker = self.app.WorkController(concurrency=1, loglevel=0, **kw) worker.blueprint.shutdown_complete.set() return worker def test_on_consumer_ready(self): self.worker.on_consumer_ready(Mock()) def test_setup_queues_worker_direct(self): self.app.conf.worker_direct = True self.app.amqp.__dict__['queues'] = Mock() self.worker.setup_queues({}) self.app.amqp.queues.select_add.assert_called_with( worker_direct(self.worker.hostname), ) def test_setup_queues__missing_queue(self): self.app.amqp.queues.select = Mock(name='select') self.app.amqp.queues.deselect = Mock(name='deselect') self.app.amqp.queues.select.side_effect = KeyError() self.app.amqp.queues.deselect.side_effect = KeyError() with pytest.raises(ImproperlyConfigured): self.worker.setup_queues('x,y', exclude='foo,bar') self.app.amqp.queues.select = Mock(name='select') with pytest.raises(ImproperlyConfigured): self.worker.setup_queues('x,y', exclude='foo,bar') def test_send_worker_shutdown(self): with patch('celery.signals.worker_shutdown') as ws: self.worker._send_worker_shutdown() ws.send.assert_called_with(sender=self.worker) @skip.todo('unstable test') def 
test_process_shutdown_on_worker_shutdown(self): from celery.concurrency.prefork import process_destructor from celery.concurrency.asynpool import Worker with patch('celery.signals.worker_process_shutdown') as ws: with patch('os._exit') as _exit: worker = Worker(None, None, on_exit=process_destructor) worker._do_exit(22, 3.1415926) ws.send.assert_called_with( sender=None, pid=22, exitcode=3.1415926, ) _exit.assert_called_with(3.1415926) def test_process_task_revoked_release_semaphore(self): self.worker._quick_release = Mock() req = Mock() req.execute_using_pool.side_effect = TaskRevokedError self.worker._process_task(req) self.worker._quick_release.assert_called_with() delattr(self.worker, '_quick_release') self.worker._process_task(req) def test_shutdown_no_blueprint(self): self.worker.blueprint = None self.worker._shutdown() @patch('celery.worker.worker.create_pidlock') def test_use_pidfile(self, create_pidlock): create_pidlock.return_value = Mock() worker = self.create_worker(pidfile='pidfilelockfilepid') worker.steps = [] worker.start() create_pidlock.assert_called() worker.stop() worker.pidlock.release.assert_called() def test_attrs(self): worker = self.worker assert worker.timer is not None assert isinstance(worker.timer, Timer) assert worker.pool is not None assert worker.consumer is not None assert worker.steps def test_with_embedded_beat(self): worker = self.app.WorkController(concurrency=1, loglevel=0, beat=True) assert worker.beat assert worker.beat in [w.obj for w in worker.steps] def test_with_autoscaler(self): worker = self.create_worker( autoscale=[10, 3], send_events=False, timer_cls='celery.utils.timer2.Timer', ) assert worker.autoscaler def test_dont_stop_or_terminate(self): worker = self.app.WorkController(concurrency=1, loglevel=0) worker.stop() assert worker.blueprint.state != CLOSE worker.terminate() assert worker.blueprint.state != CLOSE sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False try: worker.blueprint.state = RUN worker.stop(in_sighandler=True) assert worker.blueprint.state != CLOSE worker.terminate(in_sighandler=True) assert worker.blueprint.state != CLOSE finally: worker.pool.signal_safe = sigsafe def test_on_timer_error(self): worker = self.app.WorkController(concurrency=1, loglevel=0) try: raise KeyError('foo') except KeyError as exc: components.Timer(worker).on_timer_error(exc) msg, args = self.comp_logger.error.call_args[0] assert 'KeyError' in msg % args def test_on_timer_tick(self): worker = self.app.WorkController(concurrency=1, loglevel=10) components.Timer(worker).on_timer_tick(30.0) xargs = self.comp_logger.debug.call_args[0] fmt, arg = xargs[0], xargs[1] assert arg == 30.0 assert 'Next ETA %s secs' in fmt def test_process_task(self): worker = self.worker worker.pool = Mock() channel = Mock() m = self.create_task_message( channel, self.foo_task.name, args=[4, 8, 10], kwargs={}, ) task = Request(m, app=self.app) worker._process_task(task) assert worker.pool.apply_async.call_count == 1 worker.pool.stop() def test_process_task_raise_base(self): worker = self.worker worker.pool = Mock() worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C') channel = Mock() m = self.create_task_message( channel, self.foo_task.name, args=[4, 8, 10], kwargs={}, ) task = Request(m, app=self.app) worker.steps = [] worker.blueprint.state = RUN with pytest.raises(KeyboardInterrupt): worker._process_task(task) def test_process_task_raise_WorkerTerminate(self): worker = self.worker worker.pool = Mock() worker.pool.apply_async.side_effect = 
WorkerTerminate() channel = Mock() m = self.create_task_message( channel, self.foo_task.name, args=[4, 8, 10], kwargs={}, ) task = Request(m, app=self.app) worker.steps = [] worker.blueprint.state = RUN with pytest.raises(SystemExit): worker._process_task(task) def test_process_task_raise_regular(self): worker = self.worker worker.pool = Mock() worker.pool.apply_async.side_effect = KeyError('some exception') channel = Mock() m = self.create_task_message( channel, self.foo_task.name, args=[4, 8, 10], kwargs={}, ) task = Request(m, app=self.app) with pytest.raises(KeyError): worker._process_task(task) worker.pool.stop() def test_start_catches_base_exceptions(self): worker1 = self.create_worker() worker1.blueprint.state = RUN stc = MockStep() stc.start.side_effect = WorkerTerminate() worker1.steps = [stc] worker1.start() stc.start.assert_called_with(worker1) assert stc.terminate.call_count worker2 = self.create_worker() worker2.blueprint.state = RUN sec = MockStep() sec.start.side_effect = WorkerShutdown() sec.terminate = None worker2.steps = [sec] worker2.start() assert sec.stop.call_count def test_statedb(self): from celery.worker import state Persistent = state.Persistent state.Persistent = Mock() try: worker = self.create_worker(statedb='statefilename') assert worker._persistence finally: state.Persistent = Persistent def test_process_task_sem(self): worker = self.worker worker._quick_acquire = Mock() req = Mock() worker._process_task_sem(req) worker._quick_acquire.assert_called_with(worker._process_task, req) def test_signal_consumer_close(self): worker = self.worker worker.consumer = Mock() worker.signal_consumer_close() worker.consumer.close.assert_called_with() worker.consumer.close.side_effect = AttributeError() worker.signal_consumer_close() def test_rusage__no_resource(self): from celery.worker import worker prev, worker.resource = worker.resource, None try: self.worker.pool = Mock(name='pool') with pytest.raises(NotImplementedError): self.worker.rusage() self.worker.stats() finally: worker.resource = prev def test_repr(self): assert repr(self.worker) def test_str(self): assert str(self.worker) == self.worker.hostname def test_start__stop(self): worker = self.worker worker.blueprint.shutdown_complete.set() worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)] worker.blueprint.state = RUN worker.blueprint.started = 4 for w in worker.steps: w.start = Mock() w.close = Mock() w.stop = Mock() worker.start() for w in worker.steps: w.start.assert_called() worker.consumer = Mock() worker.stop(exitcode=3) for stopstep in worker.steps: stopstep.close.assert_called() stopstep.stop.assert_called() # Doesn't close pool if no pool. 
worker.start() worker.pool = None worker.stop() # test that stop of None is not attempted worker.steps[-1] = None worker.start() worker.stop() def test_start__KeyboardInterrupt(self): worker = self.worker worker.blueprint = Mock(name='blueprint') worker.blueprint.start.side_effect = KeyboardInterrupt() worker.stop = Mock(name='stop') worker.start() worker.stop.assert_called_with(exitcode=EX_FAILURE) def test_register_with_event_loop(self): worker = self.worker hub = Mock(name='hub') worker.blueprint = Mock(name='blueprint') worker.register_with_event_loop(hub) worker.blueprint.send_all.assert_called_with( worker, 'register_with_event_loop', args=(hub,), description='hub.register', ) def test_step_raises(self): worker = self.worker step = Mock() worker.steps = [step] step.start.side_effect = TypeError() worker.stop = Mock() worker.start() worker.stop.assert_called_with(exitcode=EX_FAILURE) def test_state(self): assert self.worker.state def test_start__terminate(self): worker = self.worker worker.blueprint.shutdown_complete.set() worker.blueprint.started = 5 worker.blueprint.state = RUN worker.steps = [MockStep() for _ in range(5)] worker.start() for w in worker.steps[:3]: w.start.assert_called() assert worker.blueprint.started == len(worker.steps) assert worker.blueprint.state == RUN worker.terminate() for step in worker.steps: step.terminate.assert_called() worker.blueprint.state = TERMINATE worker.terminate() def test_Hub_create(self): w = Mock() x = components.Hub(w) x.create(w) assert w.timer.max_interval def test_Pool_create_threaded(self): w = Mock() w._conninfo.connection_errors = w._conninfo.channel_errors = () w.pool_cls = Mock() w.use_eventloop = False pool = components.Pool(w) pool.create(w) def test_Pool_pool_no_sem(self): w = Mock() w.pool_cls.uses_semaphore = False components.Pool(w).create(w) assert w.process_task is w._process_task def test_Pool_create(self): from kombu.async.semaphore import LaxBoundedSemaphore w = Mock() w._conninfo.connection_errors = w._conninfo.channel_errors = () w.hub = Mock() PoolImp = Mock() poolimp = PoolImp.return_value = Mock() poolimp._pool = [Mock(), Mock()] poolimp._cache = {} poolimp._fileno_to_inq = {} poolimp._fileno_to_outq = {} from celery.concurrency.prefork import TaskPool as _TaskPool class MockTaskPool(_TaskPool): Pool = PoolImp @property def timers(self): return {Mock(): 30} w.pool_cls = MockTaskPool w.use_eventloop = True w.consumer.restart_count = -1 pool = components.Pool(w) pool.create(w) pool.register_with_event_loop(w, w.hub) if sys.platform != 'win32': assert isinstance(w.semaphore, LaxBoundedSemaphore) P = w.pool P.start() celery-4.1.0/t/unit/worker/test_autoscale.py0000644000175000017500000001431513130607475021063 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import sys from case import Mock, mock, patch from celery.concurrency.base import BasePool from celery.five import monotonic from celery.worker import state from celery.worker import autoscale from celery.utils.objects import Bunch class MockPool(BasePool): shrink_raises_exception = False shrink_raises_ValueError = False def __init__(self, *args, **kwargs): super(MockPool, self).__init__(*args, **kwargs) self._pool = Bunch(_processes=self.limit) def grow(self, n=1): self._pool._processes += n def shrink(self, n=1): if self.shrink_raises_exception: raise KeyError('foo') if self.shrink_raises_ValueError: raise ValueError('foo') self._pool._processes -= n @property def num_processes(self): return self._pool._processes class 
test_WorkerComponent: def test_register_with_event_loop(self): parent = Mock(name='parent') parent.autoscale = True parent.consumer.on_task_message = set() w = autoscale.WorkerComponent(parent) assert parent.autoscaler is None assert w.enabled hub = Mock(name='hub') w.create(parent) w.register_with_event_loop(parent, hub) assert (parent.autoscaler.maybe_scale in parent.consumer.on_task_message) hub.call_repeatedly.assert_called_with( parent.autoscaler.keepalive, parent.autoscaler.maybe_scale, ) parent.hub = hub hub.on_init = [] w.instantiate = Mock() w.register_with_event_loop(parent, Mock(name='loop')) assert parent.consumer.on_task_message class test_Autoscaler: def setup(self): self.pool = MockPool(3) def test_stop(self): class Scaler(autoscale.Autoscaler): alive = True joined = False def is_alive(self): return self.alive def join(self, timeout=None): self.joined = True worker = Mock(name='worker') x = Scaler(self.pool, 10, 3, worker=worker) x._is_stopped.set() x.stop() assert x.joined x.joined = False x.alive = False x.stop() assert not x.joined @mock.sleepdeprived(module=autoscale) def test_body(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.body() assert x.pool.num_processes == 3 _keep = [Mock(name='req{0}'.format(i)) for i in range(20)] [state.task_reserved(m) for m in _keep] x.body() x.body() assert x.pool.num_processes == 10 worker.consumer._update_prefetch_count.assert_called() state.reserved_requests.clear() x.body() assert x.pool.num_processes == 10 x._last_scale_up = monotonic() - 10000 x.body() assert x.pool.num_processes == 3 worker.consumer._update_prefetch_count.assert_called() def test_run(self): class Scaler(autoscale.Autoscaler): scale_called = False def body(self): self.scale_called = True self._is_shutdown.set() worker = Mock(name='worker') x = Scaler(self.pool, 10, 3, worker=worker) x.run() assert x._is_shutdown.isSet() assert x._is_stopped.isSet() assert x.scale_called def test_shrink_raises_exception(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.scale_up(3) x.pool.shrink_raises_exception = True x._shrink(1) @patch('celery.worker.autoscale.debug') def test_shrink_raises_ValueError(self, debug): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.scale_up(3) x._last_scale_up = monotonic() - 10000 x.pool.shrink_raises_ValueError = True x.scale_down(1) assert debug.call_count def test_update_and_force(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) assert x.processes == 3 x.force_scale_up(5) assert x.processes == 8 x.update(5, None) assert x.processes == 5 x.force_scale_down(3) assert x.processes == 2 x.update(None, 3) assert x.processes == 3 x.force_scale_down(1000) assert x.min_concurrency == 0 assert x.processes == 0 x.force_scale_up(1000) x.min_concurrency = 1 x.force_scale_down(1) x.update(max=300, min=10) x.update(max=300, min=2) x.update(max=None, min=None) def test_info(self): worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) info = x.info() assert info['max'] == 10 assert info['min'] == 3 assert info['current'] == 3 @patch('os._exit') def test_thread_crash(self, _exit): class _Autoscaler(autoscale.Autoscaler): def body(self): self._is_shutdown.set() raise OSError('foo') worker = Mock(name='worker') x = _Autoscaler(self.pool, 10, 3, worker=worker) stderr = Mock() p, sys.stderr = sys.stderr, stderr try: x.run() finally: sys.stderr = p 
_exit.assert_called_with(1) stderr.write.assert_called() @mock.sleepdeprived(module=autoscale) def test_no_negative_scale(self): total_num_processes = [] worker = Mock(name='worker') x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.body() # the body func scales up or down _keep = [Mock(name='req{0}'.format(i)) for i in range(35)] for req in _keep: state.task_reserved(req) x.body() total_num_processes.append(self.pool.num_processes) for req in _keep: state.task_ready(req) x.body() total_num_processes.append(self.pool.num_processes) assert all(x.min_concurrency <= i <= x.max_concurrency for i in total_num_processes) celery-4.1.0/t/unit/worker/test_consumer.py0000644000175000017500000004634613130607475020747 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import errno import pytest import socket from collections import deque from case import ContextMock, Mock, call, patch, skip from billiard.exceptions import RestartFreqExceeded from celery.worker.consumer.agent import Agent from celery.worker.consumer.consumer import (CLOSE, TERMINATE, Consumer, dump_body) from celery.worker.consumer.gossip import Gossip from celery.worker.consumer.heart import Heart from celery.worker.consumer.mingle import Mingle from celery.worker.consumer.tasks import Tasks from celery.utils.collections import LimitedSet class test_Consumer: def get_consumer(self, no_hub=False, **kwargs): consumer = Consumer( on_task_request=Mock(), init_callback=Mock(), pool=Mock(), app=self.app, timer=Mock(), controller=Mock(), hub=None if no_hub else Mock(), **kwargs ) consumer.blueprint = Mock(name='blueprint') consumer._restart_state = Mock(name='_restart_state') consumer.connection = _amqp_connection() consumer.connection_errors = (socket.error, OSError,) consumer.conninfo = consumer.connection return consumer def test_repr(self): assert repr(self.get_consumer()) def test_taskbuckets_defaultdict(self): c = self.get_consumer() assert c.task_buckets['fooxasdwx.wewe'] is None @skip.if_python3(reason='buffer type not available') def test_dump_body_buffer(self): msg = Mock() msg.body = 'str' assert dump_body(msg, buffer(msg.body)) # noqa: F821 def test_sets_heartbeat(self): c = self.get_consumer(amqheartbeat=10) assert c.amqheartbeat == 10 self.app.conf.broker_heartbeat = 20 c = self.get_consumer(amqheartbeat=None) assert c.amqheartbeat == 20 def test_gevent_bug_disables_connection_timeout(self): with patch('celery.worker.consumer.consumer._detect_environment') as d: d.return_value = 'gevent' self.app.conf.broker_connection_timeout = 33.33 self.get_consumer() assert self.app.conf.broker_connection_timeout is None def test_limit_moved_to_pool(self): with patch('celery.worker.consumer.consumer.task_reserved') as reserv: c = self.get_consumer() c.on_task_request = Mock(name='on_task_request') request = Mock(name='request') c._limit_move_to_pool(request) reserv.assert_called_with(request) c.on_task_request.assert_called_with(request) def test_update_prefetch_count(self): c = self.get_consumer() c._update_qos_eventually = Mock(name='update_qos') c.initial_prefetch_count = None c.pool.num_processes = None c.prefetch_multiplier = 10 assert c._update_prefetch_count(1) is None c.initial_prefetch_count = 10 c.pool.num_processes = 10 c._update_prefetch_count(8) c._update_qos_eventually.assert_called_with(8) assert c.initial_prefetch_count == 10 * 10 def test_flush_events(self): c = self.get_consumer() c.event_dispatcher = None c._flush_events() c.event_dispatcher = Mock(name='evd') c._flush_events() 
c.event_dispatcher.flush.assert_called_with() def test_on_send_event_buffered(self): c = self.get_consumer() c.hub = None c.on_send_event_buffered() c.hub = Mock(name='hub') c.on_send_event_buffered() c.hub._ready.add.assert_called_with(c._flush_events) def test_limit_task(self): c = self.get_consumer() c.timer = Mock() bucket = Mock() request = Mock() bucket.can_consume.return_value = True bucket.contents = deque() c._limit_task(request, bucket, 3) bucket.can_consume.assert_called_with(3) bucket.expected_time.assert_called_with(3) c.timer.call_after.assert_called_with( bucket.expected_time(), c._on_bucket_wakeup, (bucket, 3), priority=c._limit_order, ) bucket.can_consume.return_value = False bucket.expected_time.return_value = 3.33 limit_order = c._limit_order c._limit_task(request, bucket, 4) assert c._limit_order == limit_order + 1 bucket.can_consume.assert_called_with(4) c.timer.call_after.assert_called_with( 3.33, c._on_bucket_wakeup, (bucket, 4), priority=c._limit_order, ) bucket.expected_time.assert_called_with(4) def test_start_blueprint_raises_EMFILE(self): c = self.get_consumer() exc = c.blueprint.start.side_effect = OSError() exc.errno = errno.EMFILE with pytest.raises(OSError): c.start() def test_max_restarts_exceeded(self): c = self.get_consumer() def se(*args, **kwargs): c.blueprint.state = CLOSE raise RestartFreqExceeded() c._restart_state.step.side_effect = se c.blueprint.start.side_effect = socket.error() with patch('celery.worker.consumer.consumer.sleep') as sleep: c.start() sleep.assert_called_with(1) def test_do_not_restart_when_closed(self): c = self.get_consumer() c.blueprint.state = None def bp_start(*args, **kwargs): c.blueprint.state = CLOSE c.blueprint.start.side_effect = bp_start with patch('celery.worker.consumer.consumer.sleep'): c.start() c.blueprint.start.assert_called_once_with(c) def test_do_not_restart_when_terminated(self): c = self.get_consumer() c.blueprint.state = None def bp_start(*args, **kwargs): c.blueprint.state = TERMINATE c.blueprint.start.side_effect = bp_start with patch('celery.worker.consumer.consumer.sleep'): c.start() c.blueprint.start.assert_called_once_with(c) def test_no_retry_raises_error(self): self.app.conf.broker_connection_retry = False c = self.get_consumer() c.blueprint.start.side_effect = socket.error() with pytest.raises(socket.error): c.start() def _closer(self, c): def se(*args, **kwargs): c.blueprint.state = CLOSE return se def test_collects_at_restart(self): c = self.get_consumer() c.connection.collect.side_effect = MemoryError() c.blueprint.start.side_effect = socket.error() c.blueprint.restart.side_effect = self._closer(c) c.start() c.connection.collect.assert_called_with() def test_register_with_event_loop(self): c = self.get_consumer() c.register_with_event_loop(Mock(name='loop')) def test_on_close_clears_semaphore_timer_and_reqs(self): with patch('celery.worker.consumer.consumer.reserved_requests') as res: c = self.get_consumer() c.on_close() c.controller.semaphore.clear.assert_called_with() c.timer.clear.assert_called_with() res.clear.assert_called_with() c.pool.flush.assert_called_with() c.controller = None c.timer = None c.pool = None c.on_close() def test_connect_error_handler(self): self.app._connection = _amqp_connection() conn = self.app._connection.return_value c = self.get_consumer() assert c.connect() conn.ensure_connection.assert_called() errback = conn.ensure_connection.call_args[0][0] errback(Mock(), 0) class test_Heart: def test_start(self): c = Mock() c.timer = Mock() c.event_dispatcher = Mock() with 
patch('celery.worker.heartbeat.Heart') as hcls: h = Heart(c) assert h.enabled assert h.heartbeat_interval is None assert c.heart is None h.start(c) assert c.heart hcls.assert_called_with(c.timer, c.event_dispatcher, h.heartbeat_interval) c.heart.start.assert_called_with() def test_start_heartbeat_interval(self): c = Mock() c.timer = Mock() c.event_dispatcher = Mock() with patch('celery.worker.heartbeat.Heart') as hcls: h = Heart(c, False, 20) assert h.enabled assert h.heartbeat_interval == 20 assert c.heart is None h.start(c) assert c.heart hcls.assert_called_with(c.timer, c.event_dispatcher, h.heartbeat_interval) c.heart.start.assert_called_with() class test_Tasks: def test_stop(self): c = Mock() tasks = Tasks(c) assert c.task_consumer is None assert c.qos is None c.task_consumer = Mock() tasks.stop(c) def test_stop_already_stopped(self): c = Mock() tasks = Tasks(c) tasks.stop(c) class test_Agent: def test_start(self): c = Mock() agent = Agent(c) agent.instantiate = Mock() agent.agent_cls = 'foo:Agent' assert agent.create(c) is not None agent.instantiate.assert_called_with(agent.agent_cls, c.connection) class test_Mingle: def test_start_no_replies(self): c = Mock() c.app.connection_for_read = _amqp_connection() mingle = Mingle(c) I = c.app.control.inspect.return_value = Mock() I.hello.return_value = {} mingle.start(c) def test_start(self): c = Mock() c.app.connection_for_read = _amqp_connection() mingle = Mingle(c) assert mingle.enabled Aig = LimitedSet() Big = LimitedSet() Aig.add('Aig-1') Aig.add('Aig-2') Big.add('Big-1') I = c.app.control.inspect.return_value = Mock() I.hello.return_value = { 'A@example.com': { 'clock': 312, 'revoked': Aig._data, }, 'B@example.com': { 'clock': 29, 'revoked': Big._data, }, 'C@example.com': { 'error': 'unknown method', }, } our_revoked = c.controller.state.revoked = LimitedSet() mingle.start(c) I.hello.assert_called_with(c.hostname, our_revoked._data) c.app.clock.adjust.assert_has_calls([ call(312), call(29), ], any_order=True) assert 'Aig-1' in our_revoked assert 'Aig-2' in our_revoked assert 'Big-1' in our_revoked def _amqp_connection(): connection = ContextMock(name='Connection') connection.return_value = ContextMock(name='connection') connection.return_value.transport.driver_type = 'amqp' return connection class test_Gossip: def test_init(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) assert g.enabled assert c.gossip is g def test_callbacks(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) on_node_join = Mock(name='on_node_join') on_node_join2 = Mock(name='on_node_join2') on_node_leave = Mock(name='on_node_leave') on_node_lost = Mock(name='on.node_lost') g.on.node_join.add(on_node_join) g.on.node_join.add(on_node_join2) g.on.node_leave.add(on_node_leave) g.on.node_lost.add(on_node_lost) worker = Mock(name='worker') g.on_node_join(worker) on_node_join.assert_called_with(worker) on_node_join2.assert_called_with(worker) g.on_node_leave(worker) on_node_leave.assert_called_with(worker) g.on_node_lost(worker) on_node_lost.assert_called_with(worker) def test_election(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.start(c) g.election('id', 'topic', 'action') assert g.consensus_replies['id'] == [] g.dispatcher.send.assert_called_with( 'worker-elect', id='id', topic='topic', cver=1, action='action', ) def test_call_task(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.start(c) signature = 
g.app.signature = Mock(name='app.signature') task = Mock() g.call_task(task) signature.assert_called_with(task) signature.return_value.apply_async.assert_called_with() signature.return_value.apply_async.side_effect = MemoryError() with patch('celery.worker.consumer.gossip.logger') as logger: g.call_task(task) logger.exception.assert_called() def Event(self, id='id', clock=312, hostname='foo@example.com', pid=4312, topic='topic', action='action', cver=1): return { 'id': id, 'clock': clock, 'hostname': hostname, 'pid': pid, 'topic': topic, 'action': action, 'cver': cver, } def test_on_elect(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.start(c) event = self.Event('id1') g.on_elect(event) in_heap = g.consensus_requests['id1'] assert in_heap g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1') event.pop('clock') with patch('celery.worker.consumer.gossip.logger') as logger: g.on_elect(event) logger.exception.assert_called() def Consumer(self, hostname='foo@x.com', pid=4312): c = Mock() c.app.connection = _amqp_connection() c.hostname = hostname c.pid = pid return c def setup_election(self, g, c): g.start(c) g.clock = self.app.clock assert 'idx' not in g.consensus_replies assert g.on_elect_ack({'id': 'idx'}) is None g.state.alive_workers.return_value = [ 'foo@x.com', 'bar@x.com', 'baz@x.com', ] g.consensus_replies['id1'] = [] g.consensus_requests['id1'] = [] e1 = self.Event('id1', 1, 'foo@x.com') e2 = self.Event('id1', 2, 'bar@x.com') e3 = self.Event('id1', 3, 'baz@x.com') g.on_elect(e1) g.on_elect(e2) g.on_elect(e3) assert len(g.consensus_requests['id1']) == 3 with patch('celery.worker.consumer.gossip.info'): g.on_elect_ack(e1) assert len(g.consensus_replies['id1']) == 1 g.on_elect_ack(e2) assert len(g.consensus_replies['id1']) == 2 g.on_elect_ack(e3) with pytest.raises(KeyError): g.consensus_replies['id1'] def test_on_elect_ack_win(self): c = self.Consumer(hostname='foo@x.com') # I will win c.app.connection_for_read = _amqp_connection() g = Gossip(c) handler = g.election_handlers['topic'] = Mock() self.setup_election(g, c) handler.assert_called_with('action') def test_on_elect_ack_lose(self): c = self.Consumer(hostname='bar@x.com') # I will lose c.app.connection_for_read = _amqp_connection() g = Gossip(c) handler = g.election_handlers['topic'] = Mock() self.setup_election(g, c) handler.assert_not_called() def test_on_elect_ack_win_but_no_action(self): c = self.Consumer(hostname='foo@x.com') # I will win c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.election_handlers = {} with patch('celery.worker.consumer.gossip.logger') as logger: self.setup_election(g, c) logger.exception.assert_called() def test_on_node_join(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) with patch('celery.worker.consumer.gossip.debug') as debug: g.on_node_join(c) debug.assert_called_with('%s joined the party', 'foo@x.com') def test_on_node_leave(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) with patch('celery.worker.consumer.gossip.debug') as debug: g.on_node_leave(c) debug.assert_called_with('%s left', 'foo@x.com') def test_on_node_lost(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) with patch('celery.worker.consumer.gossip.info') as info: g.on_node_lost(c) info.assert_called_with('missed heartbeat from %s', 'foo@x.com') def test_register_timer(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = 
Gossip(c) g.register_timer() c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic) tref = g._tref g.register_timer() tref.cancel.assert_called_with() def test_periodic(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) g.on_node_lost = Mock() state = g.state = Mock() worker = Mock() state.workers = {'foo': worker} worker.alive = True worker.hostname = 'foo' g.periodic() worker.alive = False g.periodic() g.on_node_lost.assert_called_with(worker) with pytest.raises(KeyError): state.workers['foo'] def test_on_message__task(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) assert g.enabled message = Mock(name='message') message.delivery_info = {'routing_key': 'task.failed'} g.on_message(Mock(name='prepare'), message) def test_on_message(self): c = self.Consumer() c.app.connection_for_read = _amqp_connection() g = Gossip(c) assert g.enabled prepare = Mock() prepare.return_value = 'worker-online', {} c.app.events.State.assert_called_with( on_node_join=g.on_node_join, on_node_leave=g.on_node_leave, max_tasks_in_memory=1, ) g.update_state = Mock() worker = Mock() g.on_node_join = Mock() g.on_node_leave = Mock() g.update_state.return_value = worker, 1 message = Mock() message.delivery_info = {'routing_key': 'worker-online'} message.headers = {'hostname': 'other'} handler = g.event_handlers['worker-online'] = Mock() g.on_message(prepare, message) handler.assert_called_with(message.payload) g.event_handlers = {} g.on_message(prepare, message) message.delivery_info = {'routing_key': 'worker-offline'} prepare.return_value = 'worker-offline', {} g.on_message(prepare, message) message.delivery_info = {'routing_key': 'worker-baz'} prepare.return_value = 'worker-baz', {} g.update_state.return_value = worker, 0 g.on_message(prepare, message) message.headers = {'hostname': g.hostname} g.on_message(prepare, message) g.clock.forward.assert_called_with() celery-4.1.0/t/__init__.py0000644000175000017500000000000013130607475015275 0ustar omeromer00000000000000celery-4.1.0/t/distro/0000755000175000017500000000000013135426347014504 5ustar omeromer00000000000000celery-4.1.0/t/distro/test_CI_reqs.py0000644000175000017500000000164513130607475017446 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import os import pprint import pytest def _get_extras_reqs_from(name): try: with open(os.path.join('requirements', name)) as fh: lines = fh.readlines() except OSError: pytest.skip('requirements dir missing, not running from dist?') else: return { line.split()[1] for line in lines if line.startswith('-r extras/') } def _get_all_extras(): return set( os.path.join('extras', f) for f in os.listdir('requirements/extras/') ) def test_all_reqs_enabled_in_tests(): ci_default = _get_extras_reqs_from('test-ci-default.txt') ci_base = _get_extras_reqs_from('test-ci-base.txt') defined = ci_default | ci_base all_extras = _get_all_extras() diff = all_extras - defined print('Missing CI reqs:\n{0}'.format(pprint.pformat(diff))) assert not diff celery-4.1.0/t/benchmarks/0000755000175000017500000000000013135426347015315 5ustar omeromer00000000000000celery-4.1.0/t/benchmarks/bench_worker.py0000644000175000017500000000600213130607475020333 0ustar omeromer00000000000000from __future__ import absolute_import, print_function, unicode_literals import os import sys os.environ.update( NOSETPS='yes', USE_FAST_LOCALS='yes', ) from celery import Celery # noqa from celery.five import range # noqa from kombu.five import monotonic # 
noqa DEFAULT_ITS = 40000 BROKER_TRANSPORT = os.environ.get('BROKER', 'librabbitmq://') if hasattr(sys, 'pypy_version_info'): BROKER_TRANSPORT = 'pyamqp://' app = Celery('bench_worker') app.conf.update( broker_url=BROKER_TRANSPORT, broker_pool_limit=10, worker_pool='solo', worker_prefetch_multiplier=0, task_default_delivery_mode=1, task_queues={ 'bench.worker': { 'exchange': 'bench.worker', 'routing_key': 'bench.worker', 'no_ack': True, 'exchange_durable': False, 'queue_durable': False, 'auto_delete': True, } }, task_serializer='json', task_default_queue='bench.worker', result_backend=None, ) def tdiff(then): return monotonic() - then @app.task(cur=0, time_start=None, queue='bench.worker', bare=True) def it(_, n): # use internal counter, as ordering can be skewed # by previous runs, or the broker. i = it.cur if i and not i % 5000: print('({0} so far: {1}s)'.format(i, tdiff(it.subt)), file=sys.stderr) it.subt = monotonic() if not i: it.subt = it.time_start = monotonic() elif i > n - 2: total = tdiff(it.time_start) print('({0} so far: {1}s)'.format(i, tdiff(it.subt)), file=sys.stderr) print('-- process {0} tasks: {1}s total, {2} tasks/s '.format( n, total, n / (total + .0), )) import os os._exit(0) it.cur += 1 def bench_apply(n=DEFAULT_ITS): time_start = monotonic() task = it._get_current_object() with app.producer_or_acquire() as producer: [task.apply_async((i, n), producer=producer) for i in range(n)] print('-- apply {0} tasks: {1}s'.format(n, monotonic() - time_start)) def bench_work(n=DEFAULT_ITS, loglevel='CRITICAL'): loglevel = os.environ.get('BENCH_LOGLEVEL') or loglevel if loglevel: app.log.setup_logging_subsystem(loglevel=loglevel) worker = app.WorkController(concurrency=15, queues=['bench.worker']) try: print('-- starting worker') worker.start() except SystemExit: raise assert sum(worker.state.total_count.values()) == n + 1 def bench_both(n=DEFAULT_ITS): bench_apply(n) bench_work(n) def main(argv=sys.argv): n = DEFAULT_ITS if len(argv) < 2: print('Usage: {0} [apply|work|both] [n=20k]'.format( os.path.basename(argv[0]), )) return sys.exit(1) try: n = int(argv[2]) except IndexError: pass return {'apply': bench_apply, 'work': bench_work, 'both': bench_both}[argv[1]](n=n) if __name__ == '__main__': main() celery-4.1.0/t/integration/0000755000175000017500000000000013135426347015523 5ustar omeromer00000000000000celery-4.1.0/t/integration/__init__.py0000644000175000017500000000000013130607475017620 0ustar omeromer00000000000000celery-4.1.0/t/integration/test_canvas.py0000644000175000017500000001462113130607475020411 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from redis import StrictRedis from celery import chain, chord, group from celery.exceptions import TimeoutError from celery.result import AsyncResult, GroupResult from .conftest import flaky from .tasks import add, add_replaced, add_to_all, collect_ids, ids, redis_echo TIMEOUT = 120 class test_chain: @flaky def test_simple_chain(self, manager): c = add.s(4, 4) | add.s(8) | add.s(16) assert c().get(timeout=TIMEOUT) == 32 @flaky def test_complex_chain(self, manager): c = ( add.s(2, 2) | ( add.s(4) | add_replaced.s(8) | add.s(16) | add.s(32) ) | group(add.s(i) for i in range(4)) ) res = c() assert res.get(timeout=TIMEOUT) == [64, 65, 66, 67] @flaky def test_group_chord_group_chain(self, manager): from celery.five import bytes_if_py2 if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection =
StrictRedis() redis_connection.delete('redis-echo') before = group(redis_echo.si('before {}'.format(i)) for i in range(3)) connect = redis_echo.si('connect') after = group(redis_echo.si('after {}'.format(i)) for i in range(2)) result = (before | connect | after).delay() result.get(timeout=TIMEOUT) redis_messages = list(map( bytes_if_py2, redis_connection.lrange('redis-echo', 0, -1) )) before_items = \ set(map(bytes_if_py2, (b'before 0', b'before 1', b'before 2'))) after_items = set(map(bytes_if_py2, (b'after 0', b'after 1'))) assert set(redis_messages[:3]) == before_items assert redis_messages[3] == b'connect' assert set(redis_messages[4:]) == after_items redis_connection.delete('redis-echo') @flaky def test_parent_ids(self, manager, num=10): assert manager.inspect().ping() c = chain(ids.si(i=i) for i in range(num)) c.freeze() res = c() try: res.get(timeout=TIMEOUT) except TimeoutError: print(manager.inspect.active()) print(manager.inspect.reserved()) print(manager.inspect.stats()) raise self.assert_ids(res, num - 1) def assert_ids(self, res, size): i, root = size, res while root.parent: root = root.parent node = res while node: root_id, parent_id, value = node.get(timeout=30) assert value == i if node.parent: assert parent_id == node.parent.id assert root_id == root.id node = node.parent i -= 1 class test_group: @flaky def test_parent_ids(self, manager): assert manager.inspect().ping() g = ( ids.si(i=1) | ids.si(i=2) | group(ids.si(i=i) for i in range(2, 50)) ) res = g() expected_root_id = res.parent.parent.id expected_parent_id = res.parent.id values = res.get(timeout=TIMEOUT) for i, r in enumerate(values): root_id, parent_id, value = r assert root_id == expected_root_id assert parent_id == expected_parent_id assert value == i + 2 def assert_ids(r, expected_value, expected_root_id, expected_parent_id): root_id, parent_id, value = r.get(timeout=TIMEOUT) assert expected_value == value assert root_id == expected_root_id assert parent_id == expected_parent_id class test_chord: @flaky def test_group_chain(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') c = ( add.s(2, 2) | group(add.s(i) for i in range(4)) | add_to_all.s(8) ) res = c() assert res.get(timeout=TIMEOUT) == [12, 13, 14, 15] @flaky def test_parent_ids(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') root = ids.si(i=1) expected_root_id = root.freeze().id g = chain( root, ids.si(i=2), chord( group(ids.si(i=i) for i in range(3, 50)), chain(collect_ids.s(i=50) | ids.si(i=51)), ), ) self.assert_parentids_chord(g(), expected_root_id) @flaky def test_parent_ids__OR(self, manager): if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') root = ids.si(i=1) expected_root_id = root.freeze().id g = ( root | ids.si(i=2) | group(ids.si(i=i) for i in range(3, 50)) | collect_ids.s(i=50) | ids.si(i=51) ) self.assert_parentids_chord(g(), expected_root_id) def assert_parentids_chord(self, res, expected_root_id): assert isinstance(res, AsyncResult) assert isinstance(res.parent, AsyncResult) assert isinstance(res.parent.parent, GroupResult) assert isinstance(res.parent.parent.parent, AsyncResult) assert isinstance(res.parent.parent.parent.parent, AsyncResult) # first we check the last task assert_ids(res, 51, expected_root_id, res.parent.id) # then the chord callback prev, (root_id, parent_id, value) = res.parent.get(timeout=30) assert value 
== 50 assert root_id == expected_root_id # started by one of the chord header tasks. assert parent_id in res.parent.parent.results # check what the chord callback recorded for i, p in enumerate(prev): root_id, parent_id, value = p assert root_id == expected_root_id assert parent_id == res.parent.parent.parent.id # ids(i=2) root_id, parent_id, value = res.parent.parent.parent.get(timeout=30) assert value == 2 assert parent_id == res.parent.parent.parent.parent.id assert root_id == expected_root_id # ids(i=1) root_id, parent_id, value = res.parent.parent.parent.parent.get( timeout=30) assert value == 1 assert root_id == expected_root_id assert parent_id is None celery-4.1.0/t/integration/conftest.py0000644000175000017500000000231013130607475017714 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import os import pytest from functools import wraps from celery.contrib.testing.manager import Manager TEST_BROKER = os.environ.get('TEST_BROKER', 'pyamqp://') TEST_BACKEND = os.environ.get('TEST_BACKEND', 'redis://') def flaky(fun): @wraps(fun) def _inner(*args, **kwargs): for i in reversed(range(3)): try: return fun(*args, **kwargs) except Exception: if not i: raise _inner.__wrapped__ = fun return _inner @pytest.fixture(scope='session') def celery_config(): return { 'broker_url': TEST_BROKER, 'result_backend': TEST_BACKEND } @pytest.fixture(scope='session') def celery_enable_logging(): return True @pytest.fixture(scope='session') def celery_worker_pool(): return 'prefork' @pytest.fixture(scope='session') def celery_includes(): return {'t.integration.tasks'} @pytest.fixture def app(celery_app): yield celery_app @pytest.fixture def manager(app, celery_session_worker): return Manager(app) @pytest.fixture(autouse=True) def ZZZZ_set_app_current(app): app.set_current() app.set_default() celery-4.1.0/t/integration/tasks.py0000644000175000017500000000363613130607475017230 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from time import sleep from celery import shared_task, group from celery.utils.log import get_task_logger logger = get_task_logger(__name__) @shared_task def add(x, y): """Add two numbers.""" return x + y @shared_task(bind=True) def add_replaced(self, x, y): """Add two numbers (via the add task).""" raise self.replace(add.s(x, y)) @shared_task(bind=True) def add_to_all(self, nums, val): """Add the given value to all supplied numbers.""" subtasks = [add.s(num, val) for num in nums] raise self.replace(group(*subtasks)) @shared_task def print_unicode(log_message='hå它 valmuefrø', print_message='hiöäüß'): """Task that both logs and prints strings containing funny characters.""" logger.warning(log_message) print(print_message) @shared_task def sleeping(i, **_): """Task sleeping for ``i`` seconds, and returning nothing.""" sleep(i) @shared_task(bind=True) def ids(self, i): """Returns a tuple of ``root_id``, ``parent_id`` and the argument passed as ``i``.""" return self.request.root_id, self.request.parent_id, i @shared_task(bind=True) def collect_ids(self, res, i): """Used as a callback in a chain or group where the previous tasks are :task:`ids`: returns a tuple of:: (previous_result, (root_id, parent_id, i)) """ return res, (self.request.root_id, self.request.parent_id, i) @shared_task(bind=True, expires=60.0, max_retries=1) def retry_once(self): """Task that fails and is retried.
Returns the number of retries.""" if self.request.retries: return self.request.retries raise self.retry(countdown=0.1) @shared_task def redis_echo(message): """Task that appends the message to a redis list""" from redis import StrictRedis redis_connection = StrictRedis() redis_connection.rpush('redis-echo', message) celery-4.1.0/t/integration/test_tasks.py0000644000175000017500000000124113130607475020255 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery import group from .conftest import flaky from .tasks import print_unicode, retry_once, sleeping class test_tasks: @flaky def test_task_accepted(self, manager, sleep=1): r1 = sleeping.delay(sleep) sleeping.delay(sleep) manager.assert_accepted([r1.id]) @flaky def test_task_retried(self): res = retry_once.delay() assert res.get(timeout=10) == 1 # retried once @flaky def test_unicode_task(self, manager): manager.join( group(print_unicode.s() for _ in range(5))(), timeout=10, propagate=True, ) celery-4.1.0/requirements/0000755000175000017500000000000013135426347015460 5ustar omeromer00000000000000celery-4.1.0/requirements/default.txt0000644000175000017500000000006413130607475017643 0ustar omeromer00000000000000pytz>dev billiard>=3.5.0.2,<3.6.0 kombu>=4.0.2,<5.0 celery-4.1.0/requirements/test-ci-default.txt0000644000175000017500000000071113130607475021210 0ustar omeromer00000000000000-r test-ci-base.txt #: Disabled for Cryptography crashing on 2.7 after interpreter shutdown. #-r extras/auth.txt -r extras/riak.txt -r extras/solar.txt -r extras/mongodb.txt -r extras/yaml.txt -r extras/tblib.txt -r extras/sqs.txt -r extras/slmq.txt -r extras/msgpack.txt -r extras/memcache.txt -r extras/eventlet.txt -r extras/gevent.txt -r extras/elasticsearch.txt -r extras/couchdb.txt -r extras/consul.txt -r extras/cassandra.txt -r extras/dynamodb.txt celery-4.1.0/requirements/test-pypy3.txt0000644000175000017500000000002113130607475020251 0ustar omeromer00000000000000-r deps/mock.txt celery-4.1.0/requirements/extras/0000755000175000017500000000000013135426347016766 5ustar omeromer00000000000000celery-4.1.0/requirements/extras/zeromq.txt0000644000175000017500000000001613130607475021037 0ustar omeromer00000000000000pyzmq>=13.1.0 celery-4.1.0/requirements/extras/memcache.txt0000644000175000017500000000001013130607475021256 0ustar omeromer00000000000000pylibmc celery-4.1.0/requirements/extras/tblib.txt0000644000175000017500000000001513130607475020615 0ustar omeromer00000000000000tblib>=1.3.0 celery-4.1.0/requirements/extras/dynamodb.txt0000644000175000017500000000001413130607475021315 0ustar omeromer00000000000000boto3==1.4.3celery-4.1.0/requirements/extras/sqlalchemy.txt0000644000175000017500000000001313130607475021661 0ustar omeromer00000000000000sqlalchemy celery-4.1.0/requirements/extras/zookeeper.txt0000644000175000017500000000001513130607475021524 0ustar omeromer00000000000000kazoo>=1.3.1 celery-4.1.0/requirements/extras/pymemcache.txt0000644000175000017500000000002113130607475021631 0ustar omeromer00000000000000python-memcached celery-4.1.0/requirements/extras/msgpack.txt0000644000175000017500000000002613130607475021150 0ustar omeromer00000000000000msgpack-python>=0.3.0 celery-4.1.0/requirements/extras/auth.txt0000644000175000017500000000001213130607475020457 0ustar omeromer00000000000000pyOpenSSL celery-4.1.0/requirements/extras/cassandra.txt0000644000175000017500000000002013130607475021454 0ustar 
omeromer00000000000000cassandra-drivercelery-4.1.0/requirements/extras/redis.txt0000644000175000017500000000001613130607475020630 0ustar omeromer00000000000000redis>=2.10.5 celery-4.1.0/requirements/extras/elasticsearch.txt0000644000175000017500000000001613130607475022334 0ustar omeromer00000000000000elasticsearch celery-4.1.0/requirements/extras/librabbitmq.txt0000644000175000017500000000002313130607475022010 0ustar omeromer00000000000000librabbitmq>=1.5.0 celery-4.1.0/requirements/extras/django.txt0000644000175000017500000000001413130607475020762 0ustar omeromer00000000000000Django>=1.8 celery-4.1.0/requirements/extras/mongodb.txt0000644000175000017500000000001713130607475021150 0ustar omeromer00000000000000pymongo>=3.3.0 celery-4.1.0/requirements/extras/couchbase.txt0000644000175000017500000000001213130607475021452 0ustar omeromer00000000000000couchbase celery-4.1.0/requirements/extras/riak.txt0000644000175000017500000000001313130607475020445 0ustar omeromer00000000000000riak >=2.0 celery-4.1.0/requirements/extras/slmq.txt0000644000175000017500000000003313130607475020475 0ustar omeromer00000000000000softlayer_messaging>=1.0.3 celery-4.1.0/requirements/extras/solar.txt0000644000175000017500000000000613130607475020641 0ustar omeromer00000000000000ephem celery-4.1.0/requirements/extras/couchdb.txt0000644000175000017500000000001213130607475021125 0ustar omeromer00000000000000pycouchdb celery-4.1.0/requirements/extras/gevent.txt0000644000175000017500000000000713130607475021012 0ustar omeromer00000000000000gevent celery-4.1.0/requirements/extras/consul.txt0000644000175000017500000000001613130607475021025 0ustar omeromer00000000000000python-consul celery-4.1.0/requirements/extras/sqs.txt0000644000175000017500000000002413130607475020327 0ustar omeromer00000000000000boto>=2.13.3 pycurl celery-4.1.0/requirements/extras/pyro.txt0000644000175000017500000000000613130607475020512 0ustar omeromer00000000000000pyro4 celery-4.1.0/requirements/extras/yaml.txt0000644000175000017500000000001513130607475020463 0ustar omeromer00000000000000PyYAML>=3.10 celery-4.1.0/requirements/extras/eventlet.txt0000644000175000017500000000001113130607475021343 0ustar omeromer00000000000000eventlet celery-4.1.0/requirements/pkgutils.txt0000644000175000017500000000020613130607475020057 0ustar omeromer00000000000000setuptools>=20.6.7 wheel>=0.29.0 flake8>=2.5.4 flakeplus>=1.1 pydocstyle==1.1.1 tox>=2.3.1 sphinx2rst>=1.0 cyanide>=1.0.1 bumpversion celery-4.1.0/requirements/jython.txt0000644000175000017500000000002013130607475017522 0ustar omeromer00000000000000multiprocessing celery-4.1.0/requirements/security.txt0000644000175000017500000000002313130607475020061 0ustar omeromer00000000000000-r extras/auth.txt celery-4.1.0/requirements/test-integration.txt0000644000175000017500000000006613130607475021521 0ustar omeromer00000000000000simplejson -r extras/redis.txt -r extras/dynamodb.txt celery-4.1.0/requirements/test.txt0000644000175000017500000000003013130607475017167 0ustar omeromer00000000000000case>=1.3.1 pytest>=3.0 celery-4.1.0/requirements/README.rst0000644000175000017500000000265613130607475017156 0ustar omeromer00000000000000======================== pip requirements files ======================== Index ===== * :file:`requirements/default.txt` Default requirements for Python 2.7+. * :file:`requirements/jython.txt` Extra requirements needed to run on Jython 2.5 * :file:`requirements/security.txt` Extra requirements needed to use the message signing serializer, see the Security Guide. 
* :file:`requirements/test.txt` Requirements needed to run the full unittest suite. * :file:`requirements/test-ci-base.txt` Extra test requirements required by the CI suite (Tox). * :file:`requirements/test-ci-default.txt` Extra test requirements required for Python 2.7 by the CI suite (Tox). * :file:`requirements/test-integration.txt` Extra requirements needed when running the integration test suite. * :file:`requirements/doc.txt` Extra requirements required to build the Sphinx documentation. * :file:`requirements/pkgutils.txt` Extra requirements required to perform package distribution maintenance. * :file:`requirements/dev.txt` Requirement file installing the current dev branch of Celery and dependencies (will not be present in stable branches). Examples ======== Installing requirements ----------------------- :: $ pip install -U -r requirements/default.txt Running the tests ----------------- :: $ pip install -U -r requirements/default.txt $ pip install -U -r requirements/test.txt celery-4.1.0/requirements/test-ci-base.txt0000644000175000017500000000013113130607475020472 0ustar omeromer00000000000000pytest-cov codecov -r extras/redis.txt -r extras/sqlalchemy.txt -r extras/pymemcache.txt celery-4.1.0/requirements/docs.txt0000644000175000017500000000010113130607475017137 0ustar omeromer00000000000000sphinx_celery>=1.3 Sphinx==1.5.1 typing -r extras/sqlalchemy.txt celery-4.1.0/requirements/deps/0000755000175000017500000000000013135426347016413 5ustar omeromer00000000000000celery-4.1.0/requirements/deps/mock.txt0000644000175000017500000000001213130607475020074 0ustar omeromer00000000000000mock>=1.3 celery-4.1.0/requirements/deps/nose.txt0000644000175000017500000000001413130607475020111 0ustar omeromer00000000000000nose>=1.3.7 celery-4.1.0/setup.py0000644000175000017500000001327513130607475014455 0ustar omeromer00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import codecs import os import re import sys import setuptools import setuptools.command.test try: from platform import python_implementation as _pyimp except (AttributeError, ImportError): def _pyimp(): return 'Python (unknown)' NAME = 'celery' # -*- Python Versions -*- E_UNSUPPORTED_PYTHON = """ ---------------------------------------- Celery 4.0 requires %s %s or later ---------------------------------------- - For CPython 2.6, PyPy 1.x, Jython 2.6, CPython 3.2->3.3; use Celery 3.1: $ pip install 'celery<4' - For CPython 2.5, Jython 2.5; use Celery 3.0: $ pip install 'celery<3.1' - For CPython 2.4; use Celery 2.2: $ pip install 'celery<2.3' """ PYIMP = _pyimp() PY26_OR_LESS = sys.version_info < (2, 7) PY3 = sys.version_info[0] == 3 PY33_OR_LESS = PY3 and sys.version_info < (3, 4) JYTHON = sys.platform.startswith('java') PYPY_VERSION = getattr(sys, 'pypy_version_info', None) PYPY = PYPY_VERSION is not None PYPY24_ATLEAST = PYPY_VERSION and PYPY_VERSION >= (2, 4) if PY26_OR_LESS: raise Exception(E_UNSUPPORTED_PYTHON % (PYIMP, '2.7')) elif PY33_OR_LESS and not PYPY24_ATLEAST: raise Exception(E_UNSUPPORTED_PYTHON % (PYIMP, '3.4')) # -*- Extras -*- EXTENSIONS = { 'auth', 'cassandra', 'django', 'elasticsearch', 'memcache', 'pymemcache', 'couchbase', 'eventlet', 'gevent', 'msgpack', 'yaml', 'redis', 'sqs', 'couchdb', 'riak', 'zookeeper', 'solar', 'sqlalchemy', 'librabbitmq', 'pyro', 'slmq', 'tblib', 'consul', 'dynamodb' } # -*- Classifiers -*- classes = """ Development Status :: 5 - Production/Stable License :: OSI Approved :: BSD License Topic :: System :: Distributed Computing Topic :: Software Development :: Object Brokering 
Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.4 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Operating System :: OS Independent """ # -*- Distribution Meta -*- re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') re_doc = re.compile(r'^"""(.+?)"""') def _add_default(m): attr_name, attr_value = m.groups() return ((attr_name, attr_value.strip("\"'")),) def _add_doc(m): return (('doc', m.groups()[0]),) def parse_dist_meta(): """Extract metadata information from ``$dist/__init__.py``.""" pats = {re_meta: _add_default, re_doc: _add_doc} here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, NAME, '__init__.py')) as meta_fh: distmeta = {} for line in meta_fh: if line.strip() == '# -eof meta-': break for pattern, handler in pats.items(): m = pattern.match(line.strip()) if m: distmeta.update(handler(m)) return distmeta # -*- Requirements -*- def _strip_comments(l): return l.split('#', 1)[0].strip() def _pip_requirement(req): if req.startswith('-r '): _, path = req.split() return reqs(*path.split('/')) return [req] def _reqs(*f): return [ _pip_requirement(r) for r in ( _strip_comments(l) for l in open( os.path.join(os.getcwd(), 'requirements', *f)).readlines() ) if r] def reqs(*f): """Parse requirement file. Example: reqs('default.txt') # requirements/default.txt reqs('extras', 'redis.txt') # requirements/extras/redis.txt Returns: List[str]: list of requirements specified in the file. """ return [req for subreq in _reqs(*f) for req in subreq] def extras(*p): """Parse requirement in the requirements/extras/ directory.""" return reqs('extras', *p) def install_requires(): """Get list of requirements required for installation.""" if JYTHON: return reqs('default.txt') + reqs('jython.txt') return reqs('default.txt') def extras_require(): """Get map of all extra requirements.""" return {x: extras(x + '.txt') for x in EXTENSIONS} # -*- Long Description -*- def long_description(): try: return codecs.open('README.rst', 'r', 'utf-8').read() except IOError: return 'Long description error: Missing README.rst file' # -*- Command: setup.py test -*- class pytest(setuptools.command.test.test): user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')] def initialize_options(self): setuptools.command.test.test.initialize_options(self) self.pytest_args = [] def run_tests(self): import pytest as _pytest sys.exit(_pytest.main(self.pytest_args)) # -*- %%% -*- meta = parse_dist_meta() setuptools.setup( name=NAME, packages=setuptools.find_packages(exclude=['t', 't.*']), version=meta['version'], description=meta['doc'], long_description=long_description(), keywords=meta['keywords'], author=meta['author'], author_email=meta['contact'], url=meta['homepage'], license='BSD', platforms=['any'], install_requires=install_requires(), tests_require=reqs('test.txt'), extras_require=extras_require(), classifiers=[s.strip() for s in classes.split('\n') if s], cmdclass={'test': pytest}, include_package_data=True, zip_safe=False, entry_points={ 'console_scripts': [ 'celery = celery.__main__:main', ], 'pytest11': [ 'celery = celery.contrib.pytest', ], }, ) celery-4.1.0/setup.cfg0000644000175000017500000000055113135426347014557 0ustar omeromer00000000000000[tool:pytest] testpaths = t/unit/ python_classes = test_* [build_sphinx] 
source-dir = docs/ build-dir = docs/_build all_files = 1 [flake8] ignore = N806, N802, N801, N803 [pep257] ignore = D102,D104,D203,D105,D213 [bdist_rpm] requires = pytz >= 2016.7 billiard >= 3.5.0.2 kombu >= 4.0.2 [bdist_wheel] universal = 1 [egg_info] tag_build = tag_date = 0 celery-4.1.0/PKG-INFO0000644000175000017500000004151513135426347014040 0ustar omeromer00000000000000Metadata-Version: 1.1 Name: celery Version: 4.1.0 Summary: Distributed Task Queue. Home-page: http://celeryproject.org Author: Ask Solem Author-email: ask@celeryproject.org License: BSD Description: .. image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |license| |wheel| |pyversion| |pyimp| :Version: 4.1.0 (latentcall) :Web: http://celeryproject.org/ :Download: https://pypi.python.org/pypi/celery/ :Source: https://github.com/celery/celery/ :Keywords: task, queue, job, async, rabbitmq, amqp, redis, python, distributed, actors -- What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work, called a task; dedicated worker processes then constantly monitor the queue for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task, a client puts a message on the queue; the broker then delivers the message to a worker. A Celery system can consist of multiple workers and brokers, allowing for high availability and horizontal scaling. Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ for Node.js, and a `PHP client`_. Language interoperability can also be achieved by using webhooks in such a way that the client enqueues a URL to be requested by a worker. .. _node-celery: https://github.com/mher/node-celery .. _`PHP client`: https://github.com/gjedeer/celery-php What do I need? =============== Celery version 4.0 runs on: - Python (2.7, 3.4, 3.5) - PyPy (5.4, 5.5) This is the last version to support Python 2.7, and from the next version (Celery 5.x) Python 3.5 or newer is required. If you're running an older version of Python, you need to be running an older version of Celery: - Python 2.6: Celery series 3.1 or earlier. - Python 2.5: Celery series 3.0 or earlier. - Python 2.4: Celery series 2.2 or earlier. Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform. *Celery* is usually used with a message broker to send and receive messages. The RabbitMQ and Redis transports are feature complete, but there's also experimental support for a myriad of other solutions, including using SQLite for local development. *Celery* can run on a single machine, on multiple machines, or even across datacenters. Get Started =========== If this is the first time you're trying to use Celery, or you're new to Celery 4.0 coming from previous versions, then you should read our getting started tutorials: - `First steps with Celery`_ Tutorial teaching you the bare minimum needed to get started with Celery. - `Next steps`_ A more complete overview, showing more features. .. _`First steps with Celery`: http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html .. _`Next steps`: http://docs.celeryproject.org/en/latest/getting-started/next-steps.html Celery is...
============= - **Simple** Celery is easy to use and maintain, and does *not need configuration files*. It has an active, friendly community you can talk to for support, such as our `mailing-list`_ or the IRC channel. Here's one of the simplest applications you can make:: from celery import Celery app = Celery('hello', broker='amqp://guest@localhost//') @app.task def hello(): return 'hello world' - **Highly Available** Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA in the way of *Primary/Primary* or *Primary/Replica* replication. - **Fast** A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, py-librabbitmq, and optimized settings). - **Flexible** Almost every part of *Celery* can be extended or used on its own: custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more. It supports... ================ - **Message Transports** - RabbitMQ_, Redis_, Amazon SQS - **Concurrency** - Prefork, Eventlet_, gevent_, single threaded (``solo``) - **Result Stores** - AMQP, Redis - memcached - SQLAlchemy, Django ORM - Apache Cassandra, IronCache, Elasticsearch - **Serialization** - *pickle*, *json*, *yaml*, *msgpack*. - *zlib*, *bzip2* compression. - Cryptographic message signing. .. _`Eventlet`: http://eventlet.net/ .. _`gevent`: http://gevent.org/ .. _RabbitMQ: https://rabbitmq.com .. _Redis: https://redis.io .. _SQLAlchemy: http://sqlalchemy.org Framework Integration ===================== Celery is easy to integrate with web frameworks, some of which even have integration packages: +--------------------+------------------------+ | `Django`_ | not needed | +--------------------+------------------------+ | `Pyramid`_ | `pyramid_celery`_ | +--------------------+------------------------+ | `Pylons`_ | `celery-pylons`_ | +--------------------+------------------------+ | `Flask`_ | not needed | +--------------------+------------------------+ | `web2py`_ | `web2py-celery`_ | +--------------------+------------------------+ | `Tornado`_ | `tornado-celery`_ | +--------------------+------------------------+ The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at ``fork``. .. _`Django`: https://djangoproject.com/ .. _`Pylons`: http://pylonsproject.org/ .. _`Flask`: http://flask.pocoo.org/ .. _`web2py`: http://web2py.com/ .. _`Bottle`: https://bottlepy.org/ .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html .. _`pyramid_celery`: https://pypi.python.org/pypi/pyramid_celery/ .. _`celery-pylons`: https://pypi.python.org/pypi/celery-pylons .. _`web2py-celery`: https://code.google.com/p/web2py-celery/ .. _`Tornado`: http://www.tornadoweb.org/ .. _`tornado-celery`: https://github.com/mher/tornado-celery/ .. _celery-documentation: Documentation ============= The `latest documentation`_ is hosted at Read The Docs, containing user guides, tutorials, and an API reference. .. _`latest documentation`: http://docs.celeryproject.org/en/latest/ .. _celery-installation: Installation ============ You can install Celery either via the Python Package Index (PyPI) or from source. To install using ``pip``: :: $ pip install -U Celery .. _bundles: Bundles ------- Celery also defines a group of bundles that can be used to install Celery and the dependencies for a given feature.
You can specify these in your requirements or on the ``pip`` command-line by using brackets. Multiple bundles can be specified by separating them with commas. :: $ pip install "celery[librabbitmq]" $ pip install "celery[librabbitmq,redis,auth,msgpack]" The following bundles are available: Serializers ~~~~~~~~~~~ :``celery[auth]``: for using the ``auth`` security serializer. :``celery[msgpack]``: for using the msgpack serializer. :``celery[yaml]``: for using the yaml serializer. Concurrency ~~~~~~~~~~~ :``celery[eventlet]``: for using the ``eventlet`` pool. :``celery[gevent]``: for using the ``gevent`` pool. Transports and Backends ~~~~~~~~~~~~~~~~~~~~~~~ :``celery[librabbitmq]``: for using the librabbitmq C library. :``celery[redis]``: for using Redis as a message transport or as a result backend. :``celery[sqs]``: for using Amazon SQS as a message transport (*experimental*). :``celery[tblib]``: for using the ``task_remote_tracebacks`` feature. :``celery[memcache]``: for using Memcached as a result backend (using ``pylibmc``). :``celery[pymemcache]``: for using Memcached as a result backend (pure-Python implementation). :``celery[cassandra]``: for using Apache Cassandra as a result backend with the DataStax driver. :``celery[couchbase]``: for using Couchbase as a result backend. :``celery[elasticsearch]``: for using Elasticsearch as a result backend. :``celery[riak]``: for using Riak as a result backend. :``celery[zookeeper]``: for using Zookeeper as a message transport. :``celery[sqlalchemy]``: for using SQLAlchemy as a result backend (*supported*). :``celery[pyro]``: for using the Pyro4 message transport (*experimental*). :``celery[slmq]``: for using the SoftLayer Message Queue transport (*experimental*). :``celery[consul]``: for using the Consul.io Key/Value store as a message transport or result backend (*experimental*). :``celery[django]``: specifies the lowest version possible for Django support. You should probably not use this in your requirements; it's here for informational purposes only. .. _celery-installing-from-source: Downloading and installing from source -------------------------------------- Download the latest version of Celery from PyPI: https://pypi.python.org/pypi/celery/ You can install it by doing the following: :: $ tar xvfz celery-0.0.0.tar.gz $ cd celery-0.0.0 $ python setup.py build # python setup.py install The last command must be executed as a privileged user if you aren't currently using a virtualenv. .. _celery-installing-from-git: Using the development version ----------------------------- With pip ~~~~~~~~ The Celery development version also requires the development versions of ``kombu``, ``amqp``, ``billiard``, and ``vine``. You can install the latest snapshot of these using the following pip commands: :: $ pip install https://github.com/celery/celery/zipball/master#egg=celery $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu $ pip install https://github.com/celery/vine/zipball/master#egg=vine With git ~~~~~~~~ Please see the Contributing section. .. _getting-help: Getting Help ============ .. _mailing-list: Mailing list ------------ For discussions about the usage, development, and future of Celery, please join the `celery-users`_ mailing list. .. _`celery-users`: https://groups.google.com/group/celery-users/ .. _irc-channel: IRC --- Come chat with us on IRC.
The **#celery** channel is located on the `Freenode`_ network. .. _`Freenode`: https://freenode.net .. _bug-tracker: Bug tracker =========== If you have any suggestions, bug reports, or annoyances, please report them to our issue tracker at https://github.com/celery/celery/issues/ .. _wiki: Wiki ==== https://wiki.github.com/celery/celery/ .. _contributing-short: Contributing ============ Development of `celery` happens at GitHub: https://github.com/celery/celery You're highly encouraged to participate in the development of `celery`. If you don't like GitHub (for some reason), you're welcome to send regular patches. Be sure to also read the `Contributing to Celery`_ section in the documentation. .. _`Contributing to Celery`: http://docs.celeryproject.org/en/master/contributing.html .. _license: License ======= This software is licensed under the `New BSD License`. See the ``LICENSE`` file in the top distribution directory for the full license text. .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround .. |build-status| image:: https://secure.travis-ci.org/celery/celery.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/celery .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master :target: https://codecov.io/github/celery/celery?branch=master .. |license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.python.org/pypi/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.python.org/pypi/celery/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Supported Python implementations. :target: https://pypi.python.org/pypi/celery/ Keywords: task job queue distributed messaging actor Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: Software Development :: Object Brokering Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: OS Independent celery-4.1.0/LICENSE0000644000175000017500000000510713130607475013743 0ustar omeromer00000000000000Copyright (c) 2015-2016 Ask Solem & contributors. All rights reserved. Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved. Copyright (c) 2009, 2010, 2011, 2012 Ask Solem, and individual contributors. All rights reserved. Celery is licensed under The BSD License (3 Clause, also known as the new BSD license). The license is an OSI approved Open Source license and is GPL-compatible(1).
The license text can also be found here: http://www.opensource.org/licenses/BSD-3-Clause License ======= Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Ask Solem, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Documentation License ===================== The documentation portion of Celery (the rendered contents of the "docs" directory of a software distribution or checkout) is supplied under the "Creative Commons Attribution-ShareAlike 4.0 International" (CC BY-SA 4.0) License as described by https://creativecommons.org/licenses/by-sa/4.0/ Footnotes ========= (1) A GPL-compatible license makes it possible to combine Celery with other software that is released under the GPL; it does not mean that we're distributing Celery under the GPL license. The BSD license, unlike the GPL, lets you distribute a modified version without making your changes open source. celery-4.1.0/CONTRIBUTORS.txt0000644000175000017500000001532013135426300015421 0ustar omeromer00000000000000Every contribution to Celery is as important to us as every coin in the money bin is to Scrooge McDuck. The first commit to the Celery codebase was made on Fri Apr 24 13:30:00 2009 +0200, and the codebase has since been improved by many contributors. Everyone who has ever contributed to Celery should be in this list, but in a recent policy change it has been decided that everyone must add themselves here, and not be added by others, so it's currently incomplete, waiting for everyone to add their names. The list of authors added before the policy change can be found in docs/AUTHORS.txt. -- Contributor offers to license certain software (a “Contribution” or multiple “Contributions”) to Celery, and Celery agrees to accept said Contributions, under the terms of the BSD open source license. Contributor understands and agrees that Celery shall have the irrevocable and perpetual right to make and distribute copies of any Contribution, as well as to create and distribute collective works and derivative works of any Contribution, under the BSD License. Contributors ------------ Ask Solem, 2012/06/07 Sean O'Connor, 2012/06/07 Patrick Altman, 2012/06/07 Chris St.
Pierre, 2012/06/07 Jeff Terrace, 2012/06/07 Mark Lavin, 2012/06/07 Jesper Noehr, 2012/06/07 Brad Jasper, 2012/06/07 Juan Catalano, 2012/06/07 Luke Zapart, 2012/06/07 Roger Hu, 2012/06/07 Honza Král, 2012/06/07 Aaron Elliot Ross, 2012/06/07 Alec Clowes, 2012/06/07 Daniel Watkins, 2012/06/07 Timo Sugliani, 2012/06/07 Yury V. Zaytsev, 2012/06/07 Marcin Kuźmiński, 2012/06/07 Norman Richards, 2012/06/07 Kevin Tran, 2012/06/07 David Arthur, 2012/06/07 Bryan Berg, 2012/06/07 Mikhail Korobov, 2012/06/07 Jerzy Kozera, 2012/06/07 Ben Firshman, 2012/06/07 Jannis Leidel, 2012/06/07 Chris Rose, 2012/06/07 Julien Poissonnier, 2012/06/07 Łukasz Oleś, 2012/06/07 David Strauss, 2012/06/07 Chris Streeter, 2012/06/07 Thomas Johansson, 2012/06/07 Ales Zoulek, 2012/06/07 Clay Gerrard, 2012/06/07 Matt Williamson, 2012/06/07 Travis Swicegood, 2012/06/07 Jeff Balogh, 2012/06/07 Harm Verhagen, 2012/06/07 Wes Winham, 2012/06/07 David Cramer, 2012/06/07 Steeve Morin, 2012/06/07 Mher Movsisyan, 2012/06/08 Chris Peplin, 2012/06/07 Florian Apolloner, 2012/06/07 Juarez Bochi, 2012/06/07 Christopher Angove, 2012/06/07 Jason Pellerin, 2012/06/07 Miguel Hernandez Martos, 2012/06/07 Neil Chintomby, 2012/06/07 Mauro Rocco, 2012/06/07 Ionut Turturica, 2012/06/07 Adriano Petrich, 2012/06/07 Michael Elsdörfer, 2012/06/07 Kornelijus Survila, 2012/06/07 Stefán Kjartansson, 2012/06/07 Keith Perkins, 2012/06/07 Flavio Percoco, 2012/06/07 Wes Turner, 2012/06/07 Vitaly Babiy, 2012/06/07 Tayfun Sen, 2012/06/08 Gert Van Gool, 2012/06/08 Akira Matsuzaki, 2012/06/08 Simon Josi, 2012/06/08 Sam Cooke, 2012/06/08 Frederic Junod, 2012/06/08 Roberto Gaiser, 2012/06/08 Piotr Sikora, 2012/06/08 Chris Adams, 2012/06/08 Branko Čibej, 2012/06/08 Vladimir Kryachko, 2012/06/08 Remy Noel, 2012/06/08 Jude Nagurney, 2012/06/09 Jonatan Heyman, 2012/06/10 David Miller, 2012/06/11 Matthew Morrison, 2012/06/11 Leo Dirac, 2012/06/11 Mark Thurman, 2012/06/11 Dimitrios Kouzis-Loukas, 2012/06/13 Steven Skoczen, 2012/06/17 Loren Abrams, 2012/06/19 Eran Rundstein, 2012/06/24 John Watson, 2012/06/27 Matt Long, 2012/07/04 David Markey, 2012/07/05 Jared Biel, 2012/07/05 Jed Smith, 2012/07/08 Łukasz Langa, 2012/07/10 Rinat Shigapov, 2012/07/20 Hynek Schlawack, 2012/07/23 Paul McMillan, 2012/07/26 Mitar, 2012/07/28 Adam DePue, 2012/08/22 Thomas Meson, 2012/08/28 Daniel Lundin, 2012/08/30 Alexey Zatelepin, 2012/09/18 Sundar Raman, 2012/09/24 Henri Colas, 2012/11/16 Thomas Grainger, 2012/11/29 Marius Gedminas, 2012/11/29 Christoph Krybus, 2013/01/07 Jun Sakai, 2013/01/16 Vlad Frolov, 2013/01/23 Milen Pavlov, 2013/03/08 Pär Wieslander, 2013/03/20 Theo Spears, 2013/03/28 Romuald Brunet, 2013/03/29 Aaron Harnly, 2013/04/04 Peter Brook, 2013/05/09 Muneyuki Noguchi, 2013/04/24 Stas Rudakou, 2013/05/29 Dong Weiming, 2013/06/27 Oleg Anashkin, 2013/06/27 Ross Lawley, 2013/07/05 Alain Masiero, 2013/08/07 Adrien Guinet, 2013/08/14 Christopher Lee, 2013/08/29 Alexander Smirnov, 2013/08/30 Matt Robenolt, 2013/08/31 Jameel Al-Aziz, 2013/10/04 Fazleev Maksim, 2013/10/08 Ian A Wilson, 2013/10/18 Daniel M Taub, 2013/10/22 Matt Wise, 2013/11/06 Michael Robellard, 2013/11/07 Vsevolod Kulaga, 2013/11/16 Ionel Cristian Mărieș, 2013/12/09 Константин Подшумок, 2013/12/16 Antoine Legrand, 2014/01/09 Pepijn de Vos, 2014/01/15 Dan McGee, 2014/01/27 Paul Kilgo, 2014/01/28 Môshe van der Sterre, 2014/01/31 Martin Davidsson, 2014/02/08 Chris Clark, 2014/02/20 Matthew Duggan, 2014/04/10 Brian Bouterse, 2014/04/10 Dmitry Malinovsky, 2014/04/28 Luke Pomfrey, 2014/05/06 Alexey Kotlyarov, 2014/05/16
Ross Deane, 2014/07/11 Tadej Janež, 2014/08/08 Akexander Koshelev, 2014/08/19 Davide Quarta, 2014/08/19 John Whitlock, 2014/08/19 Konstantinos Koukopoulos, 2014/08/24 Albert Yee Wang, 2014/08/29 Andrea Rabbaglietti, 2014/10/02 Joe Jevnik, 2014/10/22 Nathan Van Gheem, 2014/10/28 Gino Ledesma, 2014/10/28 Thomas French, 2014/11/10 Michael Permana, 2014/11/06 William King, 2014/11/21 Bert Vanderbauwhede, 2014/12/18 John Anderson, 2014/12/27 Luke Burden, 2015/01/24 Mickaël Penhard, 2015/02/15 Mark Parncutt, 2015/02/16 Samuel Jaillet, 2015/03/24 Ilya Georgievsky, 2015/03/31 Fatih Sucu, 2015/04/17 James Pulec, 2015/04/19 Alexander Lebedev, 2015/04/25 Frantisek Holop, 2015/05/21 Feanil Patel, 2015/05/21 Jocelyn Delalande, 2015/06/03 Justin Patrin, 2015/08/06 Juan Rossi, 2015/08/10 Piotr Maślanka, 2015/08/24 Gerald Manipon, 2015/10/19 Krzysztof Bujniewicz, 2015/10/21 Sukrit Khera, 2015/10/26 Dave Smith, 2015/10/27 Dennis Brakhane, 2015/10/30 Chris Harris, 2015/11/27 Valentyn Klindukh, 2016/01/15 Wayne Chang, 2016/01/15 Mike Attwood, 2016/01/22 David Harrigan, 2016/02/01 Ahmet Demir, 2016/02/27 Maxime Verger, 2016/02/29 Alexander Oblovatniy, 2016/03/10 Komu Wairagu, 2016/04/03 Joe Sanford, 2016/04/11 Takeshi Kanemoto, 2016/04/22 Arthur Vuillard, 2016/04/22 Colin McIntosh, 2016/04/26 Jeremy Zafran, 2016/05/17 Anand Reddy Pandikunta, 2016/06/18 Adriano Martins de Jesus, 2016/06/22 Kevin Richardson, 2016/06/29 Andrew Stewart, 2016/07/04 Xin Li, 2016/08/03 Alli Witheford, 2016/09/29 Alan Justino da Silva, 2016/10/14 Marat Sharafutdinov, 2016/11/04 Viktor Holmqvist, 2016/12/02 Rick Wargo, 2016/12/02 zhengxiaowai, 2016/12/07 Michael Howitz, 2016/12/08 Andreas Pelme, 2016/12/13 Mike Chen, 2016/12/20 Alejandro Pernin, 2016/12/23 Yuval Shalev, 2016/12/27 Morgan Doocy, 2017/01/02 Arcadiy Ivanov, 2017/01/08 Ryan Hiebert, 2017/01/20 Jianjian Yu, 2017/04/09 Brian May, 2017/04/10 Dmytro Petruk, 2017/04/12 Joey Wilhelm, 2017/04/12 Yoichi Nakayama, 2017/04/25 Simon Schmidt, 2017/05/19 Anthony Lukach, 2017/05/23 Samuel Dion-Girardeau, 2017/05/29 Aydin Sen, 2017/06/14 Preston Moore, 2017/06/18 celery-4.1.0/Changelog0000644000175000017500000002071713135426300014543 0ustar omeromer00000000000000.. _changelog: ================ Change history ================ This document contains change notes for bugfix releases in the 4.1.x series (latentcall); please see :ref:`whatsnew-4.1` for an overview of what's new in Celery 4.1. .. _version-4.1.0: 4.1.0 ===== :release-date: 2017-07-25 00:00 PM PST :release-by: Omer Katz - **Configuration**: CELERY_SEND_EVENTS instead of CELERYD_SEND_EVENTS for 3.1.x compatibility (#3997) Contributed by **abhinav nilaratna**. - **App**: Restore behavior so Broadcast queues work. (#3934) Contributed by **Patrick Cloke**. - **Sphinx**: Make appstr use standard format (#4134) (#4139) Contributed by **Preston Moore**. - **App**: Make id, name always accessible from logging.Formatter via extra (#3994) Contributed by **Yoichi NAKAYAMA**. - **Worker**: Add worker_shutting_down signal (#3998); see the sketch below. Contributed by **Daniel Huang**. - **PyPy**: Support PyPy version 5.8.0 (#4128) Contributed by **Omer Katz**. - **Results**: Elasticsearch: Fix serializing keys (#3924) Contributed by :github_user:`staticfox`. - **Canvas**: Deserialize all tasks in a chain (#4015) Contributed by :github_user:`fcoelho`. - **Systemd**: Recover loglevel for ExecStart in systemd config (#4023) Contributed by **Yoichi NAKAYAMA**. - **Sphinx**: Use the Sphinx add_directive_to_domain API. (#4037) Contributed by **Patrick Cloke**.
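A minimal sketch of hooking the new ``worker_shutting_down`` signal noted above; the handler name and printed message are illustrative only, not part of the release::

    from celery.signals import worker_shutting_down

    @worker_shutting_down.connect
    def on_worker_shutting_down(sig=None, how=None, exitcode=None, **kwargs):
        # Runs once when the worker begins a Warm or Cold shutdown.
        print('worker shutting down: sig=%r how=%r exitcode=%r'
              % (sig, how, exitcode))
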
- **App**: Pass properties to before_task_publish signal (#4035) Contributed by **Javier Domingo Cansino**. - **Results**: Add SSL option for Redis backends (#3831) Contributed by **Chris Kuehl**. - **Beat**: celery.schedules.crontab: fix reduce (#3826) (#3827) Contributed by **Taylor C. Richberger**. - **State**: Fix celery issues when using flower REST API Contributed by **Thierry RAMORASOAVINA**. - **Results**: Elasticsearch: Fix serializing document id. Contributed by **Acey9**. - **Beat**: Make shallow copy of schedules dictionary Contributed by **Brian May**. - **Beat**: Populate heap when periodic tasks are changed Contributed by **Wojciech Żywno**. - **Task**: Allow class methods to define tasks (#3952) Contributed by **georgepsarakis**. - **Platforms**: Always return boolean value when checking if signal is supported (#3962). Contributed by **Jian Yu**. - **Canvas**: Avoid duplicating chains in chords (#3779) Contributed by **Ryan Hiebert**. - **Canvas**: Lookup task only if list has items (#3847) Contributed by **Marc Gibbons**. - **Results**: Allow unicode message for exceptions raised in task (#3903) Contributed by **George Psarakis**. - **Python3**: Support for Python 3.6 (#3904, #3903, #3736) Contributed by **Jon Dufresne**, **George Psarakis**, **Asif Saifuddin Auvi**, **Omer Katz**. - **App**: Fix retried tasks with expirations (#3790) Contributed by **Brendan MacDonell**. - **Docs**: Fix items format for routes in docs (#3875) Contributed by **Slam**. - **Utils**: Fix maybe_make_aware (#3850) Contributed by **Taylor C. Richberger**. - **Task**: Fix task ETA issues when timezone is defined in configuration (#3867) Contributed by **George Psarakis**. - **Concurrency**: Consumer does not shut down properly when embedded in gevent application (#3746) Contributed by **Arcadiy Ivanov**. - **Canvas**: Fix #3725: Task replaced with group does not complete (#3731) Contributed by **Morgan Doocy**. - **Task**: Correct order in chains with replaced tasks (#3730) Contributed by **Morgan Doocy**. - **Result**: Enable synchronous execution of sub-tasks (#3696) Contributed by **shalev67**. - **Task**: Fix request context for blocking task apply (added hostname) (#3716) Contributed by **Marat Sharafutdinov**. - **Utils**: Fix task argument handling (#3678) (#3693) Contributed by **Roman Sichny**. - **Beat**: Provide a transparent method to update the Scheduler heap (#3721) Contributed by **Alejandro Pernin**. - **Beat**: Specify default value for pidfile option of celery beat. (#3722) Contributed by **Arnaud Rocher**. - **Results**: Elasticsearch: Stop generating a new field every time a new result is being put (#3708) Contributed by **Mike Chen**. - **Requirements** - Now depends on :ref:`Kombu 4.1.0 <kombu:version-4.1.0>`. - **Results**: Elasticsearch now reuses fields when new results are added. Contributed by **Mike Chen**. - **Results**: Fixed MongoDB integration when using binary encodings (Issue #3575). Contributed by **Andrew de Quincey**. - **Worker**: Making missing ``*args`` and ``**kwargs`` in Task protocol 1 return empty value in protocol 2 (Issue #3687). Contributed by **Roman Sichny**. - **App**: Fixed :exc:`TypeError` in AMQP when using deprecated signal (Issue #3707). Contributed by :github_user:`michael-k`. - **Beat**: Added a transparent method to update the scheduler heap. Contributed by **Alejandro Pernin**. - **Task**: Fixed handling of tasks with keyword arguments on Python 3 (Issue #3657). Contributed by **Roman Sichny**.
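The synchronous sub-task execution enabled above (#3696) is reached through the ``disable_sync_subtasks`` argument covered in the next entries; a hedged sketch, where the app, broker URL, and task names are illustrative only::

    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//', backend='rpc://')

    @app.task
    def double(n):
        return n * 2

    @app.task
    def parent(n):
        # Waiting on a sub-task inside another task normally raises
        # RuntimeError; passing disable_sync_subtasks=False opts in to a
        # synchronous (and potentially deadlock-prone) wait.
        return double.delay(n).get(disable_sync_subtasks=False)
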
- **Task**: Fixed request context for blocking task apply by adding missing hostname attribute. Contributed by **Marat Sharafutdinov**. - **Task**: Added option to run subtasks synchronously with ``disable_sync_subtasks`` argument. Contributed by :github_user:`shalev67`. - **App**: Fixed chaining of replaced tasks (Issue #3726). Contributed by **Morgan Doocy**. - **Canvas**: Fixed bug where replaced tasks with groups were not completing (Issue #3725). Contributed by **Morgan Doocy**. - **Worker**: Fixed problem where consumer does not shut down properly when embedded in a gevent application (Issue #3745). Contributed by **Arcadiy Ivanov**. - **Results**: Added support for using AWS DynamoDB as a result backend (#3736). Contributed by **George Psarakis**. - **Testing**: Added caching on pip installs. Contributed by :github_user:`orf`. - **Worker**: Prevent consuming queue before ready on startup (Issue #3620). Contributed by **Alan Hamlett**. - **App**: Fixed task ETA issues when timezone is defined in configuration (Issue #3753). Contributed by **George Psarakis**. - **Utils**: ``maybe_make_aware`` should not modify datetime when it is already timezone-aware (Issue #3849). Contributed by **Taylor C. Richberger**. - **App**: Fixed retrying tasks with expirations (Issue #3734). Contributed by **Brendan MacDonell**. - **Results**: Allow unicode message for exceptions raised in task (Issue #3858). Contributed by :github_user:`staticfox`. - **Canvas**: Fixed :exc:`IndexError` raised when chord has an empty header. Contributed by **Marc Gibbons**. - **Canvas**: Avoid duplicating chains in chords (Issue #3771). Contributed by **Ryan Hiebert** and **George Psarakis**. - **Utils**: Allow class methods to define tasks (Issue #3863). Contributed by **George Psarakis**. - **Beat**: Populate heap when periodic tasks are changed. Contributed by :github_user:`wzywno` and **Brian May**. - **Results**: Added support for Elasticsearch backend options settings. Contributed by :github_user:`Acey9`. - **Events**: Ensure ``Task.as_dict()`` works when not all information about the task is available. Contributed by :github_user:`tramora`. - **Schedules**: Fixed pickled crontab schedules to restore properly (Issue #3826). Contributed by **Taylor C. Richberger**. - **Results**: Added SSL option for Redis backends (Issue #3830). Contributed by **Chris Kuehl**.
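A sketch of the Redis SSL option from the entry above; the ``redis_backend_use_ssl`` setting name and its keys are assumptions to verify against this release's documentation, and the host, password, and certificate path are hypothetical::

    import ssl

    from celery import Celery

    app = Celery('proj', backend='redis://:secret@redis.example.com:6379/0')

    # The dict is passed through to redis-py's SSL connection parameters.
    app.conf.redis_backend_use_ssl = {
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/etc/ssl/certs/ca.pem',
    }
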
- Documentation and examples improvements by: - **Bruno Alla** - **Jamie Alessio** - **Vivek Anand** - **Peter Bittner** - **Kalle Bronsen** - **Jon Dufresne** - **James Michael DuPont** - **Sergey Fursov** - **Samuel Dion-Girardeau** - **Daniel Hahler** - **Mike Helmick** - **Marc Hörsken** - **Christopher Hoskin** - **Daniel Huang** - **Primož Kerin** - **Michal Kuffa** - **Simon Legner** - **Anthony Lukach** - **Ed Morley** - **Jay McGrath** - **Rico Moorman** - **Viraj Navkal** - **Ross Patterson** - **Dmytro Petruk** - **Luke Plant** - **Eric Poelke** - **Salvatore Rinchiera** - **Arnaud Rocher** - **Kirill Romanov** - **Simon Schmidt** - **Tamer Sherif** - **YuLun Shih** - **Ask Solem** - **Tom 'Biwaa' Riat** - **Arthur Vigil** - **Joey Wilhelm** - **Jian Yu** - :github_user:`baixuexue123` - :github_user:`bronsen` - :github_user:`michael-k` - :github_user:`orf` - :github_user:`3lnc` celery-4.1.0/celery.egg-info/0000755000175000017500000000000013135426347015712 5ustar omeromer00000000000000celery-4.1.0/celery.egg-info/not-zip-safe0000644000175000017500000000000113135426346020137 0ustar omeromer00000000000000 celery-4.1.0/celery.egg-info/dependency_links.txt0000644000175000017500000000000113135426346021757 0ustar omeromer00000000000000 celery-4.1.0/celery.egg-info/PKG-INFO0000644000175000017500000004151513135426346017014 0ustar omeromer00000000000000
celery-4.1.0/celery.egg-info/top_level.txt0000644000175000017500000000000713135426346020440 0ustar omeromer00000000000000celery celery-4.1.0/celery.egg-info/requires.txt0000644000175000017500000000116413135426346020313 0ustar omeromer00000000000000pytz>dev billiard<3.6.0,>=3.5.0.2 kombu<5.0,>=4.0.2 [auth] pyOpenSSL [cassandra] cassandra-driver [consul] python-consul [couchbase] couchbase [couchdb] pycouchdb [django] Django>=1.8 [dynamodb] boto3==1.4.3 [elasticsearch] elasticsearch [eventlet] eventlet [gevent] gevent [librabbitmq] librabbitmq>=1.5.0 [memcache] pylibmc [msgpack] msgpack-python>=0.3.0 [pymemcache] python-memcached [pyro] pyro4 [redis] redis>=2.10.5 [riak] riak>=2.0 [slmq] softlayer_messaging>=1.0.3 [solar] ephem [sqlalchemy] sqlalchemy [sqs] boto>=2.13.3 pycurl [tblib] tblib>=1.3.0 [yaml] PyYAML>=3.10 [zookeeper] kazoo>=1.3.1 celery-4.1.0/celery.egg-info/entry_points.txt0000644000175000017500000000013413135426346021205 0ustar omeromer00000000000000[console_scripts] celery = celery.__main__:main [pytest11] celery = celery.contrib.pytest celery-4.1.0/celery.egg-info/SOURCES.txt0000644000175000017500000004644613135426347017604 0ustar omeromer00000000000000CONTRIBUTORS.txt Changelog LICENSE MANIFEST.in README.rst TODO setup.cfg setup.py celery/__init__.py celery/__main__.py celery/_state.py celery/beat.py celery/bootsteps.py celery/canvas.py celery/exceptions.py celery/five.py celery/local.py celery/platforms.py celery/result.py celery/schedules.py celery/signals.py celery/states.py celery.egg-info/PKG-INFO celery.egg-info/SOURCES.txt celery.egg-info/dependency_links.txt celery.egg-info/entry_points.txt celery.egg-info/not-zip-safe celery.egg-info/requires.txt celery.egg-info/top_level.txt celery/app/__init__.py celery/app/amqp.py celery/app/annotations.py celery/app/backends.py celery/app/base.py celery/app/builtins.py celery/app/control.py celery/app/defaults.py celery/app/events.py celery/app/log.py celery/app/registry.py celery/app/routes.py celery/app/task.py celery/app/trace.py celery/app/utils.py celery/apps/__init__.py celery/apps/beat.py celery/apps/multi.py celery/apps/worker.py celery/backends/__init__.py celery/backends/amqp.py celery/backends/async.py celery/backends/base.py celery/backends/cache.py celery/backends/cassandra.py celery/backends/consul.py celery/backends/couchbase.py celery/backends/couchdb.py celery/backends/dynamodb.py celery/backends/elasticsearch.py celery/backends/filesystem.py celery/backends/mongodb.py celery/backends/redis.py celery/backends/riak.py celery/backends/rpc.py celery/backends/database/__init__.py celery/backends/database/models.py celery/backends/database/session.py
celery/bin/__init__.py celery/bin/amqp.py celery/bin/base.py celery/bin/beat.py celery/bin/call.py celery/bin/celery.py celery/bin/celeryd_detach.py celery/bin/control.py celery/bin/events.py celery/bin/graph.py celery/bin/list.py celery/bin/logtool.py celery/bin/migrate.py celery/bin/multi.py celery/bin/purge.py celery/bin/result.py celery/bin/shell.py celery/bin/upgrade.py celery/bin/worker.py celery/concurrency/__init__.py celery/concurrency/asynpool.py celery/concurrency/base.py celery/concurrency/eventlet.py celery/concurrency/gevent.py celery/concurrency/prefork.py celery/concurrency/solo.py celery/contrib/__init__.py celery/contrib/abortable.py celery/contrib/migrate.py celery/contrib/pytest.py celery/contrib/rdb.py celery/contrib/sphinx.py celery/contrib/testing/__init__.py celery/contrib/testing/app.py celery/contrib/testing/manager.py celery/contrib/testing/mocks.py celery/contrib/testing/tasks.py celery/contrib/testing/worker.py celery/events/__init__.py celery/events/cursesmon.py celery/events/dispatcher.py celery/events/dumper.py celery/events/event.py celery/events/receiver.py celery/events/snapshot.py celery/events/state.py celery/fixups/__init__.py celery/fixups/django.py celery/loaders/__init__.py celery/loaders/app.py celery/loaders/base.py celery/loaders/default.py celery/security/__init__.py celery/security/certificate.py celery/security/key.py celery/security/serialization.py celery/security/utils.py celery/task/__init__.py celery/task/base.py celery/utils/__init__.py celery/utils/abstract.py celery/utils/collections.py celery/utils/debug.py celery/utils/deprecated.py celery/utils/encoding.py celery/utils/functional.py celery/utils/graph.py celery/utils/imports.py celery/utils/iso8601.py celery/utils/log.py celery/utils/nodenames.py celery/utils/objects.py celery/utils/saferepr.py celery/utils/serialization.py celery/utils/sysinfo.py celery/utils/term.py celery/utils/text.py celery/utils/threads.py celery/utils/time.py celery/utils/timer2.py celery/utils/dispatch/__init__.py celery/utils/dispatch/signal.py celery/utils/dispatch/weakref_backports.py celery/utils/static/__init__.py celery/utils/static/celery_128.png celery/worker/__init__.py celery/worker/autoscale.py celery/worker/components.py celery/worker/control.py celery/worker/heartbeat.py celery/worker/loops.py celery/worker/pidbox.py celery/worker/request.py celery/worker/state.py celery/worker/strategy.py celery/worker/worker.py celery/worker/consumer/__init__.py celery/worker/consumer/agent.py celery/worker/consumer/connection.py celery/worker/consumer/consumer.py celery/worker/consumer/control.py celery/worker/consumer/events.py celery/worker/consumer/gossip.py celery/worker/consumer/heart.py celery/worker/consumer/mingle.py celery/worker/consumer/tasks.py docs/AUTHORS.txt docs/Makefile docs/THANKS docs/changelog.rst docs/community.rst docs/conf.py docs/configuration.html docs/contributing.rst docs/copyright.rst docs/faq.rst docs/glossary.rst docs/index.rst docs/make.bat docs/spelling_wordlist.txt docs/whatsnew-3.1.rst docs/whatsnew-4.0.rst docs/_ext/celerydocs.py docs/_static/.keep docs/_templates/sidebardonations.html docs/django/first-steps-with-django.rst docs/django/index.rst docs/getting-started/first-steps-with-celery.rst docs/getting-started/index.rst docs/getting-started/introduction.rst docs/getting-started/next-steps.rst docs/getting-started/resources.rst docs/getting-started/brokers/index.rst docs/getting-started/brokers/rabbitmq.rst docs/getting-started/brokers/redis.rst 
docs/getting-started/brokers/sqs.rst docs/history/changelog-1.0.rst docs/history/changelog-2.0.rst docs/history/changelog-2.1.rst docs/history/changelog-2.2.rst docs/history/changelog-2.3.rst docs/history/changelog-2.4.rst docs/history/changelog-2.5.rst docs/history/changelog-3.0.rst docs/history/changelog-3.1.rst docs/history/changelog-4.0.rst docs/history/index.rst docs/history/whatsnew-2.5.rst docs/history/whatsnew-3.0.rst docs/images/celery-banner-small.png docs/images/celery-banner.png docs/images/celery_128.png docs/images/celery_512.png docs/images/celeryevshotsm.jpg docs/images/dashboard.png docs/images/favicon.ico docs/images/monitor.png docs/images/result_graph.png docs/images/worker_graph_full.png docs/includes/installation.txt docs/includes/introduction.txt docs/includes/resources.txt docs/internals/app-overview.rst docs/internals/deprecation.rst docs/internals/guide.rst docs/internals/index.rst docs/internals/protocol.rst docs/internals/worker.rst docs/internals/reference/celery._state.rst docs/internals/reference/celery.app.annotations.rst docs/internals/reference/celery.app.routes.rst docs/internals/reference/celery.app.trace.rst docs/internals/reference/celery.backends.amqp.rst docs/internals/reference/celery.backends.async.rst docs/internals/reference/celery.backends.base.rst docs/internals/reference/celery.backends.cache.rst docs/internals/reference/celery.backends.cassandra.rst docs/internals/reference/celery.backends.consul.rst docs/internals/reference/celery.backends.couchbase.rst docs/internals/reference/celery.backends.couchdb.rst docs/internals/reference/celery.backends.database.models.rst docs/internals/reference/celery.backends.database.rst docs/internals/reference/celery.backends.database.session.rst docs/internals/reference/celery.backends.dynamodb.rst docs/internals/reference/celery.backends.elasticsearch.rst docs/internals/reference/celery.backends.filesystem.rst docs/internals/reference/celery.backends.mongodb.rst docs/internals/reference/celery.backends.redis.rst docs/internals/reference/celery.backends.riak.rst docs/internals/reference/celery.backends.rpc.rst docs/internals/reference/celery.backends.rst docs/internals/reference/celery.concurrency.base.rst docs/internals/reference/celery.concurrency.eventlet.rst docs/internals/reference/celery.concurrency.gevent.rst docs/internals/reference/celery.concurrency.prefork.rst docs/internals/reference/celery.concurrency.rst docs/internals/reference/celery.concurrency.solo.rst docs/internals/reference/celery.events.cursesmon.rst docs/internals/reference/celery.events.dumper.rst docs/internals/reference/celery.events.snapshot.rst docs/internals/reference/celery.platforms.rst docs/internals/reference/celery.security.certificate.rst docs/internals/reference/celery.security.key.rst docs/internals/reference/celery.security.serialization.rst docs/internals/reference/celery.security.utils.rst docs/internals/reference/celery.utils.abstract.rst docs/internals/reference/celery.utils.collections.rst docs/internals/reference/celery.utils.deprecated.rst docs/internals/reference/celery.utils.dispatch.rst docs/internals/reference/celery.utils.dispatch.signal.rst docs/internals/reference/celery.utils.dispatch.weakref_backports.rst docs/internals/reference/celery.utils.functional.rst docs/internals/reference/celery.utils.graph.rst docs/internals/reference/celery.utils.imports.rst docs/internals/reference/celery.utils.iso8601.rst docs/internals/reference/celery.utils.log.rst docs/internals/reference/celery.utils.nodenames.rst 
docs/internals/reference/celery.utils.objects.rst docs/internals/reference/celery.utils.rst docs/internals/reference/celery.utils.saferepr.rst docs/internals/reference/celery.utils.serialization.rst docs/internals/reference/celery.utils.sysinfo.rst docs/internals/reference/celery.utils.term.rst docs/internals/reference/celery.utils.text.rst docs/internals/reference/celery.utils.threads.rst docs/internals/reference/celery.utils.time.rst docs/internals/reference/celery.utils.timer2.rst docs/internals/reference/celery.worker.autoscale.rst docs/internals/reference/celery.worker.components.rst docs/internals/reference/celery.worker.control.rst docs/internals/reference/celery.worker.heartbeat.rst docs/internals/reference/celery.worker.loops.rst docs/internals/reference/celery.worker.pidbox.rst docs/internals/reference/index.rst docs/reference/celery.app.amqp.rst docs/reference/celery.app.backends.rst docs/reference/celery.app.builtins.rst docs/reference/celery.app.control.rst docs/reference/celery.app.defaults.rst docs/reference/celery.app.events.rst docs/reference/celery.app.log.rst docs/reference/celery.app.registry.rst docs/reference/celery.app.rst docs/reference/celery.app.task.rst docs/reference/celery.app.utils.rst docs/reference/celery.apps.beat.rst docs/reference/celery.apps.multi.rst docs/reference/celery.apps.worker.rst docs/reference/celery.beat.rst docs/reference/celery.bin.amqp.rst docs/reference/celery.bin.base.rst docs/reference/celery.bin.beat.rst docs/reference/celery.bin.call.rst docs/reference/celery.bin.celery.rst docs/reference/celery.bin.control.rst docs/reference/celery.bin.events.rst docs/reference/celery.bin.graph.rst docs/reference/celery.bin.list.rst docs/reference/celery.bin.logtool.rst docs/reference/celery.bin.migrate.rst docs/reference/celery.bin.multi.rst docs/reference/celery.bin.purge.rst docs/reference/celery.bin.result.rst docs/reference/celery.bin.shell.rst docs/reference/celery.bin.upgrade.rst docs/reference/celery.bin.worker.rst docs/reference/celery.bootsteps.rst docs/reference/celery.contrib.abortable.rst docs/reference/celery.contrib.migrate.rst docs/reference/celery.contrib.pytest.rst docs/reference/celery.contrib.rdb.rst docs/reference/celery.contrib.sphinx.rst docs/reference/celery.contrib.testing.app.rst docs/reference/celery.contrib.testing.manager.rst docs/reference/celery.contrib.testing.mocks.rst docs/reference/celery.contrib.testing.worker.rst docs/reference/celery.events.dispatcher.rst docs/reference/celery.events.event.rst docs/reference/celery.events.receiver.rst docs/reference/celery.events.rst docs/reference/celery.events.state.rst docs/reference/celery.exceptions.rst docs/reference/celery.loaders.app.rst docs/reference/celery.loaders.base.rst docs/reference/celery.loaders.default.rst docs/reference/celery.loaders.rst docs/reference/celery.result.rst docs/reference/celery.rst docs/reference/celery.schedules.rst docs/reference/celery.security.rst docs/reference/celery.signals.rst docs/reference/celery.states.rst docs/reference/celery.utils.debug.rst docs/reference/celery.worker.consumer.agent.rst docs/reference/celery.worker.consumer.connection.rst docs/reference/celery.worker.consumer.consumer.rst docs/reference/celery.worker.consumer.control.rst docs/reference/celery.worker.consumer.events.rst docs/reference/celery.worker.consumer.gossip.rst docs/reference/celery.worker.consumer.heart.rst docs/reference/celery.worker.consumer.mingle.rst docs/reference/celery.worker.consumer.rst docs/reference/celery.worker.consumer.tasks.rst 
docs/reference/celery.worker.request.rst docs/reference/celery.worker.rst docs/reference/celery.worker.state.rst docs/reference/celery.worker.strategy.rst docs/reference/celery.worker.worker.rst docs/reference/index.rst docs/sec/CELERYSA-0001.txt docs/sec/CELERYSA-0002.txt docs/sec/CELERYSA-0003.txt docs/templates/readme.txt docs/tutorials/daemonizing.html docs/tutorials/debugging.html docs/tutorials/index.rst docs/tutorials/task-cookbook.rst docs/userguide/application.rst docs/userguide/calling.rst docs/userguide/canvas.rst docs/userguide/configuration.rst docs/userguide/daemonizing.rst docs/userguide/debugging.rst docs/userguide/extending.rst docs/userguide/index.rst docs/userguide/monitoring.rst docs/userguide/optimizing.rst docs/userguide/periodic-tasks.rst docs/userguide/routing.rst docs/userguide/security.rst docs/userguide/signals.rst docs/userguide/tasks.rst docs/userguide/testing.rst docs/userguide/workers.rst docs/userguide/concurrency/eventlet.rst docs/userguide/concurrency/index.rst examples/README.rst examples/app/myapp.py examples/celery_http_gateway/README.rst examples/celery_http_gateway/__init__.py examples/celery_http_gateway/manage.py examples/celery_http_gateway/settings.py examples/celery_http_gateway/tasks.py examples/celery_http_gateway/urls.py examples/django/README.rst examples/django/manage.py examples/django/requirements.txt examples/django/demoapp/__init__.py examples/django/demoapp/models.py examples/django/demoapp/tasks.py examples/django/demoapp/views.py examples/django/proj/__init__.py examples/django/proj/celery.py examples/django/proj/settings.py examples/django/proj/urls.py examples/django/proj/wsgi.py examples/eventlet/README.rst examples/eventlet/bulk_task_producer.py examples/eventlet/celeryconfig.py examples/eventlet/tasks.py examples/eventlet/webcrawler.py examples/gevent/celeryconfig.py examples/gevent/tasks.py examples/next-steps/setup.py examples/next-steps/proj/__init__.py examples/next-steps/proj/celery.py examples/next-steps/proj/tasks.py examples/periodic-tasks/myapp.py examples/resultgraph/tasks.py examples/tutorial/tasks.py extra/bash-completion/celery.bash extra/generic-init.d/celerybeat extra/generic-init.d/celeryd extra/macOS/org.celeryq.beat.plist extra/macOS/org.celeryq.worker.plist extra/supervisord/celery.sh extra/supervisord/celerybeat.conf extra/supervisord/celeryd.conf extra/supervisord/supervisord.conf extra/systemd/celery.conf extra/systemd/celery.service extra/systemd/celery.tmpfiles extra/zsh-completion/celery.zsh requirements/README.rst requirements/default.txt requirements/docs.txt requirements/jython.txt requirements/pkgutils.txt requirements/security.txt requirements/test-ci-base.txt requirements/test-ci-default.txt requirements/test-integration.txt requirements/test-pypy3.txt requirements/test.txt requirements/deps/mock.txt requirements/deps/nose.txt requirements/extras/auth.txt requirements/extras/cassandra.txt requirements/extras/consul.txt requirements/extras/couchbase.txt requirements/extras/couchdb.txt requirements/extras/django.txt requirements/extras/dynamodb.txt requirements/extras/elasticsearch.txt requirements/extras/eventlet.txt requirements/extras/gevent.txt requirements/extras/librabbitmq.txt requirements/extras/memcache.txt requirements/extras/mongodb.txt requirements/extras/msgpack.txt requirements/extras/pymemcache.txt requirements/extras/pyro.txt requirements/extras/redis.txt requirements/extras/riak.txt requirements/extras/slmq.txt requirements/extras/solar.txt requirements/extras/sqlalchemy.txt 
requirements/extras/sqs.txt requirements/extras/tblib.txt requirements/extras/yaml.txt requirements/extras/zeromq.txt requirements/extras/zookeeper.txt t/__init__.py t/benchmarks/bench_worker.py t/distro/test_CI_reqs.py t/integration/__init__.py t/integration/conftest.py t/integration/tasks.py t/integration/test_canvas.py t/integration/test_tasks.py t/unit/__init__.py t/unit/conftest.py t/unit/app/__init__.py t/unit/app/test_amqp.py t/unit/app/test_annotations.py t/unit/app/test_app.py t/unit/app/test_backends.py t/unit/app/test_beat.py t/unit/app/test_builtins.py t/unit/app/test_celery.py t/unit/app/test_control.py t/unit/app/test_defaults.py t/unit/app/test_exceptions.py t/unit/app/test_loaders.py t/unit/app/test_log.py t/unit/app/test_registry.py t/unit/app/test_routes.py t/unit/app/test_schedules.py t/unit/app/test_utils.py t/unit/apps/__init__.py t/unit/apps/test_multi.py t/unit/backends/__init__.py t/unit/backends/test_amqp.py t/unit/backends/test_base.py t/unit/backends/test_cache.py t/unit/backends/test_cassandra.py t/unit/backends/test_consul.py t/unit/backends/test_couchbase.py t/unit/backends/test_couchdb.py t/unit/backends/test_database.py t/unit/backends/test_dynamodb.py t/unit/backends/test_elasticsearch.py t/unit/backends/test_filesystem.py t/unit/backends/test_mongodb.py t/unit/backends/test_redis.py t/unit/backends/test_riak.py t/unit/backends/test_rpc.py t/unit/bin/__init__.py t/unit/bin/celery.py t/unit/bin/test_amqp.py t/unit/bin/test_base.py t/unit/bin/test_beat.py t/unit/bin/test_call.py t/unit/bin/test_celery.py t/unit/bin/test_celeryd_detach.py t/unit/bin/test_celeryevdump.py t/unit/bin/test_control.py t/unit/bin/test_events.py t/unit/bin/test_list.py t/unit/bin/test_migrate.py t/unit/bin/test_multi.py t/unit/bin/test_purge.py t/unit/bin/test_result.py t/unit/bin/test_worker.py t/unit/bin/proj/__init__.py t/unit/bin/proj/app.py t/unit/compat_modules/__init__.py t/unit/compat_modules/test_compat.py t/unit/compat_modules/test_compat_utils.py t/unit/compat_modules/test_decorators.py t/unit/compat_modules/test_messaging.py t/unit/concurrency/__init__.py t/unit/concurrency/test_concurrency.py t/unit/concurrency/test_eventlet.py t/unit/concurrency/test_gevent.py t/unit/concurrency/test_pool.py t/unit/concurrency/test_prefork.py t/unit/concurrency/test_solo.py t/unit/contrib/__init__.py t/unit/contrib/test_abortable.py t/unit/contrib/test_migrate.py t/unit/contrib/test_rdb.py t/unit/events/__init__.py t/unit/events/test_cursesmon.py t/unit/events/test_events.py t/unit/events/test_snapshot.py t/unit/events/test_state.py t/unit/fixups/__init__.py t/unit/fixups/test_django.py t/unit/security/__init__.py t/unit/security/case.py t/unit/security/test_certificate.py t/unit/security/test_key.py t/unit/security/test_security.py t/unit/security/test_serialization.py t/unit/tasks/__init__.py t/unit/tasks/test_canvas.py t/unit/tasks/test_chord.py t/unit/tasks/test_context.py t/unit/tasks/test_result.py t/unit/tasks/test_states.py t/unit/tasks/test_tasks.py t/unit/tasks/test_trace.py t/unit/utils/__init__.py t/unit/utils/test_collections.py t/unit/utils/test_debug.py t/unit/utils/test_deprecated.py t/unit/utils/test_dispatcher.py t/unit/utils/test_encoding.py t/unit/utils/test_functional.py t/unit/utils/test_graph.py t/unit/utils/test_imports.py t/unit/utils/test_local.py t/unit/utils/test_nodenames.py t/unit/utils/test_objects.py t/unit/utils/test_pickle.py t/unit/utils/test_platforms.py t/unit/utils/test_saferepr.py t/unit/utils/test_serialization.py t/unit/utils/test_sysinfo.py 
t/unit/utils/test_term.py t/unit/utils/test_text.py t/unit/utils/test_threads.py t/unit/utils/test_time.py t/unit/utils/test_timer2.py t/unit/utils/test_utils.py t/unit/worker/__init__.py t/unit/worker/test_autoscale.py t/unit/worker/test_bootsteps.py t/unit/worker/test_components.py t/unit/worker/test_consumer.py t/unit/worker/test_control.py t/unit/worker/test_heartbeat.py t/unit/worker/test_loops.py t/unit/worker/test_request.py t/unit/worker/test_revoke.py t/unit/worker/test_state.py t/unit/worker/test_strategy.py t/unit/worker/test_worker.py
celery-4.1.0/docs/0000755000175000017500000000000013135426347013665 5ustar omeromer00000000000000
celery-4.1.0/docs/_ext/0000755000175000017500000000000013135426347014624 5ustar omeromer00000000000000
celery-4.1.0/docs/_ext/celerydocs.py0000644000175000017500000001257513130607475017332 0ustar omeromer00000000000000
from __future__ import absolute_import, unicode_literals

import sys
import typing

from docutils import nodes
from sphinx.environment import NoUri

# Attributes available on a Celery app instance, mapped to the
# fully-qualified name of the class that implements them.
APPATTRS = {
    'amqp': 'celery.app.amqp.AMQP',
    'backend': 'celery.backends.base.BaseBackend',
    'conf': 'celery.app.utils.Settings',
    'control': 'celery.app.control.Control',
    'events': 'celery.events.Events',
    'loader': 'celery.app.loaders.base.BaseLoader',
    'log': 'celery.app.log.Logging',
    'pool': 'kombu.connection.ConnectionPool',
    'tasks': 'celery.app.registry.Registry',

    'AsyncResult': 'celery.result.AsyncResult',
    'ResultSet': 'celery.result.ResultSet',
    'GroupResult': 'celery.result.GroupResult',
    'Worker': 'celery.apps.worker.Worker',
    'WorkController': 'celery.worker.WorkController',
    'Beat': 'celery.apps.beat.Beat',
    'Task': 'celery.app.task.Task',
    'signature': 'celery.canvas.Signature',
}

# Methods and attributes documented directly on celery.Celery itself.
APPDIRECT = {
    'on_configure', 'on_after_configure', 'on_after_finalize',
    'set_current', 'set_default', 'close', 'on_init', 'start',
    'worker_main', 'task', 'gen_task_name', 'finalize',
    'add_defaults', 'config_from_object', 'config_from_envvar',
    'config_from_cmdline', 'setup_security', 'autodiscover_tasks',
    'send_task', 'connection', 'connection_or_acquire',
    'producer_or_acquire', 'prepare_config', 'now', 'select_queues',
    'either', 'bugreport', 'create_task_cls', 'subclass_with_self',
    'annotations', 'current_task', 'oid', 'timezone',
    '__reduce_keys__', 'fixups', 'finalized', 'configured',
    'add_periodic_task', 'autofinalize', 'steps', 'user_options',
    'main', 'clock',
}

APPATTRS.update({x: 'celery.Celery.{0}'.format(x) for x in APPDIRECT})

ABBRS = {
    'Celery': 'celery.Celery',
}

ABBR_EMPTY = {
    'exc': 'celery.exceptions',
}
DEFAULT_EMPTY = 'celery.Celery'

# On Python 2, coerce unicode to native (byte) strings for Sphinx APIs.
if sys.version_info[0] < 3:
    def bytes_if_py2(s):
        if isinstance(s, unicode):
            return s.encode()
        return s
else:
    def bytes_if_py2(s):  # noqa
        return s


def typeify(S, type):
    # Method/function references are displayed with call parentheses.
    if type in ('meth', 'func'):
        return S + '()'
    return S


def shorten(S, newtarget, src_dict):
    # Build the display text: '@-' drops the prefix entirely, while a
    # plain '@' shows APPATTRS entries as attributes of the app instance.
    if S.startswith('@-'):
        return S[2:]
    elif S.startswith('@'):
        if src_dict is APPATTRS:
            return '.'.join(['app', S[1:]])
        return S[1:]
    return S


def get_abbr(pre, rest, type, orig=None):
    if pre:
        for d in APPATTRS, ABBRS:
            try:
                return d[pre], rest, d
            except KeyError:
                pass
        raise KeyError('Unknown abbreviation: {0} ({1})'.format(
            '.'.join([pre, rest]) if orig is None else orig, type,
        ))
    else:
        for d in APPATTRS, ABBRS:
            try:
                return d[rest], '', d
            except KeyError:
                pass
    return ABBR_EMPTY.get(type, DEFAULT_EMPTY), rest, ABBR_EMPTY


def resolve(S, type):
    if '.' not in S:
        try:
            getattr(typing, S)
        except AttributeError:
            pass
        else:
            return 'typing.{0}'.format(S), None
    orig = S
    if S.startswith('@'):
        S = S.lstrip('@-')
        try:
            pre, rest = S.split('.', 1)
        except ValueError:
            pre, rest = '', S
        target, rest, src = get_abbr(pre, rest, type, orig)
        return '.'.join([target, rest]) if rest else target, src
    return S, None


def pkg_of(module_fqdn):
    return module_fqdn.split('.', 1)[0]


def basename(module_fqdn):
    return module_fqdn.lstrip('@').rsplit('.', -1)[-1]


def modify_textnode(T, newtarget, node, src_dict, type):
    src = node.children[0].rawsource
    return nodes.Text(
        (typeify(basename(T), type) if '~' in src
         else typeify(shorten(T, newtarget, src_dict), type)),
        src,
    )


def maybe_resolve_abbreviations(app, env, node, contnode):
    domainname = node.get('refdomain')
    target = node['reftarget']
    type = node['reftype']
    if target.startswith('@'):
        newtarget, src_dict = resolve(target, type)
        node['reftarget'] = newtarget
        # shorten text if '~' is not enabled.
        if len(contnode) and isinstance(contnode[0], nodes.Text):
            contnode[0] = modify_textnode(target, newtarget,
                                          node, src_dict, type)
        if domainname:
            try:
                domain = env.domains[node.get('refdomain')]
            except KeyError:
                raise NoUri
            return domain.resolve_xref(env, node['refdoc'], app.builder,
                                       type, newtarget, node, contnode)


def setup(app):
    # Sphinx extension entry-point: hook abbreviation resolution into
    # missing-reference and register the extra cross-reference roles.
    app.connect(
        bytes_if_py2('missing-reference'),
        maybe_resolve_abbreviations,
    )

    app.add_crossref_type(
        directivename=bytes_if_py2('sig'),
        rolename=bytes_if_py2('sig'),
        indextemplate=bytes_if_py2('pair: %s; sig'),
    )
    app.add_crossref_type(
        directivename=bytes_if_py2('state'),
        rolename=bytes_if_py2('state'),
        indextemplate=bytes_if_py2('pair: %s; state'),
    )
    app.add_crossref_type(
        directivename=bytes_if_py2('control'),
        rolename=bytes_if_py2('control'),
        indextemplate=bytes_if_py2('pair: %s; control'),
    )
    app.add_crossref_type(
        directivename=bytes_if_py2('event'),
        rolename=bytes_if_py2('event'),
        indextemplate=bytes_if_py2('pair: %s; event'),
    )
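The `@`-prefixed abbreviations defined above are what the documentation uses in cross-references: `resolve()` expands them to fully-qualified targets, while `shorten()` and `typeify()` build the text the reader sees. A minimal sketch of how the helpers compose, assuming it is run from the unpacked source root with docutils and a Sphinx version contemporary with this release installed (the extension imports both at module level, and `NoUri` still lived in `sphinx.environment` at the time); the reference strings below are illustrative inputs, not taken from the docs:

# A minimal sketch, not part of the release: assumes the working
# directory is the unpacked source root and that docutils plus a
# Sphinx with sphinx.environment.NoUri are importable.
import sys
sys.path.insert(0, 'docs/_ext')

import celerydocs

# 'Task' is an APPATTRS abbreviation: the canonical class path is
# substituted and the remaining attribute path is appended.
target, src = celerydocs.resolve('@Task.retry', 'meth')
assert target == 'celery.app.task.Task.retry'

# A bare app attribute resolves with no trailing path.
target, src = celerydocs.resolve('@control', 'attr')
assert target == 'celery.app.control.Control'

# shorten() produces the display text: APPATTRS hits are shown as
# attributes of the app instance, while '@-' suppresses that prefix.
assert celerydocs.shorten('@control', target, celerydocs.APPATTRS) == 'app.control'
assert celerydocs.shorten('@-control', target, celerydocs.APPATTRS) == 'control'

# typeify() adds call parentheses only for method/function references.
assert celerydocs.typeify('retry', 'meth') == 'retry()'
assert celerydocs.typeify('conf', 'attr') == 'conf'

`setup()` additionally registers four custom cross-reference roles (`sig`, `state`, `control`, `event`), so document sources can write references such as :state:`SUCCESS` or :control:`shutdown` and have them indexed.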
celery-4.1.0/docs/conf.py0000644000175000017500000000477513130607475015173 0ustar omeromer00000000000000
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from sphinx_celery import conf

# sphinx_celery carries the Sphinx configuration shared by all Celery
# projects; build_config() returns a dict of standard conf.py names
# that is injected into this module's globals.
globals().update(conf.build_config(
    'celery', __file__, project='Celery',
    version_dev='5.0',
    version_stable='4.0',
    canonical_url='http://docs.celeryproject.org',
    webdomain='celeryproject.org',
    github_project='celery/celery',
    author='Ask Solem & contributors',
    author_name='Ask Solem',
    copyright='2009-2016',
    publisher='Celery Project',
    html_logo='images/celery_512.png',
    html_favicon='images/favicon.ico',
    html_prepend_sidebars=['sidebardonations.html'],
    extra_extensions=[
        'sphinx.ext.napoleon',
        'celery.contrib.sphinx',
        'celerydocs',
    ],
    extra_intersphinx_mapping={
        'cyanide': ('https://cyanide.readthedocs.io/en/latest', None),
    },
    apicheck_ignore_modules=[
        'celery.five',
        'celery.__main__',
        'celery.task',
        'celery.contrib.testing',
        'celery.contrib.testing.tasks',
        'celery.task.base',
        'celery.bin',
        'celery.bin.celeryd_detach',
        'celery.contrib',
        r'celery.fixups.*',
        'celery.local',
        'celery.app.base',
        'celery.apps',
        'celery.canvas',
        'celery.concurrency.asynpool',
        'celery.utils.encoding',
        r'celery.utils.static.*',
    ],
))

settings = {}
ignored_settings = {
    # Deprecated broker settings (replaced by broker_url)
    'broker_host', 'broker_user', 'broker_password', 'broker_vhost',
    'broker_port', 'broker_transport',

    # Deprecated task settings.
    'chord_propagates',

    # MongoDB settings replaced by URL config.
    'mongodb_backend_settings',

    # Database URL replaced by URL config (result_backend = db+...).
    'database_url',

    # Redis settings replaced by URL config.
    'redis_host', 'redis_port', 'redis_db', 'redis_password',

    # Old deprecated AMQP result backend.
    'result_exchange', 'result_exchange_type',

    # Experimental
    'worker_agent',

    # Deprecated worker settings.
    'worker_pool_putlocks',
}


def configcheck_project_settings():
    # Index every documented setting by name, as defined in
    # celery.app.defaults.
    from celery.app.defaults import NAMESPACES, flatten
    settings.update(dict(flatten(NAMESPACES)))
    return set(settings)


def is_deprecated_setting(setting):
    # Deprecated settings carry a deprecate_by version on their Option.
    try:
        return settings[setting].deprecate_by
    except KeyError:
        pass


def configcheck_should_ignore(setting):
    return setting in ignored_settings or is_deprecated_setting(setting)
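Taken together, the last three functions are the project's configcheck hooks: every documented setting name is collected from `celery.app.defaults`, and a name may be skipped only if it is explicitly listed in `ignored_settings` or its `Option` carries a `deprecate_by` version. A standalone sketch of that logic, assuming a Celery 4.x install (`flatten` and `NAMESPACES` are the same helpers `conf.py` imports; the two-entry `ignored` set is deliberately abbreviated):

# A minimal sketch, not part of the release: mirrors the configcheck
# helpers above using the celery.app.defaults API they import.
from celery.app.defaults import NAMESPACES, flatten

# flatten() yields (name, Option) pairs for every documented setting,
# e.g. 'broker_url', 'task_always_eager', 'worker_concurrency', ...
settings = dict(flatten(NAMESPACES))


def is_deprecated_setting(setting):
    # Options scheduled for removal carry a deprecate_by version;
    # unknown names fall through and return None.
    try:
        return settings[setting].deprecate_by
    except KeyError:
        pass


ignored = {'broker_host', 'worker_pool_putlocks'}  # abbreviated list


def should_ignore(setting):
    return setting in ignored or is_deprecated_setting(setting)


print(bool(should_ignore('broker_host')))  # True: explicitly ignored
print(bool(should_ignore('broker_url')))   # False: must be documented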
celery-4.1.0/docs/images/0000755000175000017500000000000013135426347015132 5ustar omeromer00000000000000
celery-4.1.0/docs/images/dashboard.png0000644000175000017500000025545713130607475017577 0ustar omeromer00000000000000
[binary PNG image data omitted: the raw bytes of docs/images/dashboard.png are not reproducible as text]
x C"x·‚cq<1‘>rn™jh-+a‡ök™\œßÄÁô§éç^—–u<¸äᦆ~Mãùùĺ·í„߀øØ—¥€…ZíEÝ iTCÍY«eö’Ý÷=ÞèÕŽä³;Ù åÉäݱoòüŠHª«Ø¦w÷}äâïusiй'°”Ù =yypSN¬Æ*àqÜÜ,Rõ;Øøm2ñqñ$¥fÑJk™ŽRO².b{tÉ$^3ÐÐ[Ö_DÕRÝ]ZÉIDATiëBÜ L¥RävQuÎÿòÄgï ¥©h>Žý&m´¤+=Ú1kî|ºv@“› MšÑœM„`ÿî´÷€‹=ˆoL©¹VòfÒ¥–œ¨ÕƒÃ&؃ƒÁ`s‚íêZ5+*7Њû ÿ¹ððÃÿ¥ŸÙÌádà/Â?é[²Pr)Å~uU——¯>Ú6ÌPwO{Îc}ìfføÜÇú$èŒ àÒr&?MŸUX¼WÀ";ÖM§öB™ìúônáYø›¦ù ÉåÌL*ú×ÕU­šÐ®XóvlóüTµ¬¶ &$,$däÿ¥:3ç;2™ÈËïö5]5À öíø7sŽd3«É×<¤õ¦µ%&Ø%Y+Ó€f]ÛàRÃ‹Ö 4–Õ¹º}àõkSB5ÛåËåÏ[ȤHý"Oú|ÆéžmHÝù«â| ÿn6þÞ®{5£ÇòÎû{‰ {WµUãÞ„Eö}°Àr,¢tw‡øMûO+£´ÑrÄb0åç­i#¯ÕƒÃ&Ø ……Bas‚]U:È̼\%òŸ>|ƒï¾y”=ßeTNðŸLÿ„‚;Kt¿/å°sêðYa†róeƒ pÐòdïÁ¬ß´Š·×#žkSÌfÇŒbÞŇXýÜxKVn`¸xZ{&c"‡ô<ÐÌòNŽÉ¿qðsVAùÏŽ(±ÿ ð‰ßñ玣·Õ6ÓYSÇüÆ#õ=Áð=Á §òèRkntì6Œ_ãªRp£V~éüuõJa^g5 ¦à|Cê/]Æ Uþ·:+#¬ìdvO¤®:‡óFG·†wC NÛÿIQ¿~Æ•Z*LYÙ@7î/X‘V5¡[ ¬Ú–ÎÞEO37gÛf÷¶¤i›Ò&8“D ­|(BT˜W›N ÛÃEýPÚi29}”AcäòpQeLYYÀ2(¸¥®Û>ˆßøaËàýᄀ£·rO0µ×ë@ñËæ”ùyW ¯w·{²7wí4ØÏôÿ-Ewý{÷…qê<Ô¨I—îÏ»-i‰è~ŸÇ?6-â›K%ê¨H¾â¤ºí_¢‰l:xŒ†]'NÊ]H'ùÂ"¦ïø†s×ÿà»Æòctk×sîA¦Ô)?Ç–8±>rßÓq.ñ{¦¯Yt$°¹g‰ø¬•èêãÍUaNÔ±÷à ÞÜs Œá9?ëmc¡Â`Ì !q+ï,™F&ùgÔE™¶üôWM£gØöFo[ÀÆÉcñÒÏD±1ÍzÙ’×o(¬ü|s™³l·LÚõ)'‰Þwóñm¨æ¢mݽn8:qW)Ö_ôg˜5a,Ÿn?Áõôö|ò.‹uðX¦Ôjâ|Æç›“’šFüžLZ¾AmèÐë Ø2‡ÿÅ踞šÂ±-« †!ƒ;x QU<[v¥GYøùNÓÓøsË:ë`p×ò+Ä­júàÃÀ·ükáO\NÍäò¡Í¼ ‹?”WƒÑtƒë :þ7ùuކŒlÀò,¥#—uÄ'¤å_ia-¯ÕƒCV°ÝÝkr%)¯ÚõÊ\Å.¸ÿúJRîîö?'«i6‹ù癳ˆ,]@ÃVo2§{{\hÏÇO3yë"þ±Ø’V·É8>Üçs›Ëã¹]ꎷšÝ:Àí¦ R;žíÞšû®ðT÷ ?½?às&eŽ#bß¿x9ÿ™V¾]çÖÕr·r9#ôë%gF ½Òˆˆ|,ÿ·¦Œ¹œû• wQÆÉ%Ê:;¥iMæãÁW™¶9’i+#¨áÂÏÇ]‰Õ¶qNKÃc³‰Xcyz]¯Ö¸æüÅž­ðRH½bÛ…²·ðjdÞ{b8-êû2 ÿMlÏ|ÅÛë,µsÊÿscBäm¶üfT€ÂÙr̤ÌEAÑ϶Êüúý\¢ë„Ôm ïu¸Fô÷«8ëè¶°; xÈ”FTTÉþâêÓ—ùSuL›þ:‘ùŸ½¶ü ü½”˜ë>KĬL&……²!?­çè7y}T[™¸ƒÁû†Ñf`„/dì¾q|õáPç–λbÂï–õ¤w7E^^žÙž—bǦüH»:~6óØGvz.m}ÚÑÀË»Ì ö•¤NèþÄÍ]E·l×WÜŸ)q6·_b[&=醠ÔâîêrÓ»žož­/L+([ü„@yùnÞÞ7«Û³,g›Ç½† 7½[:?gWO4JJÕ~zO­ÛæíâÓuÒMàææYxyðÍÛ-ÿ}Ö%c/+žÒiÆ’ûkºNºAKMµs‰íUÇ÷–×·žyïy«i ê¶ÅÓ®§žàÊßp¢Öã9µ\ 5ù,e-^ýÖUD%T¤}mÝR"î-·Ó_J0ÈÐPªj Q•³ô)Yú\œÕ\•%ëSne4¢RkP»/Qú¢¸U•ëÓ²ôFpÖöÛÒEeTî¸Ù@VޱpL,H/>^â¬B£R‚) }¶+­²Ô1±B‘W*oVŽªD^!þ.¦ÄѣΠ»Õg6›³‚íÓº%8Åî1¤§g”™Çݽ& 7ħu˪ D©ÅÝꢫ înž¨¤t¾›ïϺ´”¬[Êå4l4e¾åÚf,ùŒà:zàæi‡³|6öÑj<7•QzâîVÁÙ{€ÆÕ,CÙ³ç+É'¸RÉúnGZZÙë‚8…€Ò•šîÖ¨©D£µr•„ÊJž.ÅVúŸø›)mõ»›ÆK¥mþ…¥'Í¥ójä¢qÌ%âîøvéH»í¬¾çÚÉÉ g»?IÜÖ™±òΚ¤Wúìš³; ku£gljŒkéYáúnþÜ­ác<Öч–Vš¤øÄ¾¢uÚ»mîu}}û°ùÀG‡Q®!þAŽá–H?•q«ýåVÆO!þÒÅßÍžÇÒŽÂ!l'''Ôj5jµ£wÿï¡©?‚ðFÜv=.5ûðê>ŽÞaÃèÀçÁlf×±«+ÙŽ¤qucˆÿ# ëõ¤£CB!„¢ÚqØkºîö<['gþî£ûdtÿ‘ŽC!„Bñ7»'_Ó%„B!„BØ›L°…B!„B; ¶B!„Ba2ÁB!„B!ì@&ØB!„B!„È[!„B!„°™` !„B!„v l!„B!„Âd‚-„B!„BØ"//ϬP(ìValÊŽÞ'!„B!„¢\=ê ²[]f³ç;=ÈÊŠMùÑ¡ÛÕ—ô­ª%í+*Cú‹¨n¤O G~'îuU±8,—ˆ !„B!„v l!„B!„Âd‚-„B!„BØL°…B!„B; ¶B!„Ba2ÁB!„B!ì@&ØB!„B!„È[!„B!„°gG Dur##ƒÓ_­åÒO?aÒëN™”Z-  í+ãŠB!„ÕŠ¬` aGW÷ÆrþÛoïØÉ5€I¯çü·ßriÛOŽE!„BˆjÅ!+ØÙÙÙèt:.\¸@ZZZ™y<<>>¸¹¹9´‘ R×3㇟9ìC|]A¡PÜV}f³¹ðç3'?c½î09. 
ì:ž‡êÖ*· [ñÙ;öªn›»Õ‰åˬ¦å™óÈ3ƒ“ §; yN,_F£þF…~_L©Ú½—øäx4hA×:PKiIºWû(­¨¿¤s|ï~þº’UØ_<,iм ëiÅú’.fºÚ½t]2.ädBÎÎ΀£Ñ™Æí}ñÖ*¥Š*#c BÜ2Á>uê×®]£k×®4hРÌÄžíü¡;o‹‡š-þ¿½;‹ªz8þf†\PqÅÄ5%W4sÇ}©Ð,±HmÓ4µ4Mí«}Õ~®eùµEË4—¤R³r7·, wMËm4 ”}™q†ùý1€lÃ’ƒ#ú¼_¯^1sÎ=÷¹ÇÃå>÷Üä¶ÿ€#¢xµªªtñÝØÅÏWh|eûÝj¿¼(tæ:ÓDLj*éµáé‰ý‹ÉµKÖ%$R3ÒЧÀEKSšŒ\Ÿkálb5=+à…3‰iÔðô¢²ó]€r/ϲÛ:šåÏ=Î2=´êÔŠcŽÃYÿë|ÕŽNÜs /A¬Þ7•Fšh–‡sFQÍÓ%ç뛿|ÉO3¿¢‚Ú,Þøþß|‚ÚU‡ôS„zu®Ï'Wé?º ÑŸŒä/ÅËLzµ5±_¼Î‰ïÎ;zëîºÔ‹?³L³×ï§·¯†Èt<Cðmªstxâ|+Ëô0uå6ÕaÐï Cðt¾ áUÝN›eÓkZ0_åËàYTwÏþs™Jä&ùÙ^j!cNÜ]²Bˆ{ŸCìøøø"“ëlÕªU#>>¾Lb¸~y¯oš‹>@C»€Oy¯k?\€Ä¨Œ_ÿfxÒ¿ç:f´ (І­z©WÞaÈî &¶·¶­¤ªFGãOY×k€uAóaÆ-„ªëO¼Î¨Ë´Àþ`6rKÀ’uD:7-°¾Ô¨E<ú%Õ­FøÅxxdþÐe´v…Ô¨xâë̳‹Çйª>1êƯ'+vhÒl>ög1}óûáqLÛÊu¬e-ZÎcQÏgËäßè~`píÌ¥"`йHÂÕ[èZ5¡bÇáô{'‘½s¶pé9ÛU‡¸s\ܹKŠæÖä:îwí$®8©4˜/Ÿáê™L#0Ó‰2d¢sQ•š8SCçFe%`!ØÁ?Éé$ž->Z êr~õ`läŸà7‘ö¾Ö™BµwMü”¸gÅœ]Œ¤kVâ¡ökÏX?Ø•@L”í2°&ØG–ñeŸ‘ O]ÆicV£†ž¬†¤$Lj7ÜÔÊRF&Ä¿#û@!„¸÷9$ÁÎÌÌÌù¯(NNNÅÖùWV0(t.ïÌ Áø×ÿx÷ð†)·°Î_ÏÀ5oaôÁü.=ˆ8õ6KwõÏóŒËÓÆ:Ûõ, \ å­mÐØo8­L›X{b ¿w€¿®Ÿûˆð ˜T»!ÂÎi|ô¥ÆZïG¯R÷ V‡k›L&L&·nÝ*ô¿ìò²pðà‡hÂ'×ѻNï¶–Yiî¥á`Ø|’©ËÛýÞàáŠMÔuíœá§£Ûó¶Q‚zûáËA 88Á·úhÀÈ–ðíàýOx ÈŸô僭ÌêÔ‡ë¼ñ+›ñ÷:Ìxÿ´~xß ¦ílˆJ4Å^¦}0l1šðÁèoyªažíù´¬‹Q?ŸOYl³oTºŒhÿ!öÍ#ÞõhèÓÈàdLtY—rË%ë²ðë?~oM®~ßÌõ•ÑU¼Aì/`Ì7.ÿç_y?ÿ“§=¥{Ü=«ãQQƒ‹·*ÏÚÔn¤àÚ/¿s -µ‡SÁbAûâ<ŸðçÖ™}üñýaœj·£ß¢É(,%ý^fˆÿ“µKVñå1€¢o’¸)uÞøTpÌD†}M‡ÁÓ¡Ï BÚ{Y†AÏ{#–0ò³¹4rÓp+PYÏG›2ÒNÌ^ù#¿þºŸ5óFrìËé|´çª£7W<@d(„÷.‡½Ûh4b4mÞW¬P(P©J÷¯’R»j€FÔϹªOEïnËèe±p4®ð'3–ç»ï;î±¹ÛÐWχ¾| Ta¨×;¬ ßÄ»~õù*zãÉ|˜5ôÖ˺k¬†èZ̈8Àuž¥jè}¬[1ç“›ïãÔd%WS’¡„W)ª5 Msuoó†]àøFPW²Ù7˜ÏCô»ôŸ÷z™ü»ÜŸ¬×d›Ì¹Neš1f {Mfg?ÛOõnÃ¥ý\ø~ xoÞy ×~ƒl´mMÙ£WÎäôÖtšøCMO*Ö©†¶{cœ<½©TW‹ À§¾u>äòG÷É¿gHMêš¼©zu fôw82¦£Ã÷¢$=Ëg³ìÏøŒ1Z Êþ»Sh™‰ß>Êv:±ØõçΜåô18VûwÎDêhìן#Gä4ß(0„–1ëÐ¥Û÷n QFd(„÷>‡%Ø ……BQd‚]V¯›HN¹ \ãà €‘ ;°Ùë}^Έºòõ”oy(«þù?qÔ¹Þ|œÓ†¡Øzi`\Ü îĺÍKY°§É´ç…†>`±PÝ«p‰’ßÂÍÑè«ãÀ­¸\Ú9«¡„³“ÙÛržøý×哤§Õ7cXÿqžÇ«VãF¢ø9ó—Sº5 ®Ú§[7~LEgIu¤ce€$R/]­õL‡Ò%ëŒGþÏÅ­C¥49çWL¸¢Êzžšéf™‰qDþz· R%jòžuhq&fÌãȬ¬ËsÝêÒ¤pé©€»£÷ƒž9Álê4–õ{žÇ7ë*ZKNÙÐBÊ2H£~~ÿ0>$øv[›æòBútVu¾ÂçýY0¦SÖïœÉ:ÃýÃþœŠˆì…âÞWžoÇüׄ1î›EœOˆäàá±,¸î:†û¬eÉÑœÿãB6Ï%4*ïûºKR/÷[Uš¼F;¢Ùpü5[NÊIÊ6†‰ÌÌѸó<<’3¨Òø)¼ ‡ûQ=^ùù—<'"¾ ídžËç¹³‘×Ö|´ ·oÅ<ñYl, Ðjp‚1«&s4æ<cLØ ¨6š[Ùî+ FSQ1[˜ôÉ(’Cj,¢pQŸ~C: òÌ «ÿ‡ÿ;ï1äãWpŒÇ¾çʵҴ¦È÷³¢ÐRg"¹vÔÚpzìŽì‹DÓ¬5ê©É(Õúî=õ;Ãö)¬>pŽøøXNìXÊôíÔ·ò‚$‘Ÿ~ë§lÂy/wÃùf$z½½^OlªýÖ¥6Ê4Nø”ÐÐPŽ9‘#û™Ú ü&®áð¬xÈ/'²tÇ bã£9òÝ2惑õ½¹â û@!„¸÷9ä”»N§#66–Š+:‹­P(prr"66Îþîp«³€O:E2úÀ\B–Πfý™,ðÇÖô<ÏK»æò±µ¬J­7XÖ-çË-.•ß°YýZ@›o^·Ï4!üpC:ÜþÚc(Ÿ8İÍ+½|eN;+úõÃf®&'’˜§¥šÞñ,Ín£.ãC¾Æ_ ).šœøàzžeo—¹Õ˜Æš¾1ŒÞ¶’Ñ+­ëôðÁçÏMÀS‰Í¾qŽOÍÓSX°ÆzZï&¨3þäç]ÿǘàj¹Ö-r$í |jEZO‚GmÕ¶~zìkŽÏØ`ý pBXÌN…~V:eÿßçg—¬ËΕ&a½b3N ¸þþ|®ÖšIÍîÃy¢;ÀM®¾¿šG÷Çòé4’Åc?1„%Yßõ»˜ñri®( ýÉL ÊSòÚŠ©b³l7Ïç{Ý‘Ö-ûЍ8žO¦º2zú+|™õÝð+ÑÆÛÑ,²Bˆ{Ÿ"33ÓbÏK±Ãoì¤]¥^EÖ9tèIIIøùùáíí]h‚‹^¯G§ÓѶm[»­?ϺÌ)$ tÇS¥Ê÷®g#‰é)9eÙËæ>!P\½üë[·º ‹3ÞààÈi¸ÿÝÒ)$¦m¶ø×8z¬ßÏ“~ç1óMÍ u­ˆK®m˽Þâßg7öGzÒ݉٥š/j5.GÞ~ÈZ.=·n/u›e¥T¿»©&Ê\·WÜãN®TãÅFYñûÈ"ÊÌR3L8kÜòÜâ#cPü[²ŽP’c!îgöþ°X,Ž™ÁnРçÎãèÑ£$%žièt:jÖ¬Iƒ Ê.¥;ž6ß­ÂÓµb )X/ÿýã©Q‹YÿW3 ÿàQ9 q^EÅ’ÅdâI6®sfTþ½"¶Ñfßä[FYO×¢f$J­sZZ¡eé—Ï‘~—ã¹u­ðÄ:;ÖrK­ÆM-÷þ Rªqs“1(DöBqÏrH‚íééI‹-hÚ´©Í÷\;99áââb÷'‰uv·¸3¿Ùå¥>CììEÍ Ò¹Ù›Œ¯W±Äíåÿ^[ý)žjÖ6º$wb_Ò6íÝ7º={råûïF‰ø tt¥"cL”FYìëd G’ñ'„åƒCl'''4 ÆÑÛW¸U}ÿ {áŽÛqñèÁä~=½9¢^ ˆúi—Í™lGSjµÔy2ˆzCŸut(B!„BÜWä½"eÌžgœåìuùÐè•‘4ze¤£ÃB!„BÜeäkº„B!„B{“[!„B!„°I°…B!„B;[!„B!„°I°…B!„B;[!„B!„°I°…B!„B;[!„B!„°I°…B!„B;PdffZ …Ý ¿±ÓÑÛ$„B!„B«]¥^vkËb±à\A6®Ôê®uH~gosèúÅýKÆVÙ’þ¥!ãEÜodL Gq'tgo³{›r‰¸B!„Ba’` !„B!„v ¶B!„Ba’` !„B!„v ¶B!„Ba’` !„B!„v ¶B!„Ba’` !„B!„vàì脸Ÿ¤d¤°áà÷ìÿýiÆ4G‡S€V¥¥‹'†>çèP„B!„¸ïÈ ¶vtäâq¶ÝqO&×iÆ4¶ÝÁ¾ÓŠB!„÷‡Ì`§§§sIAÔßQ$%&ZGç©£F­<äWWWW‡vR6cüfÞÝÆÓƒçð°  Å·™»ŸýW¢P9«s}kusz7lRªu_YÄ^Ö}S­Ú½Öf™%3‹NN8º{Ví^K×fD)Y,ëæDN<Îå8ºjµðoÛ/¥µèAw¢ Ûã%…³‡Nð×µŒœñâéd-Sd¦(óRZ—U(Ò‰8ñ;"n‚guš·kFmÖ9is"gÂNÈw•ìEycºy– ë.Ññ¥¾TW—ýøÌùÉÇ}”o~†g†´Fm#†ìeÍ#fg*ùwÀ! 
ö_/‘r3ö-;R£ZÍBëD]»Ê™s§ùËr‰¦þ;´“²¥Çì'ìïÝt6ÏÁ^=4‡%§¯,ÐѾá|?<±„χø9.`Qî9$Á޼t™.íºÙL®œ²ýá{íš`Û:»•»ÌÖÙ*‹R¨q±X(¢™¬ÙEõÖn@ßüÔïö÷IMcÐúM 0])bðxh!Û&ÍÅEYH¡sñ±µý%íU‰þ 3iš'ølö(»»ä|}ùÔJÞ\ÿIEÿ ú¶YÀ®éÞñ Ë/³kzNîÏ[GÏ;zË"íâo¬ÒÃÔõ ôÕbŒÜGßÁ øíâ`j7uwtxâ“p|7«ôðúÊõôoêŽQ¿¾Á Øþ#tûl” ftƒ³,ÜÁ‹Wñbû*’—{N%üÂóÔp•1(Göân»³ãfgÀ¥›‹ú>?³ÙŒR©D­±:Y,Ø Ãº^%.ž CYì1wiâ‡$Ø ñ E&×ÙjT«IB|B™Äwy)Ó6-&"@Më€ÌìÚ )jÓÖÏæ|€Ž^=—òfËÚ°U/íÊ^Úm`L˜¹-o ÷Æ ù¬WOë‚æL]2U· ü·Y½\-þÉ´õ›po²qõ¼ ¬/-j)/†~oÝ*½xwï~¼3ô=š»ZËŸûzSÆ|KÛb®ªOŠZÉ´õ ²b‡†Í¦ó}ƒÑÓ7žÆ»{7‡µÌ¿åtf÷T&ÿF÷³¥3k'Œ¥žx‘?nÜ¢ÁCM¨óÈ>%‘!Ûv@z:iÉ\\!íFàŒÖÇ ëí3%.Î2âþàPD&Æ&`N-j ¦Ô ÒcÒ¹¸è´h+jPÞO\ˆ‹¼~£ðÕ òö¡®£ƒ÷¬Ø³k€:d%*¿6¼ì?E%µ¶Ð²ÝQ‰(†ù7®bmHk­“œn .æø”1(BöÂQ’#ÃY>o&[ÔeØì ÷ª‡H»z’/ßÊwÇüyý“×é߯§@¶ê#÷ñ¡fBúÀ«¯,â±µñð&Ô·.hŽf͘٨FÎæ™•Š5Aÿ ŸNŸÃ=ÐêI^ïb¿:9åÿ„mcéø%„aÝ–à£y®‡Ž/FŒä;=pl4/ÿ5‰ÅÓº’XHÝMeBIÉ! vfffÎEqrr*¶Î¿’°Ža¡‹1z÷cjÐ`Œ}Îû‡Ç3FùŸù_bØšÙ}‚y§Kg.ŸšÃ—»†‚ço¼œ§¶ëY‰‹ÝÄÌmÐÀo˜¶²þÄrþèÞ“‡•wî ŽfÀØÚ~ë\Ú¡­£9Oöë_hئŒ+Ä™bˆ»˜ÎS}—ÒVy‚ù›—1éã V¿ùÚ =)¦Ë¤»ý¡¯Y€QócƒžÆxy-ËŽÏ&8]ÅÖ@£Í¾ùÐw¯ïÝDÝf“y«yô'ç°ìølv ¢sY ”û@÷>/R¸uc;Ö/& öK|ü•y†Þ¿nÇÔcãi9{í­üêiûiSŽÎۘƛzÞµˆÕ¨éþxˤ«éÓs!£;ø£c~cÑoóªb/I¿×Õî=‰Ý½Ôëœ>õ‡W/ ‚§éX_fnDA=¹–¯¸â•õÙy„Ïõ0ÄÏ›‡š®á«ÇµÊ‚ý¼QúTãõN0µçB^ŸÝšÔã뉠“©Bm·IüÔ2…È>P8DüI& žIDÐ(–Li‰éÜ/¼1},±Î_ðFËLšJRð>âGløfŒ~B7Ò5Ogl׳¤¶é#Â6AÐkoÐ(}7sgíeHßúTWBòÙ_Yu úŸ\›còTðè3Š%s[’qz“fm‡Nc0FnãùñKè7u6+ªsxSg}B‹ÇæÓýi>˜ÃÖê!¼òx#‘Ûx~üGê¶ìð -*8úEÜËöš.“ÉDff¦ÍK0 NNe3åvèàgiÀÂáïÑ\ Ôi‰Š©œöRs8ì#R¨ÍÔ~#i¤ÿ®³8sa8ûŽîáåV·Û(I½ö}wñßf5!®6ë¿XÀúnX‘á{À{<}¼¸_'odÞévúÄSzíddC/ 3_h’xb}(?F¥2$ëÎ’âΪ³nÿ»£—ÓV4ìI-zñŸãKX©RØì]/†¶oÅ€Nƒð4Æ€O}8}311t.&æ•»›u&ìÄ‘ï­É5À•ÍOzŠ®ºÊÔ«äI”§ZwOZÕŽàçs§hô­z ¥ÍÑÃYW Xi4ÞTv¯N w ‘6—9Dz—ù¼Ù¡1q‘ûø.Ú>ÊÌW&óÜç{Ìbãϳ~ÉzÂôqÄÜ4RÝGÎ'‹¼”ºJTÀÌ•°-¼0~)ô™ÀÓí+¡ª*…–‘ññ©{ùáû8"ŽEŸF}7ëÌ¡ŒAáH2þÄݱï"èÊ·ž ¶ð}†%¸q¹¢’ˆ}ßqŠn, y”jj¨ÑwÃ~ØËû/ѵsî6Н÷òg_ótsÄV`î38tñY‚¹rúÇôþ%Hj¯ü²èÊw²cÉ’ÔKŒýÁZ®ôlÌŒy iØ­!jÕ¶Amhâ W{”6M}0ÇlÔM¡E9©%lsX‚m41E&Ø*UÙü±P»ªú<”“ºÐ­Û{tµX8ç \`îò.yŠ;Æ\UšâêU¥G£Z€*æ)¯l߯4¿‡Ø ]ƒ‚páv~}òÈg¤P›ÖMЉ¾*ëzå|Òúö¢:¡ü“’%LrU5P‹Æ¹º·YÃÇàøPW°Ù7˜õ½ˆàyÿ)“—û™Å`¾ýÁl&3ëÞC¥npxï™s*µ¾å17x“'ÁÎËÖ2Õ¨Ó²1.îÞ4¨¦E P¹3=¼?dÝ Ê5cjh´¨jvdVhGHà“.£™ô]viîèðĽ()‚53G³ê œ±—4Å%ûïN!e*àò¾µ¬:æÏÜgÓÆG†¿Y3üÞùà›fµE¥Ñ¢–1(@ö”Z-P‡Ê9džJ÷~‚F Wb¼€Œí»7ïBÕ¯Ò9wÅÕkGË&ž€¼ý™Úæn;GPýJlÜcÖ4-ÑanÁX¡^»Îð¾õ¸I©ÓAÄf‚ÚL*di“õèÊhΪëQD]!lsX‚­P(P(E&Øeõð€””à:ñõ ±[ü¸±?;½fò|F"ð˧,'ûn ýK9éÜJ¬ÈiÃXl½t0.n ìØ— ›Wñ¿=UH¡ Ï6¬š+"=ß¾µ&[g”‹ÃÉèTÚÖq³F÷ÿ­œÕPüóòlÿßóÄïˆËgÈH-ªo¦°.&ŽøÞU½À¸… E“yt©-·L&šµîIò¯£R+¸U¡­<’¸pýZN]g  !ûQhÆ´_pWÔY¿ÕiÉI¤¥Ç±÷L •5\19º7îÜÑŃ˜‘1ݳ:Z¿p«MÃ>À¥XÒ7G(î-†>ÍÖN/°bÏ`jg=9Ò`ˆ,´ Àœ–´æáìAu-ZªÝIZüó2¦±{¶ŒAq÷É>P8‚!!¨L:XOÚcæÄêE„y?CŸ´ „oŽ %ûî˜3á\q®;á9m˜‹­—&².ÃTÑæé‘ðÂVvÖ¯Æ)žfZ£’Í›ÓÒ€k$çÄ ×N¿æDüð³–Á?n¤™ ç˜Øá ÷%~ʬº— ®=#¤å#ý€#Lùf)ú„¿9tx*K.^wZ<´–%Ç ÿc¯n^ÌÆ¨ä `ÂÏáyNDlÊ—õ\ŽÙ”5Ë€fúz•hY€–­‚ÓL^5‹“1zŸÆä°ÓPm϶²Ý7VjŒ¦d¢cvñÎ'“H ©¶çYt;~úšk€¶ÆÓì~s o~ŸÆ¿‚ti?$Ú^6ï¹…Ÿó/ÉžkÒžzã<ÛEàãû(þÕ5\-b]åE½OÂö9|s@OBü NïXÍÜíЯoÓœ?¤Bd‹Øºš­ÔeÆËp¾ù7ú"ôÜH5YV¡–°‚/7ŸäF|"a?0vø÷iHÓOƒÂ1d(¡N»îÀ·,^NL|"a[˜´d/èÔÔîØXc-KJ!æÌ>ž1“Í‘y§ JR/÷<€GÓGF8 g}OàŒî9I¹%ãVO˜ÌòÝW ¤«ýh7à{þ³è'þ‰OáŸÛxaV8TwÉUK‹É|‹„h=ߌƒS€19°>÷éÔ?z"¢1YWÛ2ƒ­Óyp-6ïŠU ÅξÿúZl4:Ç¿\‹mÚ:3XØé “,æÕ¥‹¨^2sšàB>íùãw-æÕ­e•kdQ·8_Þ XÏíRy”Ízè×®ù¤Æ<Ѐ£‡¯1( mž’´›'I<4ùg‚ãø'9‰¤„¼™QuïD–„ÈúT›‘!Ÿñ°R]49ñå_ÖÙùv™¶Æx>í{IÛB™´2wï`>|n:%6ûÆ9žÍ’5Ö§¡Wön€*ãa»>äÅà*¹Ö-r¤ï`\h%æ?9„ºUýé‘uáÂÕK_óöú 8eíÂÍ(€ Ò-ÖO&(œ­çÀÌJ nÿ\Ô2¿n™ÇöJ³èÓzó[Üdû–UD:º/ì j§æŽMbêı|žõ]àØÙŒ ô¹£vÅý(K'­³'³B^ÈSòÒŠUxÛ({yåzžnó Kf¤0vÖT¾Ëú¾ýðɼñ|#¼¨Íœ×™&cP8€ì…#¨|{°âýX^˜8“°eÖïÇÎfxû*(©ÂWŸŒâ?£gòlVYûáÓ˜ÐÛç²ç‘•ÞílÖã"€6_RR…γû°jz½;ÔÊõ½«~'®¡ÐX•>³Ö³ˆçC³bíS—=×­?×èøs§2)Èz©zû >Ô=¶åcöÒ÷ð`öìsñÊá‘|ý¿'œW°îç£÷Ò÷Èà¯Ó"›"33ÓbÏK±Ãoì¤q¥VEÖ9zèéIù5¦š·O¡ öµØhÎéÏâªSÓºmÑíåvöƱ"ןg]æT’Œ·@é†Nå’ï}~·HJOÍ)Ë^6÷ âêå_߯ÕMX–1’m#_Ϲ¤7wÝÂbÍ]f}GvïNÚO[sIfpuõ*´-[í{ž2›}cÊ»½æ’Œnxhœó¬ï~|7`qc àéùÏÙ,«V¹^*Hˆ?ǵ»pòÓÓÓ— .Ia“×ß¾µ¶ìƒ(…Rý™L¨5Z”¹î³ºÇ(\©Æ‹²â÷½fÒR 8k´¨”ùÞÏj¼%cPØ•ì…#”î¸ÙHZ† 
œÕhÕÊ|ûRëþ2»,{Ù‚ÇͶëå_ßÑú25a;gu̹ÿÚb1qfÍÛü—ùüy¿˜}hU®¤ Ïž¯ÅãZ)Û»‰‰…'ÖÙq–kjZµ<1W”5%Z7ÞÊŽ$ãO8‚R…ÖÍÖ¸+bYL½üÇÍÆ«á¼ÿîjöƒ×C[å}¸™áo~9õÓçøÙ/V¥7÷Û±WW+;ÅpÌ%âž:ü[4£qÓÆ6ßsíää„‹‹³ÝŸ$^ÔÙ¦âÎDe——úŒ•³ŽêZÓ¾ÙFÖó*Ц­öòïZ}šùQÏF—än«¤mÚ»ot]ü;³íèG‡Q¬~mú8:„R“1&J£,ög2…#ÉøŽàãf¥†š :0õÅéúåKÚÕ¾Œ~¿îÅs§Û-Dq’`;99¡ÑhÐh½ùw‡¶êPæzÇí¸xtf\¿ÎwÜŽ(;ß‹…ý§ØœÉv$­Ê•~mz3¸Ã@G‡"„BQ€Ê§9!š;: !þ5‡½¦ëAaÏ3`r6­|Þ=„áÝC†B!D¹"WLŠûÁùš.!„B!„ÂÞ$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„™™™…Ba·Ãoìtô6 !„B!„ÅjW©—ÝÚ²X,8ßëA–Vø]¿¸ÉØ*[Ò¿¢4d¼ˆûŒiá2îă®,&‡åq!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ìÀÙÑq?IÎH櫃ëØõû.R©Ž§7•=ý{2*p¤£CB!„â¾#3ØBØÑoÃÙttÓ=™\¤SÙtt»NÿäèP„B!„¸ï8d;==½^ÏßÿMbbb¡u<==©U«~~~¸ºº:´“²ã70mëÏ„ þþ*P(wܦÅbnrð÷/9øw$x4ç‰6ÏÑÐU•S§¤ë)*¾²ˆ½¬û¦(2²Æ`ÚÊuiûX ƒ¢ÌÉ>P”7¦›§ùjݺ½4šjŸâÁáûâŋܼy“–-[R­ZµBë\»vsçÎa±Xð÷÷wh'eK‹ÙÅÏo#Ðü?ìQ4 –ù³!<<|HNeCØ*æÿ™®¥<¯vc?ÿý]¡ñ•Mìw¯ýò¢Ð™k³‰ôØT*x5¦ºüu†›®¸z¸PÜŸ‹1ƒø«ihkV@Áús /4ê;»øä^a/–9šåÏ=Î2=´êÔŠcŽÃYÿë|ÕŽNÜs /A¬Þ7•Fšh–‡¬Å¤ßÏ£r¶¦¤dDÐV )#©ÀúR£Ñÿ=Æm왵No¿ÉÑôìòèýA¦¿ý‰QŸäŠÝ›á[W}±~Q}óûáqôŸw»l䮯(§s¡w…ÙÒ™µÆÒØÝ…ôÄ‹½ô'I@GFðéàþ˜3ÁœšNòµ42Ó¸yé&7/%‘‘nÎÓŽ‹³†Œø?8qˆ± Å,cÁ”šNò¥›Ü¼t“ä¸ Ì™Žî ûˆüü&ÒÞ× µwMü”¸gÅœ]Œ¤kVâ¡ökÏX?8•PdY^Ñ, žK«‰+àç†É˜~OÐ*kºZרS[Áž¿n8zsÅ@öÂQ’"0gTÚ´iC›6Á,ßqŽì£ŽÔ«GX”S6ŠïŽ\-´ [õ ‘;˜8g3çNl& €É#ŸaΖs·4_eù¨`VŸˆut7Q"™ÁÎÌÌÌù¯(NNNÅÖùWV0(t.ïÌ Áø×ÿx÷ð†)·°Î_ÏÀ5oaôÁü.=ˆ8õ6KwõÏóŒËÓÆ:Ûõ, \ å­mÐØo8­L›X{b ¿w€¿®Ÿûˆð ˜\»!.žCéïñ!‹—ðW@’#V` .Úœµ7eDrÝÍõ‹i<Ûw)ðßÍ0úãt6½¹·Œs$›.‘\‚íï¿æ š®LzÃåÏY|ü-ú§«ø%Ð`³o>÷ýŠ—ö†â×l&ÿmîÏù“o³øø¶/R¸uc;Ö/¶žÄ¨ýß?EåGž¡÷¯Û1õXƸFZÎ^»E+¿z@Ç~šÇ”ü'v4ÞÔó®E¬FM÷Ç‹X&]MŸž ÝÁ-ó‹¾x›ßP{Iú½Î·÷,ŽôR£9qêa«§£g8ÝêËÌ(¨þ“Ûøq€– YŸ ‘a,ÑÃ0¿ªÔoº•w+P6ܯjž6ôß½G(A¬Ü°`2¦‚>#`Mq’¸™úKQh„ÆÑ-îk²„‘ƒ'¢šÈÊ)í¸un7¯L!Æù;¦µŒebÐhƒg°fJc®‡¯fâè ÝOÏý-©€†ýñ%€7ŽÏçS•ÂfߨtѾ-ƒ:=Kc4ø4‚Ór2&šîrïa¡Üݬ—…Ÿ8ò}Î\ÙÌñ¤§èª«L½JžDyú u÷¤Uí~>wŠ€FЪÇPÚ=L\®¶4o*»W§†»†H›Ë"½Ë|ÞìИ¸È}|íÆÀGeæ+“yîó‰½OÞ`ˆÿ“µKVq@CôM5}ä‘—Rçf"ÃÖ3xüûÐg!í½Q> E¡e9×2&ཹ~ÿG|•Ö¯«7ï ,bꢊ¼Ö·.gÖ½Â2=à‹$Áw…ìÅݤß÷5z‚XÿÖ|•€ïVâÎ¥ŠJôû¾â}XÒ™êj¨Õ÷Fþ°õû/гsî6Н7ö³]„4÷‚ØÊÌøb<¿^|™!Ü8ùãzŸ–þMôBÜ}K°F#F£±È[¥R•²Õ’Q»j€FÔÏI Uôî¶Œ^ Gã*2cy¾ä¸Cä¾0E­)®ž}ù¨ÂP¯wX¾‰wýêóU,ô Æ8òëXÂ3<?â$ÏVu㯌û_K¿_Ì3cffÍäæC`ÝÛ‰·›ïãÔd%WS’¡„I®Z£êÐ4W÷6oØŽou%›}ƒù9Íé áÇÏÜyoÁ,F„BŸác ö[Â…öM‘9DQÖd(ÁYë<„wα¡’¦½‡ð°ÅBdLEà{FôÝžw¡ê$wÎÝFqõ:Ф`ï–Ìê 3¶aHý*¬Û×4/éa®ç°[¡P P(ŠL°Ëê!É)7kÜ<0²ac6{½ÏËñ@W¾žò-eÕ?ÿÇ"Ž:÷ÛsÚ0[/ Ì€ €;ÁƒX·y) öT#™ö¼ÐÐ'«  =ý«º[Qu o-ÿû©PH‚ÍÑè«c­+îWvÎêÛG…%ÜþËóÄï¿.Ÿ =µ¨¾ÃÊøÏˆó<^µ"7¸hÅÏ™?¸n™L4kÝ“ä_?F¥Vp«B'Zy$qáúµœº.Î@Cö£ÐŒ%h¿à2®¨³~«Ó’“HKcï™*k2¸brtoܹC‹»01cGfZ¿p«K“>À%ëï‹»£÷ƒž9Álê4–õ{žÇ7+¶ä” -Pv[,»§o‡á‹ñ˵‹3DŸá¢©ï‡Éú&•¯]BT™»eOöÂ2®UI#û¸ÔÌ‘Õ39à=‚ÇÓn#Ùvä%²/àŽ>s€çÆxp § S±õRÁ¨Ô´z¼°‘ÍõkpŒáÌn$§0EùqŸ\0Z: Â÷Í"Î'DrððX\ŒwÃ}Ö²ähÎÿñ!›ç•÷}Ý%©gÈU¿J“×hG4ŽŸ fËI9I¹oµFÀvÆïZÁ¥„HÉŒð¨ÛoÃ!Æ}ìÇ+?ÿ’çDÄW¡ýØpù<—b6òÚš€ôöÍ{9¹Åpˆ±Õ+°,@@«À ƬšÌјó<>Ž1a' Úh^le»o¬4MIDÅlaÒ'£H ©òÐ [vüô5×m§ÙýæÞü>?Œ/ éÒ&~H´½lÞë76~οL${"¬I{êól?ï£øW×pµˆu•õ;Ãö)¬>pŽøøXNìXÊôíÔ·E!'£ÄƒN¿õS6áǼ—»á|3½^^¯'6ÕŒ~ëR›e¤Þà(0¶sÓ¼¦^`âèÁ|¼ç ñI±X>‘÷õ0¨]]Go®xÈ>P8BÝvý€/™·üÑññèÃÖ3zÉvЩ©Ûñq`™µ,)‰è3;x|ÄD6Fò¶Q‚z¹çtM;3’ÌšJŸý»¯EyâlNGll,+V,t;ûþëØØXt:ûŸ±r«³€O:E2úÀ\B–Πfý™,ðÇÖô<ÏK»æò±µ¬J­7XÖ-çËÀzÁ-.•ß°YýZ@›o^·Ï4!üpC:ä|[£åfÝÀŒão1ä8YíŒfÙ A`ØÌÕäDò´TÓ;ž¡ÙmÔe|È×ø+!ÅE“\ϳ¬³óí2·ÓXÓ7†ÑÛV2zåJ<¼GðùsðTb³oœãÆSóô¬±^†VÅ» êŒ?ùy×ÿ1&¸Z®u‹é;Z‰ùO¡nUzd=?éꥯy{ýœ²žÃiFdn±~2)@ál=fVPpû碖ùuË<¶WšEŸÖ#˜ßà&Û·¬"ÒÑ}a>F²xlã'†°$ë»>c3>°¦£C÷œTô'­³'SB‚ò”¼¶âGªØ(»r!{`ˆäÐS“÷¢Dµ_/>™xžÑSFðeÖwS?ÛF{o¹xQ”=Ù GPû`ýû1 ž8‘ˬßõ»˜‘í}PâßLdÂè‰<žUÖiø<þÓ»&Î'ÈžóVzw²Y‹‡·|I‰Ýg±lútðutQ*ŠÌÌL‹=/Å¿±“v•zYçСC$%%áç燷·w¡ vll,z½NGÛ¶mí¶þ<ë2§h4‚ÒO•*ßûüŒ$¦§ä”e/›û„@qõò¯oÝê*,Îxƒƒ#§å\Ò{»®µgUEÜ”ycÍn+ñ¯qôX¿Ÿ&ýÎcæ›$šAëZ±¶òþ\Øö{ž2›}s+ïöšo’htG§qɳ¾ûñ…%Û½æ÷¶YV­r#¼TŽk%x•Úòôô¥‚ ÄÇERØäõηv”}¥Pªß]ƒT“ Æ e®¼æ~w¢p¥/6Êl¾»µ$ûO³¤TJµ·\gSe ŠKöÂJwÜl 5ÃÎÜÔÊ|ûR3©©9eÙËsæìEÍ 
Ò¹Ù›Œ¯Wt2^T<ÚêOñT³†4°Ñ%¹Û*i›öî›]/ÿ^l:ºÉÑakP›Ž¡ÔdŒ‰Ò(‹ý™ŒAáH2þ„#8ä¸YéJíÌ~q!½ýäé¢üqH‚íää„F£A£y0žºêVõþ7ì…;nÇÅ£“ûõpôæˆ"Œ   ;Oï²9“íHZ•–m‚x®ÃsŽE!„¢µO^š ¯å—Ã^Óõ °çg9{]>Œê>ŠQÝG9: !„BˆrEŽuÅýà|M—B!„Bao’` !„B!„v ¶B!„Ba’` !„B!„v ¶B!„Ba’` !„B!„v ¶B!„Ba’` !„B!„v ¶B!„BaŠÌÌL‹B¡°[ƒá7v:z›„B!„Bˆbµ«ÔËnmY,œË"ÈÆ•ZݵÉïìc]¿¸ÉØ*[Ò¿¢4d¼ˆûŒiá2îăîìcvoS.B!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„œ€÷“”Œ6üžý¿ ͘æèp Ъ´tñïÄðÀçŠB!„÷™ÁÂŽŽ\<ζ£;îÉä ͘ƶ£;Øwú€£CB!„â¾ãìôôt.é#ˆú;ФĤBëè2—Ü>MàZfªÈeFö¢¼1Ý<ˆu—èøR_ª«e|ЇCì¿.^"åfí[v¤Fµš…Ö‰ºv•3çNó—åMývh'eKÙOØß»élžƒÝ"2þÌk‹Fq¨ì¡#îô&6„mçÓ7–ã§,e|7öö÷ÖBã+“ØïbûåE¡3×f鱩TðjLuø'ê 7]=põp¡¸?5cñWÓÐÖ¬€ƒõç^hÔwvñɽ:Ã^,óuÖ<7ŒUzx¤“?§ü<ÍŠ_GP[íèàÄ=§ÐñÒ‡Oö¥¾æ:kB†(ûtÿ8ü´)ìœñ4 ·Ã#Úqê@xN™ò—õLž·¯àºZåÇ¥}qsô6‹û›ìE9bˆ9Ïç_îĸ5ÁâAá;òÒeº´ëf3¹rÊö‡ïµk‚s¸ˆ2[gØ,J ÆÅb¡ˆf²f?Ö—¿]‹ÅÂÉ}s8šá#ŽðlUâÎO#xÓ&¾8¥gnK¿ÇàñÐB¶Mš‹Ka‰¹sñ±µý%íUéÿIîo3iš'ølö(»»ä|}ùÔJÞ\ÿIEÿ ú¶YÀ®éÞñ Ë/³kzNîÏ[GÏ;zË"íâo¬ÒÃÔõ ôÕbŒÜGßÁ øíâ`j7uwtxâ“p|7«ôðúÊõôoêŽQ¿¾Á Øþ#tûl” ftƒ³,ÜÁ‹Wñbû*’—{N%üÂ0ž8‰ŸNÊÙ¦é·ñxð&½Ù­£7XÜ÷d(î¶;;nvSââ¬!#îEdòglæÔ¢–±`JÍ =&[€‹N‹¶¢å}ðÄ…¸È à7Š_k*£òö¡®£ƒ÷¬Ø³k€:d%*¿6¼ì?E%µ¶Ð²ÝQ‰([÷Çþ«XÒZë$§ò­á:_/ᑉÐËOÒkQöd(%92œåóf²õ@]†Íž@p¯z(´«'ùòÝ©|w ÀŸ×?yþm| ´a«ž1rÿj&¤¼úÊ"kQÏo2a@}ë‚æhÖŒ™jälžiQÉÑ]!D±’`gffæüW''§bëü+ 뺣w?¦ Æø×ç¼x×Ã?ojº†¯×( öóFéS×;ÁÔž y}vkR¯'‚nL~¤ p{v%â»OùŽ>¬ÜÈÑ›*²’ ƒg4Š%SZb:÷ oLK¬ó¼ÑòÓƒ¦’Õ•|öWVƒ…¾’\‹òÁasZ&“ “ÉÄ­[· ý/»¼,:øF°pø{t«Ó–ÞÝ>ej@?šz©9ö)Ôfb¿‘4ªØˆ]gÑÚöÝ“§’Ôkßw šÁ˯§ùAÜbgøðE/hÔ¨pÑ«¦±÷üFæ,ÄQ|[wÉö ÚÉHÿÎ4øu¾ ¦=ü• Xop)î2íÃaÖíwôroØ“§z®æÝ–µ¹¥_ÂÊ_l÷‹®CÛ¿Ëœ~#xØÛ?Ÿú€311e>^Ê+w7ëL؉#ß[“k€+›9žP™z•ª4ÅÕ«JFµ TÌS^ ؾi~±!ºáTnðŸöõdú®PænÚDõZ4LÞCT­.T.4úªt¬ë•óIëÛ‹ê„òOJ ”ð O¥Qµhœ«{›5| Žou›}ƒYÑ‹ž÷Ÿ2ùw¹ŸY æÛÌf2³†½‡J ÜàðÞÿ2çT:3j}Ëcn>4ð&ëRüÂØZ¦uZ6ÀÅݛմÖûB+w¦‡÷‡¬»A¹fLMUÍŽÌ í©|Òe4“¾ëÁî1Íž¸%E°fæhV€3òÒ€¦¸dÿÝ)¤L\Þ·–UÇü™ûãlÚø¨Àð7k†¿ÂŒ±õ½.¨€+aÛ9E7^k[ÅÑ[( ²Ž Ôj:TÎ96TÒ¸÷4²X¸ãì`lß½yª~…”ιÛ(®^;Z6ñ,àíÏÔÞ0wÛ9‚êWbã&³¦iIs…p8‡%Ø ……BQd‚]V1HII®OöeÙ·øqcvzÍäùŒDà1–OYN¬úú?–rÒ¹;•X‘Ó†±Øzé`\Üر/6¯â{ªBžmX,Òb¶pÈÔ‹Ð7gdµ’Àœ÷öpÃÃÃFô1œŒN¥mëÅã·â~ã •³:÷•‹%Úþ¿!ç‰ß—Ï‘ZTßLa]Lñ½«zq A‹&‘=s. º•uF³Ö=IþõcTj·*t¢•'@®_Ë©ëâ¬4d? ­$ó—qEõ[–œDZz{ÏÄPY“Á•²¹ ä®:ºx32¦±{VGënµiظKÈœE^†>ÍÖN/°bÏ`j[wjÖ]¥!²Ð2sZ:К‡}²ÎBªkÑ:VíI¨¸ÁÏÓ÷ÂðÙÔ•ÝŸ¸‹d(ÁT&²æhæÄêE„y?CŸ´ „oŽ %ûî˜3á\q®;á9m˜‹­—&².ÃTÑæé‘ðÂVvÖ¯Æ)žfZ#¹ B”÷ÁcJ¯å#ý€#Lùf)ú„¿9tx*K.^wZ<´–%Ç ÿc¯n^ÌÆ¨ä‚©æ"Ë*ÔòVðåæ“ÜˆO$"ìÆ.ƒGú4Ä 5ž“ÀËåÞkqwÉ>P8BvÝoY¼<œ˜øD"¶0iÉ^Щ©Ý±'°ÆZ–”BÌ™}<;b&›#óN”¤^îy¦2ŒpÎúžÀÝ‘»¯Eyâl΃k±ÑxW¬Rè,¶B¡ÀÉɉk±Ñètÿr-¶iëÌ`a§+L:°˜W—. 
zýÉÌ h‚ Mø´ç_Œßµ˜W?¶–U®5’EÝZà|y3`}<·KåQ6ë¡_¸æKóL@Ž¾Æ €¶·c©7‰±Mþ`IØ$ž³~׫ïfžªì‚ÅÇ?ÉI$%äÍŒª{'²$t@Ö§ÚŒ ùŒ‡•ê¢É‰ò.ëì|»L[c<Ÿö½Î¤m¡LZ €»w0>7 ›}ã÷ ÕOÏfÉëÓÐ+{7@•q°]òbp•\ë9Òw0.´óŸBݪþô¨jýúꥯy{ýœ°^>nFdn±~2)@ál=fVPpû碖ùuË<¶WšEŸÖ#˜ßà&Û·¬"ÒÑ}aU;…0wlS'Žåó¬ïÇÎfd Ïµ+îGi\:i=™òBž’—V¬ÂÛFÙË+×ót›gX2#…±³¦ò]Ö÷í‡OfÂóÀbÁÿ7§€®šòµp Ù GPùö`Åû±¼0q&aˬߎÍðöUPR…¯>ÅFÏäÙ¬²öç1¡·Î.dÏy+½ÛÙ¬ÇEm¾¤¤ g÷aÕô(zw¨åè.¢T™™™{^Š~c'+µ*²ÎÑCÇHO2Ðȯ1Õ¼} M°¯ÅFsNWšÖm‹n/·³7޹þ<ë2§’d¼J7t*—|ïó»ERzjNYö²¹OW/ÿú6®n²Œ‘lù:.ä{ 9•¸´TêªèTh é¯i ZÆ»“öÓÖœ@’\]½r.οÞâßg7öüñÞ7¦¼ÛkN É膇Æ9ÏúîÇw7¶žžÿœÍ²j•᥂„øs\K/ûx==}©àñq‘6yýí[kË>ˆR(Õï®ÁHšÉ„Z£E™ë¦¬ûq܉•j¼Ø(³ùîÖœý§™´TÎ-*eÉ–•1(þ-Ù G(Ýq³‘´ 8«Ñª•ùö‡ÖýevYö²›m×Ë¿¾£ôejÂ4vÎê˜çþkçžÎÞ8F»J½ìÖžÅbqÌ ¶_ƒz\8w‘ƒG””\h΃ê5«ã× ^Ù¢tCgsÒÕ«W )X/ÿýãiQKyuýRþÉ€^ƒ‡ç$Äùc©ìQÌÝS&#@ªpõBÇ*bmöM¾e”^è\‹>˜}hU®¤ Ïž¯ÅãZ)Û»‰‰…'ÖÙq–kjZuÙ<QˆÛ”hÝäÂ[q’} p¥ ­›­qWÒýeÁzù›WÃyÿÝÕì9¯‡¶’‡›‰rÇ1—ˆ{êðoÑŒÆMÛ|ϵ““..Îv’xQg½Š;#–]^ê3gÎ:ªWhMûfcYÏ«ÄíåÿÞµú4ó£ž.ɽƒú7³+öè›]ÿÎl;ºÃÑa«_›>Ž¡ÔdŒ‰Ò(«ý™ŒCá(2ö„#8ä¸Y©¡fƒL}q:~r’S”?I°œœÐh4h4ŽÞü»C[u(s‡ ½ãv\<:3®_ç;nG”áÏÅÂþÓlÎd;’VåJ¿6½Üa £CB!„(@åÓœ ͆ÿšÃ^Óõ °çg9{]> ïÂðî!ŽC!„¢\‘c]q?G  !„B!„v ¶B!„Ba’` !„B!„v ¶B!„Ba’` !„B!„v`÷Û 'Ì™&Go—B!„BQ8KÙ4k÷ÛÍEGÚ­ä²î!„B!„â_q²8¡Rhìß®½ôr©L‚áÆ]é!„B!„¢T,hHÀÍYg÷¦íž`WRùà‚š›i1Ü2ߺ+ý#„B!„BÉN™N¤d$ñOÚ%ê¸5´û*™™™…Ba×F3ÌéD¦ž%Í”ŒÑ’q·ºK!„B!„°I¥Ð uöÀ×­1¥«]Û¶X,e“` !„B!„‹Å"¯éB!„B!ìAl!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„Â$ÁB!„B!ì@l!„B!„œ ÅG!„B!„B”g …â‚°ÑÑ!„B!„åÜF'`=pÀÑ‘!„B!„åÔà[…Åbh ž::2!„B!„¢8l¾Nü?›1N­Xš$IEND®B`‚celery-4.1.0/docs/images/celery_128.png0000644000175000017500000000477413130607475017527 0ustar omeromer00000000000000‰PNG  IHDR€€Ã>aËtEXtSoftwareAdobe ImageReadyqÉe<(iTXtXML:com.adobe.xmp MÁÙjIDATxÚìOLUÇßοý¿ì²ü“-•?©¥”ÒZ9Ô&h8™´j¬MøSMèAã̓'õâ̓‰/&5µKŒµÑ“±h0±©T5ØbC üŸý33þ^aqׂìÎÎ<1ý~š³Yx3›ý~æÍÛ™}SeY Ü¿Hx € € € € € € € € €v ÿáñxYÙÅë}Í´è¤j§zˆjUˆ*ˆ·Ú<ôϯF§#ÞÊ)¯ZÆ4ÙñHr= _kT«T—©ÎP}Øê5x»ìÝá<üA)Pèûhq’ª›ª‘ˆ!¨ÅgâÁÝúÕHÅ*°ÙÕ ’`¦d(ø£´x“ê Ä!noøjnW…3’¤ÖÚ\ — ³1ØclŠ žïåïr“‰8¨›O'"-³²¬U—¸ªª¨>(zH២Å5„/p¯÷HìpóÜîØ!Õ𳼘7, x> xý8¡I~–ˆ¶ÍkŠ?æðªÛ €Âç¿@u‘ˆÃ«„ÙîèA]’”2Vï+H€õð/Qµ"qø”«‹JKÉçÒ&¦²¤mºý _p·/X]´-Cá«.næâ¶¬óÑí DòÈ,QvÀ¤n_qq3)ª·ÿU€õÑ>|‚© ïe4àsóô¼IÕ×êØR€œÏù@ ao ûªÜÜÄuªãþ¹Ü'ïéj’«úç´ ±]ey£S«ãçþ £š§ºÍÖ® QøéþqžçGº^J­ê‰XjãÍLUJðEõ_RЩBå `™·‡XdIaÕ±’ö~¾§Ÿ¢Ð‡lõ>ÙƒW»i£‘ˆ¥<ò “ÍnóAªG솟טã Ä!žŠ²]v›~LÕ•½¾o{üñw÷o´#±xµ Ú;ÍÿÕÉRÃ߀O›¦)#±D¶.îñ‘|/…ŸtäÿaYfâø/ˆÛivšÂÿÕ± ký¿uqˆ'àÚiöŽ£ç Öz«qˆ…Ñëú‹>öÓÞ?î¸tüÇ·vEUþ–ý]Ìoœ~k=€ia~€`dÙÖ¿«®Ä#Ù»Ü?ãô븫a(V†DDµqÔÍÎ Zf˜½#Ó²u'îôëÈ–ÉÿB€ýn 0ŽHÄ’6ùe{«Øf»%À"‹e™,mèÅ6;<±Ô¿Ç .#ñèi[GÞWÝàkÄ!ž•ô¼f}Ô ´8*À±ÆÓ|¾ß D"X€ä¬füB?Iàu²à ±¤Ì: ,ÚiÊ/Þ% d'8‹HÄ3¯OÛmúÕ'$AÈè00ÊÖ¾Y ² ßb¦™¶ÛœOÓÿ‘$è°»‚¼;„¬ßõã;Ä"–x žU„êK]MQ_ ßò1Ÿö›†y±ˆC’d¶·¶Ã‰¹œ‚&†l)ÀàH÷‘t29œý †XxkJ¸ú½\>5ì’àË\î¹|¢uà{EÓú‰Xæo²Ù…)77ÁgŸ|Aã…ž-ǹ|zåùß3éL=¢Ÿ%ÔRßɼî^*æãƒ–Æ`ÏĦ=ÀÆ‹QÕvY‘‹8 3ÃÆn³L&åæfø4¤×6;Ç3-çfH‚‡%YÒ8’©%6N˜FÆÍÍÛV€»ãý4hEO –e}Ž]»ñ-õI·F≂È‘ AQ•ID#Žý¼äYÕÜ@ßv¸çéêϤR=øˆ(ðGbuUm¬2æèx|ˆÔäòìzU¯÷QY•o!1˜–É&§f¿ý1Dãƒå¤C«=c«Èë Fº^æ7”0ÒF1‰çT©³•û–4Ŷ»÷³œ›E—|»øÁ‘î'MÃxO/Ç cqD‚ÕVuyÃl$P üüE„ïÌíâ·èž²,³›O6åó ù”3Ì:r¿WÊÍh°öNÀ]ö«!Y–µ=ïc…þ‡à>dâ-€ € € € € € € € € €€Ä_ WaÐ]ṈIEND®B`‚celery-4.1.0/docs/images/celery_512.png0000644000175000017500000002072213130607475017513 0ustar omeromer00000000000000‰PNG  IHDRôxÔútEXtSoftwareAdobe ImageReadyqÉe<(iTXtXML:com.adobe.xmp äÎÓá@IDATxÚìÝ p]w}'ðsߺҵž~¿#‡$Æ1I«ãn—BaÛÄ*,ÅeÃnéÀvHJ¡°-›…°dJg6´ZÖ…NÇ4Ófhg™uxõM“bO !!/DzõºýY¢&µI‘¥«óÿ|˜_®,ËXç÷÷Õÿ{þç•kµZ —œ        €€€€€€€€         €€€€€€€€€€.€€€€€€€€          €€€€€€€€€        €€€€€€€À3€\.§gÉ-GöˆP[C †ÚjM¨þP§TO:¡:BUt`ùÊçŠI¹P}´\ì|¬R¬Oòå|!_*rÅJ.—$)OMÇ+NùãPá uO¨»BÝêÀ¶ÚÐÑ…øþ&ç~`A'úsC=oªvNU:áu ›}µÔý£j©ç‘j¹§^)tÕ ùâÚ0¡wŸ…¿®ê¶PŸ µ/„cÀÒLø½áågB]ê%¡^ªª3Y•K*ÅαZyàû]•±°g¿2Ÿ+¬[¢o&] ¸!Ôu!<*œÝ ¿<5Ùï õšP&'—ìȨR¡#YQYõƒîŽ5'ÊÅ® 
¹$Wk³oñx¨…º> `á&ýôØü/MMú¯ UÓ€l«–zê½Õõ÷w•û; ùÒúeòm uE€ùOúéÉy¯õ¦©Ißñ{€ÌOúÝ­žêúÃÞ~W>Wè[¦›1ê]!|Z˜ý¤ŸžÀ÷ÚPoõó&}€ì+æ+IØÓª¯sC3Ÿ+öfhÓ® uM-àÌÿ–ðòÖPo µÑÛ Ûrá]•þ¤¿s󫥞•IvÏåJO|ç™B@:÷G¹§&þ‡—÷„zC¨¼·@¶ås…to¿&þ‘B¾Ô>µ*ã›|U¨'C]}Æ0Ë @˜ôÓ‰þ?„zo¨—z;d_ºÌß×¹±ÞWÝÐÌåòå[°çtçDq LüéÆýr¨„ÚîíÇÄ?е¥Ù[M/ÓÏżқžxéÓ¯Èt˜šø_ꃡ.övˆaâ/§Ç÷“ÞÎ \’+èȤôÁKN½O@fÏ“zÞëC=߸d_zŒ?øû;7¥Kýé¿Éÿ_íõîP;õ“™Zzßý&'—üˆ@wÇÚdUm°öþÔ}fé·Nß683+aâOïÁŸžéøß’“OÓ 㪥îduí9IGiòAz&ÿ™¥W>¤'Á¿/3+aò¿<¼|*9ù$>2.]î_YLúª4cnÒmJŸ"¸¬W¦žÆ÷ñP¿fL"Ù-$kWœ— ͘»žääómþd2H-ÓÉÿÂËÝ&€Xöú‹ÉºîíÉÆÞ&ÿgçÍÓ,«Caâï /×…z‡1ˆCµÔ39ù§æåYk†êìÚ}tÙ“ÿ%áåó¡.0~Ù—Þ· vN2й)Éî-û]ºòÿ²Pµ,„Éÿ]áå›&€8¤Ëü›û. “ÿf“ÿ»l²Çm>ñ§—÷¥gøÿªñˆCg¹7Yß½#)äKšqvloë&ÿÁðrS¨‹Œ@ú;7¥7õ±×v×¶ Lþ¯/ûBõ'€ìËåòɺ$+:VkÆÙ·®-@˜üÓKn eí éRÿ†ž 'ÏögQÔÚ*L=½/}dïï€8” ÕdCïó&_Y¼ÌÕ6 Lþé7óéPo1.qè(u'{v:Ùoñ5Ú"„É¿^¾ê Æ å¾dCÏŽÉ;ü±è†—<LÝÙï‹¡v€8t•û'ù§'þ±$ZÒ05ùÿU¨W €8Ô*+“õaÏ?ç2¿¥tOúŸ%‰_&“?Kæ®% SÇü¿hòˆGzÌ}÷sMþíáöESgû§'ü9æ‰ôúþôlÇüÛBú4Àé‹ö8à©ëü?›¸Ô •b-ÙÜw±³ýÛÇ­ÛjC—§sÿbƱ˜üâQÌW’½;MþíåsÓ,Ê ÀÔí}ÿTßâÏ&ç›®Ð6Ž†Ú´­6tlQV¦ìs£¾Ä"7y¶¿É¿íÜNþ‹²0õHßH<Õ «kç&}5¢½µ5€GÓ_œÕ€0ù§Ov¸ÉäîŽÕ&ÿöô¡éÉÚÙ<ð©Pé9@*Å®dÍŠó5¢ý uýÓ?yV„½ÿw…—Oê9@Ò3ý·ö??)y¬o» uiØû?xê''çþ…aò¿$¼|3TYßâžô·¢²J#ÚÏž0ùúéŸ\ð0uÿ užÄ¡§cm²¶Ûý6tm˜ü¯>Ýoœ“¯3ùÄ#]ò_½â9Ñ~nuÍL_°`+aïÿÂËÍz‡ôÁ>éÍ~:JÝšÑf{þéäöþ[gú‚;&ÿÞðrw¨5ú‡þÎÍɪڠF´ô„¿wî˜ÿéÀBÝ ùã&€x” Õde×Vhé¥~W<ýlÿ™<뀰÷yxùŠÞÄcSïÅIg¹W#–Þp¨‡º>Lþ³ýCÏúÀÔÝþîu®1ˆCOu]²Ö –Zú`ŸôD¿ëž~‡¿Ù€g{àj“?@< ùR²º¶M#–F3ÔmÉÉGúî;õÁ>ó1÷¿9¼|7T‡1ˆÃšÏIz«4âìj$'—ö uO¨»BÝê@˜ô.Ä_ð¬VN>И¨›ü"Q­t'=«×kÄü÷Þ¿êàÔë}¡îõØT=>õu£a’]Œoh^`ÿ¡¡_Ýl<â±iõÎÉkÿ™•t¿mª¾êïÃÄ>ÒNßàœÜrdOîÄSÇžjÔ5ã ‡žÚÚä9_¬3{"ÝGµ/Ô­í6áŸj^‡ÆNŒ|Øä— +·kÂæÒP_•Þ|çæ¹\Š·¬VöÊ×Ç'†›†ç=D¢wźäÜ /ÒˆŸv<ÔgC}"LúG–]j™ó @«õQ“?@Lraïÿ¹ÚðÓÿ&ó¼þ~Y®Ü|÷[ c#£éÞ¿3ÿ"Ñß½1\ÿ8¹ÔÿÇ¡~'Lü,û™Ë @£^ÿ€É .k<ê7øV¨«ÂÄÿ/YÚ¨ül¿°>Q·ñèîZ•tVzbnÁ‰PéÜ÷’¬Mþ©Y­ì¿s÷[ÇGÇ<ð "kú£¾Óû?†Ú&þïfugµÐh4~×[ “wýëŠö)ïŸ õ3Yžügµð¥CC—޹ë@\{ÿ}Q>ð'½{߯‡‰o ûŒ ìý_ë­B¾˜ôwG÷ÀŸô’¾×…Éÿë±lðŒ—þå]o.…½ÿ‘V³Yð–ˆÃªÞ­É–µÇ´É÷…zu˜üïeƒŸñ2À°÷µÉ .+{·Ä´¹‡C½“ÌàKßÚÚ¨76x;Äc gS,›ú`¨WÆ:ùÏšæ“VË» "}+¢Øï{*Ô®0ù?汞!4þ½·@<ºªýI¹”ùç½5CýÇ0ùßûxŸ6ì?4tn£ÞXéíþëcØÌkÂäÿ7Fû  ÕlþŽÖDº7f}÷‡ú¨‘ž!4Í×h @<:;z’R1Ó|ýa¨=aïßÉmg û ՚ƭˆG÷ýK˜ü7Ò3€V³uU«ÕÊi €Ÿ “ÿÿ5ÊÏZÍ_Ö€x ¥¤«³?«›÷H¨÷åY€f£¹C[âÑݹ2É%™]øýí°÷ÿ¤Q~†°ÿΡ­ÍF£ª-ñ¨U3{Õ÷¿„úŒžEhµšoÕ€È@v—ÿ¯{ÿM#<«Ðzµ–D4 ä Ig¥7‹›ö­0ùÙÏ64›Ûµ é“ÿr¹Lÿÿ=£;˰ÿÐP¹Ùhöj @D šÉåÿï†úk£;Û€Vë•-OÿˆJg¥;‹›õîø7‡&ÿ×j@\ª•ž¬mÒH¨?3²s /Ô€xärù¤£RËÚf}1ìý5ºs ÍÖ vÄ´÷¿"‹7ú‚‘shh@L sÇÿŸõ#;‡pÓÁ+;šÍfI;âQ)uem“nÙVš0²sYÈå.Õ €¸”KYÛ$7þ™sh%Ï× €ØV2<òwÎ iy €€åìÞmµ¡Õ9€VËñ€L=üõëFt>+­d•VÄ£X(gíÀ6ªó[èÑ €¸@Æ|Û¨Î/Ô´@XƾcTçBèÐ `™ÙVzبÎk q `¹ú¾ï @«UÐ €xò™Úïû¡÷ @+§ÉÖOýÇ è|WˆkþÏeêÇÿFT`6?üs™:ò;bD`Šéj}î“rÕÕß@’ r @LüÔgúÀ˜VÄ£Ùjfis:èüÀ¨VÄ£•d*ôÑùX®´`þàI­ˆG³YÏÒæ¬7¢óFk"K›³Åˆ Ì&dk zxxïZ£*ðŒ`"k›´Ý¨Î/<®€VæÀEFu~àa­ˆm •¥MºÄ¨Î/ܯq™hdêp/1¢ó G´ ¶©{ÀëDÀù€û´ ²ÐÌÜM`_eTç U×+ËØÏÕ9€]ƒ76çDF²¶I»ï-Ù¹­¤j@<Æêdz¶I}¡^idf0Þ8‘dìRÀÔFvîàíˆG«ÕLÆê'²¶Yo:<¼·Çè Ì´ ½ÃÕP»ìÜÀ½¡F´ £õá,nÖ=<¼7gtg¦®ø––D&žÊâfêçîìWR_×€˜VŽ%­ì˜z¿Ñ[¸]KâÑl5’±‰cYÜ´ÞëÆ@sßH’lFANo$›‡Rÿ#„€¼žEØ5xã“áåNmˆÇ‰‰'³ºi‡z›žÝ @êoµ ¢0ž€Ì.þþÞáá½½FyvàmˆG³UÏòa€Õ¡>n”gÒ‡µ ÇÇËòæýÚáá½üL`×àãáå+Záñdz¾‰BÀ€‘žy å0@DÆêÃI½9–åM\êÝ!ð™À¾PuíˆÇ±ÑG²¾‰¯õ[Fz†°kðÆô`Ã1€±ǰ™9<¼÷µFûÌ+©/j@<Ò+걬of:ïýEÏÎlâ0@d«Ä°™Ý¡n !`‹p»oLO ýko€x}8–MÝê+!lNï¼â‘^ 0šÍ‡ζP_5ýê€?àTû¾ý+÷Ö'êÛô ÕJw²}ËÏ&ù|A3ÚKz‰à%§Þ'`ÁÏø©4X*¾0_(Œè;@FÆžJîûÑ?kDûI/|÷Ó?yÖVRû ½lbtì¶ðw8) ›ÖìLÖôYn3ÇCm¾mðY]H½~ÇÞ¥Jù7ô <|grôøÃÑ^Ò“3Þ»h+Ón:xåÿž¿JÿâPÈ“ó·¼,é¬ôhFûH ´)}ŠàY_˜ö†ŸG±\ºUïâÐhÖ“{ðd¼>ªí#Mc¿4ý‹E{ŠCµÖõÊÜ8 éäïߘ ´7O°(‡¦í?4TnÔëGõ Æ µjrÞæ—&ùœËÛ@úà¾þÁ®ÝGõ9ޝ߱w¼P,^êc‡á‘Ç“ïýà›I«å¡±m ÷_6ýÁ¢ !`¸T)…PxÂ8ÄáØ‰'‡ü;! 
=\¶$ õºíÿç‰B©tž'‡$´‡íK¦V-U*ƒÄî}à[I³ÕÐŒ¥sÞ’€“+Ÿ{²P*n å1R‘Ho”žàê€%³nÉÀÔJÀp¡X,–Kß5&qHÏ ¸ç_KêqÍX|µ¶S!`¼ZëÚBÀW @Ž<‘Üýýÿ—ŒŽkÆâš¼sQï07¼ò†‰±ñÿl|âP,”“s7¾(©U4cq4»vóíö]½açç¯*wT~#„’–1Ⱦô0À=÷=yü©4cqL.¹äÛñ;{ý…ö¥ŽÊÏæ …ã}éUG~øÉÜ~eÿï,{¨mÀdر÷@±\ÚT(+€8üèñ{“ïÞŸž8¦gÏ=m¦BÀco¼èÏ·•*å/$mtžgϱ&߹䀘' rVÜÕö`Úv~þÊr¥|E>Ÿw½@Æ'F’»ï?<ôXº³êÀ»=ýOÛ]0“ý‡†V6ë[ë?€8Ô:’Áu/HÊ¥ªf<{?yಠ? wîþÐÄøÄû[ÍfÞXd_¡PJ¶¬¹(éïÞ¨ÏέÛjC—OÎýË1L­liÖ[Ÿ˜8ÏxÄ¡§¶&‹­Ìß[Cø“e¦Ýtp÷{i6›%ã Áj@¾˜lXõÜduß fÌÍÑP›B8–‰úÒw~µ·Qoì¯OL¼òûúÄo6M± é*ÀÚVõnþÕ¡P—„0E˜¶ïŽ+ aû>Ѩ×ß‚@Å¿€ì+«Éº•ç'+{6‡9.ê+ÆGC]&ÿƒ§~2Š0íæ»ßR¨OÔ?w6ÞqÕýƒ“+…|”‹í “ÿ§ŸþɨÀ©öß¹{O£Þø`î(ôÒÁ•!¬éÛÓ=® “ÿÕ§ûhÀO‚À¡¡7kC¸¬Õl9Xqé|×ÓµfrE ½©PøLV7õ†Pï  %̼"P }¸¦Ùhî a`½·@ö•‹É@ï–4 4ÊÅj–v¯ uÍ™&àÌ«ƒ­FóCÍfcW£Þè×€ìKo*Ôß½¡5нq¼X¨,ׯÓþÞuºcþÀÜW.ýùíf£ùªf£±¶å.ƒ™W«$}Ýëë}µucåRçr¹ŸLz©ßO?Û_X˜0ÐzõŽPo àÂÜ[ ãÒ{kk›}+ÖwUÊù\¾Ý~ö§wøûP¨ëO½Î_8»àœÐ»ÿêÕ!lo5›½V²,—T+µ¤»kÕXw×êᮎþB±Pî]¢o&}°Oz¢ßuÓ·÷ `!Á¡¡rèè«B?_êÒV³uNžR]…B)éêèmvU{wUûw–{’R±Z ójí,üuÍP·…ú\¨}éSýæû$,‚›^Ù|iÒJžZ¾#ô{0|¼*¼ö„ ÿ@ZaJa4 á× Á _L*•®fGyÅXG¥6R)vޕЕz±Pi†Ê‡ß/ås…Ž$É•’\~øÿT`h„õP¨{BÝêöP¤t!¾¿Ÿ .€€€€€€€€         €€€€€€€€€        €€€€€€€€         €€€€€€€€€        €€€€€€€À þ¿ÖŽÆq'fIEND®B`‚celery-4.1.0/docs/images/celeryevshotsm.jpg0000644000175000017500000022712513130607475020717 0ustar omeromer00000000000000ÿØÿàJFIFHHÿátExifMM*>F(‡iNHH € ÑÿÛC   %# , #&')*)-0-(0%()(ÿÛC   (((((((((((((((((((((((((((((((((((((((((((((((((((ÿÀÑ€"ÿÄ ÿÄS !1"AQa#2qÒ$BÓ%Rb‘¡&34drt¢±Á6Cu‚£U“•Ñ7DS’³á5EcñÿÄÿÄ11A!aQðq‘¡Ñá"±#ÁBCRÿÚ ?û7¼I·}šèȰ-þ:TÆÚ‚ðh$!J$’GÖ­fë©<¿äË|÷þ‘GðÕcì¡ØhÑZ ·ØZå®<à<7‚­ÙüÇÒè+Ô{±òËxLj£ô×Ï‹jMª?,·žp>"Žá«(ö¦3Ž)é³uÔyÓ-ý¤QÇü4ø®£ÜGË-ãßâ(ý57ošÌöTä~¦µ6 ´((Aæ¶p}Y‰‰¤Š×ŵ&Â~Yow øŠ?M}7]G‘2Þ=¤QújɃLcR‚¶.º':e¼zH£ô×Ï‹jM™ùe½ÞßGé«-k1:,‰’"°ú‘T$ä£=³V"gx†ë¨÷ i–ñê~"ÓAuÔy9Ó-ãÓúE¦¦î¢Û£ŸC ‚ZÏ=…lìiI¥p+Ô›ùe½Üd|E¦¾üWQîå–ñïñ~š›>,©XŽúz9 u)9ØO`kf“¦i"¶.ºœé–þŸÒ(çþùñmIµ'å–òq‘ñqÿ YiPVþ+¨÷cå–ñŽÿGé§Åuù2ßÓúEÿÃVJPVþ+¨ð?É–þ¿Ò(ãþ|WQîÇË-ãþ"ÓVJPV¾-©6¨ü²ÞFp>"Žá¯¦ë¨øÆ™o¿?Ò(ãþ²R‚·ñ]G¸–[Æ;üE¦¾|[Rl'å–÷sñ~š²Ò‚¶nº#e¼zÿH£ôÐ]uã2Þ=ÄQújÉJ ×ŵ&Ìü²Þïoˆ£ô×ÓuÔyÓ-ã×úE¦¬” ­‹®£ÉΙoŸÒ(ý5óâÚ“`?,·»Ûâ(ý5e¥oâºp,·ˆ£ôÐ]uNtËOé~š²R‚µñmIµ'å–òq‘ñqÿ }ø®£Ý–[Æ;üE¦¬” ­‹®¤ó“-ñÛúEÿÃOŠêLòe¼žÿÒ(ãþ²R‚·ñ]G»,·ŒwøŠ?M|ø¶¤Ú£òËyÀøŠ9ÿ†¬´ ­›®£ãe¿¯ôŠ8ÿ†ŸÔ{ˆùe¼c¿ÄQújÉJ ×ŵ&Ò~Yo<à|E¦¾›®£ÈÆ™o¿Ò(ý5d¥l]uâ™oÿG鯟Ô›3òË{½¾"ÓVZPVÍ×QäcL·_é~𠮣ÉΙoŸÒ(ý5d¥kâÚ“`?,·»Ûâ(ý5ôÝuáþL·Sñ~š²R‚¶.º':e¼zH£ô×Ï‹jM ü²ÞxÈøŠ8ÿ†¬´ ­üWQîÇË-ãþ"ÓAuÔ~lé–þŸÒ(çþ²R‚·ñ]I„ÿ“-äã?Ò(ãþ|WQîÇË-ãþ"ÓVJPVþ+©0¯òe¼ŒãúEÿÃCuÔ~\i–þ¿Ò(ãþ²R‚·ñ]G»,·ŒwøŠ?M|ø¶¤ÚOË-眈£ŸøjËJ Ùºê<Œi–þ¿Ò(ý4]G¸ƒ¦[Ç¿ÄQújÉJ ×ŵ&Â~Xow·ÄQúkéºê<Œi–ñëý"ÓVJPVÅ×QäçL·Oé~šùñmI³?,·»Ûâ(ý5e¥lÝuà™o§â(ý4]G“2Þ=?¤QújÉJ ×ŵ&À~YowG鯿Ô{€ùe¼c¿ÄQújÉJ غê>s¦[úH£Ÿøkçŵ&ÔŸ–[ÉÆGÄQÇü5e¥oâºv>Yoïñ~š|WR`ÿ“-çÓúEÿÃVJPVÍ×RyÉ–ùïý"Ž?á§Åuì|²Þ1ßâ(ý5d¥kâÚ“jË-眈£Ÿøkéºê<Œi–þ¿Ò(ãþ²R‚·ñ]G¸–[Ç¿ÄQúkçŵ&Â~Yow øŠ?MYiA[7]G‘2Þ=¤QúkõÚ,¨h¹XDxòd"0uPáJ•œ vâ­¨]†ÒmÞ9…¼9¤5´ãc¤ª?ABl¢ý–E}Ý#ö{!¶”¦X+ª±ÙŠ€Ïæk¨W.û-’óZKìñ†ÝZYz<®¢áx*#?‘®£HýQo‘5èkDO°àr/_¥•6¯=Ž0,är+E‹-Á­3]ê’¤-/$ì@kilçÌFìŸcœ÷«u+Ó£âgFŸLv˜ú±:"fª„«}Þe–äÀŠbºìÏ”—Ю«e`© cžkÔ=:·™µ±qmÅEa/—[ܤ¨ƒÈp@çñǵ[iOêµDR‘×o•DLÖU£H—©g³ ç„—Ò¥%iÙ%H-í÷ üg“ZÖ›Ö-šNÀ¦î-©§XC$¶·Qܾ‹ìIä÷Å^)[þ«\E""›}¿'ñÅÚvˆ«…ne‡œ.¼S‹'ñ,œ¨þY&¼µi½?'jC+a ‚%AJ'â+z•枤ÌÌ÷j›Q=©3ìrÙ[ jK­­ AXPõçê0~™­²§¸ÈC!mC‹ÞOŽ=rx­ŠS×µ)±F„h®7y› ¥!§[i) ÷)Ýž?ˆ­úR¦­Sªk$E R••)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP)JP*TÅ~R-";Js¥qaÕã÷P Éþ9Pz®KÑ‘i1Ý[}K“ /iÆä’r“ô4Ì{iT>ÊQ Z+AªC«LÄÇ‘áЕy*ßž=+¥×*û0B•¥þΔ¨¥1åî p3žõÕi²Û¨mÇ[C‹¥ P@w zâ±¢t7T‰qÔR’RêHÊ»ýϧ½RþИΧ±]cÛ¦NŠÄY±Ýê-+q°åàF{Z£ií;|·Ûš²É³M_Ÿk–J™Kl¶Ž¦åç‚‚1ßµt:u^{’vvdÞ­JC«MÒC8ê(IF“¸çŽ}ëq‡šÊaÄ:ÒÆR´()*B;×vÓ)Ûf© iùéñ…‰­ ÀP.Ç6N9ÆÕ¿ýjïöim~ßnº•ÂvßMÅéẪe•c§÷rBŽ=3Xˆ¬T¥p¥1LT SÅ”Å1@¥1LP)LS SÅ”Å1@¥1LP)LS SÅ”Å1@¯:Û -×ÜCM eKZ‚R‘îIí^ñ_ÚImä%m«…%c ¨ Óu·JC‹‹p†ú­M>…g¶px¬²&ÅŒë-É”Ã.7)N 
²‘är7{zæ¤ÊÄ/1®0¥$*,د¤‚Aiä¨;ö>žµˆ^mj†s€_^66$#r³Û99®,æ½Ýí:F<§Ù…hħ bƒ¼„…6G¬g¶¾Éì·;-Çk\Æü(H]¤»ÈŽ„“Öÿ»Â‡˜ÎjîÜ.œ©r’ëIB ã Ú}y¨›šË<«qžü¦¤%AN-´•+;–JG™G=ϵA…v­>ÒXñ2aÇ„;u}$çØ9¨ðÅ¡ÍPå–4K‹«j2ž[ß¼ýÐ=NøX'Û"­þ %Ö[p´”©ÄF*(ékr/?ŒÙ‹0¶ò `eN`©|þ÷–µ”‹+1D9«œ„Á)‘o|²öoÏôOWw VÇVÄgtÂã[œ’©ñÝšÃO†Õ:Rºis„ïPY ð Ú¦cé·‹- ]dø©O%çŸè´wá!;J6íÆT€;•¦F*×'OÊ´¹>\{¤6üZâ²Ò¦Ê[®à’••d¤îÆ8êë&Ý SO´üF‡ÒPè-8=Á÷ÍB5£aÅNmo¿ Bf*kn¶¶)HØSµ@‚¼`Ðk4È\$õ.˜Y_‹™°îá!JÝ„’xD×ÈŒéioÈi™2÷0¥)så!)VÕ©K P‚A85±7G³6ë|©ò]yŽ‰ËˆmEJl’;|›‰óãæ‚U?`²Z™œÆùM:â›Ëél¡K ¬,ùˆNõ$W‡_´#SÛíbW) +]d%ß:V¬¥üÉNÂsÁ5h¿éÈ÷fm}¼tšB’BÒR R GcÁÆEj£FÅEÎ4ÆäÉ °XPiA*Ê™FÄänÀ8?Äå«Ö7í÷—®¬A·JvD1äi7‰·uŽyþíYA8çƒR6†¬R­åûr ‰}ØêJï”)m«i(Qpn³D³$ÆcÜæ´©ŒFÝ@BVÓeeG'Ì®Hܬœ}MÚí ÷3 Ùá½ì!! ô `P•M N¤áYÖDRÿŤoꩾ Â7ò¼nÏOZÒ"Üå²õtS 6ø]D´t™ÔR’Hòp±éœUÂNbV¡‹v}£…2Ði%A% ïqQòç5´m-ü*\ë½9%Â¥q¸o$œ}0J,1Ûj+i£>àéC_Ò³z;IÎs»LþñÀäTúYç¬Ú½O\u%  ”7¹-¤þæpIî+2´ó©v+Ì]dÆ”Ë>N4Ói5»pNݸN=UQË•‰›Ûð]‡pè¡Ûˆ¹ÈR–X¯)ßÀ8XI'†¶-¯Ùç.±æEu·BQç*OS¨è ج•c9ãöæ§¡éQ/’®L<òTû«“ÒÂv¥å£a^q“Æpr½ë MÄVŠ¢Í}™ÆRe -4Ú0¤ ¶À¸ÚHàü–n.%ÕÛié­;pF?<ƒ­EG:½&ÔFä¸ýÀ!jZ6x©…Ä”~=Èݹ;}Iì|¦n©·&LÃ)KK` ²Ê7);Òûöä¤ä ó^äè8"CiN!.ÆSªJÄFv¦7ÞÝ¿ºœgŠËJ-7y/¿5ÁÌf[0ÛJB m¤!%G£8 ÔrÌ´í_.ÜaÏ”Û7vÚ†êš^ùAV2‘»'‘ØdZôê´£Q£¾§®;$„%2¦)caÂ÷ +rvžùµ%#K¥è—¦ã(F–ÿˆ  ¥µ…¨vó$‘‚‘‚Ei=¡"»fmñ¯%†–ë™K-•«q(òýÙN0­Eb·±c“v—ntJD–ä)–›MÆR”´†Ð²³…ùGŸåÒBJ” ätÐV ¸ oOJ“e‹î‰ŸL@¸5 SóQ!͸6Y Üv§r‚ðœ@ÜFMye0ôÉšvzœ`,¬‰s6ŸŒ%[¶¨Œà€I¿;L"dÅ>äù)¡”Ëm)@Lž’Š’OO$çÅyùcl)Ð[ºÎnß$;µ„lâ·(¥XÜy'ŸSA1‹Rí‰inKÈnKlHD‰óZ””Ÿ)VB¼Àà‘V¦-yüùŒŸæT{=­À‹qv:_’™KSl4‘¸m!)NÜ”ƒïV„”$)EjDc'Þ‚å‹_õ&ÿó?̧Ë¿êMÿæ2™ST …ùb×ýI¿üÆOó*º¶­ìÛîȶOmøÒ“¶>,ùê)e!«©„ƒ¼gØUò ï:fÞÛsƒ-N–§¸—”F2…'nÒ8÷@<ýh ´çË÷›;”%±Ô¨&å$…: IÞ2‚+YÉVõ[6uDµ×•.‹¤ÅĶ|»ÿ!;‰üGkeðe‰…‰&KîmF_Qî’1€žÃ Æ@ª)ëÃn¼…¼â_q µN¥¬ã<¹Æy ‡¶G·»"æÅÚ¨+‚Ò$8EÞC‰Kj #q á@$äsé‚kÖ£‹m³Å‹-¸¯¿Å¥*R¯QÜ@HBJüê9Î8íSM:å¢ß2*.r]TÊë­¦ÃÅg.æW?½‘À«5îÆ«» 0ìù ÇJB^mAêàƒœ‘”ž;§Í]·¬ªÕˆ²ª,Ô¥ÕºÓNüRAR–ØÊ²ü'¸<”ž*z}†×ާNaÊ‚|×) ú“ÔàW¸ºN$mBýÝ·ËÏ%’ΩFÍùÆãåÎp ”}jnCjuPê›Vs”€sô úVu×Óý·c©êôϦøVÕg¶¡·T¸ó›d:@¹È äžê}?ƾ=¸u]1gxVÊÁp\äg)ïÇS¶A1ð„%°Úu)(é¬ yÆâ}¸äžÞõ—À½IuÏî⦸ÆUßžÿZåÿ'”y#ùæÿõ¾õ¢)o­KJqa)*»H å$òw÷ÅdUšÜ™ ³Ð’V´…ŸéYN<¾~}êivõ¸–Âå:¥6 ¤©IIÇvÇÖ½?¾—d8Pœe;SÉ9íÇð§üžP‰ëRoÅÑl–Ù]l3,!§?‘¸}S¿€}+så‹_õ&ÿó?Ì­ôCÛ0H[Ëq@($ôÈ5·]:~ªuÞž—®“ëîÁ3Pã!†ÃHÎ7¸§'?‰D“üMg¥+n¥)J)J)J)JCêIïÀM°ÆRG^{1×”ç(Q9ÿZ˜¨C¹i·d·¥9§“¿þñI'÷4ìM¥Ï¾Ìì×Ù·û¼Ïú×W®iöQ01¢´S¥™1äaÕ3{J—óìk¥ÒkZArâmr£90u›y²´¬(Œ€¡Ç¾¸ö­Vnϱ©µ¢ThFRo[%NJo¥»zWž9Ê}@ÚsÞ´u®­¾Û5}¾Ã§­f.T5ËqÙN- h%J%>œâj‡íTJÓ7 ú,”Û¡:†V¥8à+ZˆHõÆF{w¡uæ6¬’tô™Ï\£9'cAqÚ‹µP]Zö”,©@`v%Dcž+Bëz7]!m™%ä7w/¨GuÆŠÂ] *m(%+p€0’pr}3Q0>Òõ|¨Ð]‘aµÀTÇ^BS3®ÞÛAÕ8x$§ìjùkûQÔ—YL¼Üu¡ÿ^@Cά–Ñ”çw•_ˆÇzdO^K:žð„®<¹|t‘ÔKñ#ä(ŒíÚHÆ@î¡‚y­x×WîziÈÒn° „ÂQš¦\m¤(þ6µYãjrr1»~ÔõxÓ‚ò› •mᵪ:^t¼„-eZ‡` @ç>¸Ç5ï´ýHÝä[<.š[ÉmÇ$­2(ˆøº‡o¿P¬žjñæÈ¿iÙ—9+c¶ëtã©$,;!Ä`¹¡D¤¸dƒÈª=ÞT¥ê VŠP1'¶—^HKHÙÔGá ä( ŽNïΫº‹íÃRéûÔ«]ËOÛ*2¶¯kŽrö ƒüj]ßµ]PÅ‚5ÙËU,¼YhÈt-¤:¥% Wî„’“Ø’=@©Ï›­“ÏÝ^ºYžj]ÚܨQ¤ÆÄå²ãl9”¦UµYòžwàkäÛ”)Ó —kº:Ûo7Ô’´5¤¹É ?NchI óŒTµmA㇙҉‹-•<ÌïÿAa*Ú@8ÝÜcoøT|ÿ¶Mc ]Ê:´¬7>²‰.0][m‘êT1ëV»Õ"–D‹Û^9h·Âu-¡f2’¢·T?PRN}|ÇÏ7KoKKøÛÏ”1wtÈL¹=#-d©ÀJÎB²Éª"ûDÞ‰XmÄž sŸñ«“¿hú¾=ÑÈ3íZ~­@EÁÕ<óå-6¥„«h'vHþu)çÝkºq¦åý9ot¨Hqˆ¢SRÊ.pNJ‚‰ä„àýëzÊýñZÎô¹q]KŽD ŽË΀—T2Ç*“Œý0S­¿kWÉ–ÛÁÈ–F-ðÝè&PL•¶òðHÁ!>_Ä@ÆGÔ ·MSSdiHíC{üÓëKÉBÿ% +çÍSWê’Ì˦,Yέ{zJ¶BPN œÙÉ>€}+âYnñ©4잊ڔ# „¥¡kv¡¼g*Q8þÅr»oÛ¶§º<¦mºV,·RêCÕ=È¥­ÿkú’ád“pbÙ§·°Û*¤»â m‘½{{3êA<àB¦m÷K»ôˆšìvâKêy·/”— N6( ’Sè3€«}‚ë|¸ØÚ’Ë6ÉNœAqRv%HÊAm+I=ÁÁÇwâ‹?í/VAvoÛtÞd<#8|[‰LWvìt«§‰áÑ)èFBT¡#/.ÉIÀ)ÆqÏcSúOQxË æÝd'É'ÃõC`%D”„í(*J†UÃ|`×1¾}±ê›\ï 4ô@†pë!â’· J?Ú»}*2åöó©m’<=ËLDˆþºouP¬zÚ‘¶ÒNûí^u ÈZÞ ±·Yð¯ÐZCanå[²H$ž™;€çuG"÷ÙjÕ¯i¿ !øòÒâYh……%´©dØäúdú×9öëªd@vt})ØLðãèKÅüÔ8¿yûj¾BU§ÀZ­·!rc¬ÏE/…gzS´œ“”ŸÎ”Z­Sµ•í‹=½ä*vB|fæËgf6%GvÀNUœ¼sR j«’õ?2!ïLÆ#ªÞ–T\-­”-nç²J‰íŒ H®x¶½`¹ÏBFŽmRÙNçKo {‘Ü ÖcíŸS1EÍÂ"<°·d„=±JÀ@%]¿táV˜t¨Wù6ëN 3.‚Løó€Ž€€§6¥Gr€ ƒÉ{Vœ½ksoNÛ¦‡£!Ç—0ÚT·CjÚ‰* 
Q>©J²w5Tº}¯kKl˜Q¤é(Ýy­¥ÆJ^%YÛëÜzV³_mZÁér#5£[rTa—šKo7þÐî*/.‹eºÍgZÜa<ßBÝ"bŠZJºÎô=!ÿéàe\ç<ØÕî¸-Ëí£RÂ…X°[ŸfLA-E¾± $¸¤agÏöèæ^µÜ,°-S’ÂÞm©(|6¤«oÓÊ É㊭ÿíxÿÜ6ßþ#Ÿýh?KR¿4ÿíxÿÜ6ßþ#Ÿýiÿ´eãÿpÛøŽõ ý-T ç^צõ'…ñÒÜæÔ°R-™p“0UÈì3ŽÕÊ?öŒ¼îoÿÏþµjwíSU7w¶ÛÅ«O8¹ÑŒ´ºÜ·ÓMÛŠÖ8v+8Ïñ4m2\]5 2¦GZ\¸©†ZV¤ºÑ$„¶rN3•+‚¨¨¥I¸+íD¶]u®”œ)JûتgºRЄ¨TFJȵT5Û6§°]^Ÿ¶8âL€ã+uHSJ ÁÔ Üoí_U¹¥Õ|M’ËáÒØyLuÝꆊöHìTïž3ŒsNJanÑ·f> ¸Zg!0ÐSh’úŸRJ÷>ârJwÝœðN+/ÚØ;-,¿3¡q’7FÄ)–X¤©ãÏ$p“’sŽÙ5F¿ý°êKCÞÍ?pjS‹a*ƒ!×@u;w6{dÉísÁ4Ól:Šü·ZnÓѤ…†ÛbL—Pãîp„$gž;œÜóAÒ™„Ì~¹Ò´ˆq·¾àZ°ë®ð”ã8á '·ï œ¼cÁùKIÞ2W•} ®1;í’ýOĺɃ§Ñ⛳«#¬¤ï)ÏáÛÝ$þ.·¾Î~×.Ú¾ç.+–«td1¾§‹À€sßÞ¹õi蚸|N­:zZ§U¼‡E2R#8Q¾¬m¬¤¨¨©AJSëÜcéŠÊT3-Iq߉$»µžÜíã¶1‚>µ¡óòãiCWÔt´•%ÅNÎ}¹¯_0\Kh"<êöÏQ[ð£€qÛükÃüÝ9ÏÚ~WÄÓñ/þ¦iÄùVyF"ZŽc:Ñtu®(#;ÜúÙúÖY+eER‡†âoăûßP}k ôì¬lé¡%JXud'î}«u,Å2ã¡îšTR•„ ž?çŠ.ˆÞ¼Ù¿ë:1XõR¼~ÒVwu!}U«y+J•”óøT“Àú˜®y¬5ÕÂÁ¦Ÿº¶÷m-¨GS‹Ý…?Àúå¿ûEÞ?÷ ·ÿˆçÿZö|>¨?Û‡Òø®ž®‰Õ¦kíGéZUkìëQ=ªôuºôû GzPp©¦‰)Nמ3Ïîæ¬µÙí)JP)JP)JP)JP*»¬ÿ“ÿÿ3V*‰ÔSD$ÛIŽÓýi̱÷ƒðn'Ì>£Ò™d›J•öQé+ALogF$yLž|å@`~bº]rŸ³~Yû6àÇ—ÿZêÔ…q¶mhî×uˆ1¥ÅzSÉZ~ð„¸¼«Ðdç±®?í æ4Ì‹C¶¨ Fä*:Þݘ{ª­Ã<äð?ë_¦5}Úm®ÿ ÄLgZM¾T‡Z’ÿI*é©£pr¬ É5†±TËÉŽÅ¡Ó ã… Ü‚¦ Ÿ.ßÞ ÆsëŒR¹)‡çë—Ú ºT˜ªa›ôt‰rf:òg«jy°’–ŽÞ1Ø÷ÕµnûO…ër•àfí‘Ð’òz’RÓE’œ¼î$`æ»e¾öõÊDù7Úƒ6†g§Á>-¥JR²ržP}+Ví.ûkÓ–¹2dÊê&*ä:¦ÐÚ–^+ICn‚9@JŠI¿×žËw WÚ8B-±˜+DÆ›m•°ZPâV•‘ÅxJQÉÀÇ&¤^ûSŽ»üy¾kÌ”H.C¨qäJTd§AH#Þ¿@G¿S¢Ô˜Ì–\RÙ¥+ò8–÷IHAÇl=j w)è±Ìz Ù3Øz\v!¼°ÓoH%Xw¤¢R“ƒµJàãŒWÏš?8ÞõÍÖeê|»l™£I(¢æõ(%©Dr¢É÷5)?]A™¢!؃,¶ØŽ…·ÖHi¾š‰RÛòä-aDä­~–ÓWyr4í¥ç-²®<×ß<×E! )![”œ‘ŽJAV­Òâý»S>Õ*ƒ)ò] ­¦Ül$„¥)ÁO$ç¾qÞ“µü¢ÆïÎsµ®UîÑ%H¼%»dbË2„†üVíÛ““·nÔ‚R=sš“kí=–âݘ/Å\™oÊdÇ-+üê6”¬­óÊ{ä˲خyšNô¹2¤D¸Déº:—\ -%XÜ‘·j•¸P8â¤5…ÂánºCu…¼ˆ¸Ž–ÐËhRqo­.ƒæÆÒ6ã×>Õf1>Wt‰Ëò¤=_xŒ»~é>%ˆ.!ÖcÈ@[`£ðäzâ­.ý¡3:ù|–&ÛŠ "#‹´¾ZÖ¼“r’xǧ¹¯Ôr.nGñÊrÉ45 ZVKm씨«'Ó"©Pî÷yšzC¯È}.3ru¹NEa-¸Óa½éJ€d%E žøš•ÏŠy÷~x¾kn0¯QXC‘¸Ü„àÓoš…‚‚‘ÁÉRIÿgµljMfÝÅåÀ7Hw4ôJÑâB¢ ¶€¶Œd{Žxç¾k¼SvvãgéçüÄžk¦”!ÄE|ù¿6ét ]$ÏÔ Ë¸KRK/¡IÜ€…î)ÂN¾ ”“«íOÚolµo›át’óîÉiäy¥!£”ç`8$7ý)«îÙZ”ÌiÖ´ º§¶VðÚÙ(?‰IÙ¸sÎp9ÀÍdC²¦^4ßJcí9%2dt„ôƒI@`Œ‚¥­>¾†¥ö-»ó6­ÖíÞ,vës,IWEäÉyù=>£‹(HÜ0™Y=½ª&~¯¸Ev¹Ú~n©å!$‚IœsŒ×éû}ÎâÔ[³rIrçàÝ–ÁñMºÎÀµFÔùHボã¿§-ž*Õf‰ºß>äûÃ{2p¶p?yÂŽ|Äš_s‡ãay*Ósmî!k~LÖå³Ç•+cÜ•çøTí³VAfñd•6®5n·˜>EÛò²±ÆÿÂx8¯Ò÷ •Â³Š‡|Bb¿! 
6ÂPÚ™S]-EDyƒ`ý0¡ÍG]:ëfbTkraõ$B[+p©´8‡ (R”‘éܧ çŠA1GiöåÏrC®h‘S= Il­Lµ°%Ì$ ª<œ*·xÕ±®Úy¨ËEÂ,ö˜1‚"Ȉ´ ò¦ñ’yÁ¤ô¯Ñ—;ì×áµ46–ˆd¦r"Lmô””îAR|éïÇðMK§R!ÝLÍ­˜í¹åCéJÂàoy”ìÎàFE.Yù†Ç®·_`Ì\5–Z´¦ÖæÒ•/‚âw?²*rÚ“l=sRÙ›¹çÐû/¡,u KA½«½ `wHÈäsœ×uq~ ´Ó%ÉÌ¢ÉÊÔŸ*Èé£qÊ·` c•®VˆQ¥-Ôg„‡zy# çxíŠ_Ït³óŠ~Ñ:Y‹S¶»z×èëŽçKÊÑQ“žI*ïÇu{ñ$~Ò£OºÄ›v³ >Ì#©Ø¸¨·J÷„« ŒmVy$ý+ô< LíÆÿ2ÞÝ»£§_âÊI ­±ø•Aç€Ig½iÚ¯ŒË©¸Ç›5íòg”µ½="¥¬”§; B¶ñ“ž8Å.® çÚ’¼}ùI´Ç\uImѹĭMÁR†’1Û§µsHï-‰ ¼Œ…¡A@ƒ‚9ï_¬m÷›‡Ã-W-pzEr|<¦÷…*J’Òœ¸01‘Û'ŠéRPPË¥–ÚS¡$¤/„“Ž2G¥-¿›Ùù1:æÌ½[p¼ü>â\˜×%ÇÐáe°¢’»01æëÚþÑà;Q!ØnoÉ}¶K¨-¶§R»hP#àá^¢»í¿RKkF['LL9xÚ†ÂÕ¿ÊT|¨A9€1œÖO›”§l«ö‡=¦·V²CjuXò¤í>ÅX ì +dåùêãöâå‘×àJav‡Z\uÅ}(%)m)VàRAVää+¸9¨W¨í‚þÔ·áKDV¢NLJwÌBÈjFHÊR=½~‘Ó:ç ]›\Póöàúó!aµÈ=U„g€ÙïÇ¥X4Íá[s¯Ìn^ ‡ÈàWéë­ýp54Kr¡°"<¦Û2£ø×»ò¤„«€¬nÏŠÔzꛯQËPrjbLJZmÅî#z[gí Y>¿JW%0à°þÔcǸLZ™œ¦]j:x"8u’¢Ý›6ÞÙî{W‹®»·'MFB2MÂMºDgÃE×”¢V€VÆ#¶+¹KÖrãÆˆŸ¨Î}rL¡À–‚N@Ù¸nÜ?ÁÍm#U<«¸dÛc" ˜Ä%n|uÂÝm+f1»ŸB})JìV›ù³ó‚5©Û–á6ÁéQ"®î’Ø!®š\BHïÙD(àö©hŸi–æ¯Ó§½j£¾ÔfúÆðçE#wm$œ…#=«¼Ûï²Ú´]e\Óo/31qã¶ÙR‰=M‰I NãÉÀ$浤k7[²Ež›c¾™e{Z +iQHIXܧ õ¥jSÎW-tÄýÖœzÞâa°Zy·°®¨qjóñçFÕãFAæ¨8úWí+%ýok•m'Ä)Ä<éÚ–ÎÆ¸ó«*$Ž0}jåÓGõýÔç¹Ãñn“Ôñ-–)6Ù踶ƒ)3~Û 2æô¤§j‰)ç#Øþu0Þ½·3¤›´³RÄ~ª’ò@ÜÓÁ ¢ ²¯¯úWéËÝÕè—F @‹ ÇŒuJZ¥=ÑNĨ' 89<÷ì={Ö’õBS©n\|<”¼ “…¶€¥¥$#èp£ZW>lSÿÞ–èýÁÙ~>(UµÖTò]JµºÚ°ó»œÖº~ÕÛ(¸$B–Á[¥ÆmM-jµ‡ ÐyÂrT=Èú×n‰¬%Ha–þ «ƒ³‹ÑyÕ6ê6¥‚¢QÏáÀÛ¯CXÓ®]uØmG²õ\S]WÒÙRÀe4v('Ê ¥`ëOOŸr´óÙùoQêWî°íQ[T†cC‚ÔE2]% R %[{sŸð­9y²Øõr.D¸9 „å–KÈê”àîVÜc•vÕúúÕp™p»Ol@ˆÝ¾$…Æ.õIqjJAÜ·y±ß5çV©N½&+åµ4¶Ö‡X;À ‘Ürr)mËìü¹dÖðm6„ÃjÃtEJð‰Ká-º—“·ïÒ˜§éß·éíml‘6×6T;¢ÝeQĘi™¶*ƒHÚ AìG8÷Í~–½j5@»?˜QœGT…-Ý®(<²”ôÓ61Ï<öò>¤RãNžô{{6Ö:éJ—#ïBš^ß:xƒÛ$qïJÓ6<ú¿8Ë×V©Zžßs~ß=mB‚¸hJ\m YWP€0áÀ£¹®q#¤_qQÒ°Öã°,åA>™#Ö¿d®îåÚÀô•¶¸2á\eA¢¦Â²¶ò T¬mYWbÛyáþáAüøÁö¦µAºhþ¢º4Q?ÝAüúÁö«ÎŸÖÚî¶ K€f%Â}¥¨²V· ##ñŽ{}kögMÔO÷U"ñr—g°_^~cŽ®<æÚ +iµ–óµ c ,ã>¸Í!%ùªû¬¼Þ§ÌZ®m6å¸Áh¦@ê;õxã I9%#€Và×èsA¢Æ¸n‰ á¶ÖkoxPZ†2V)8'¹¯ÒšBç=ûFæÃ.JLõÄR”òØAÎZ’<¤$rRj,ß.+ûA0šWìmLðŽ²¤$ %L¤§Ê^r¢AÀHÖ¯ \¿:ß5Eª]ÂÈ»[+tk‰ÚÃO Ò1•6BÎ2T¬äãÐV-/©-V§nò%Džìéj"<¶ÝGY„wT’7«8Üã¯ÔZn\’›Ì”Ê]ÎÞÒŽôž›A×S»©°€o! Ç“X>ûm@ùŽG¹?þ”¡IuYI*p‘þm#93Ou E§¦ïU … ÚÝÇÓBñ;cnIX[‚”Ó<^Ôó¡™2ƒã{ju!‚<¹Ç×î=ö™=—þÎgÄCo)ÀÁuaA¼-9 ã8>ÕùĤä×ï(®/â+mõ•ï(j›PÓÔëš‘é£ú‰þê÷ü&‰Ñ£y¯Ù÷Ó::º=)Ñ:«¿jb/°Oÿ ¬?“ßÿ3•Ð+à úWÚõ>‘JRJRJRJRQŠ×ÛD}Ÿ³ÎfB÷yRNqõ©z¯k"B,¸?ÿ•ÿ3LDZ6•Wì¡ØˆÑ: ·ØRå9G‡tÁVìþcŠéuËþË"¾öû=ÛjS,G•ÕXìÅ@gó5Ô( µCÚ~8Œæ£n¹!£!æÜ`¨ŽÀ$öf½lkÔiû†xB@Ä©))$¦Ü3ëëXu”k¤èŒÂ¶ÇK±d"jƒÉmΖ? †<݉ôÇ8ÆŸÁ¦£R5&$eEŒµ!RV')M¸ØkgO£Ûv@ó{ çÒƒ$ºI0nOÂE¹BeôãïÉòämÊÁ9Æ2 íšßjU’û* J#Mt4eÆZ™Ý±!A%@‘å!Xàä}*µK\;®‡°Ûqš‚ωl¸ØeeI!{vàd`(ãžõ+F]Üb1u†%Ë1d#¬etÌgÝx8—Ý´z€9iYávi»×ù¶ .ðËarOQ)PÇ™Xõ^Õ£:LX_~+r­.¬6è ))'¥<àúâ¥Y„ò/i”²•·àà Wª—»$â´×äÅ’ðÌÒ'IyÕGQVðä2qô¦<î™ó²JË" »Tgí%³Hûžš6'hãÅhCzÀ­A5ˆ­Ãu¤ø‚–BVàÈ*Ç›Ny8ÈÍe6&ÐÌbθDb#ii Gq)JÀþ°)95tµ^.ùM†áHŠÃÆFôîXNÅ¥…<Ç'°4›‘d´”Y4í¥Î³0­öå+ @h! 
*ã@äšñs“bbñoøˆ†.Jâ*Ül§' Ç—$àr2N*.×by­7r†‹[6ù²%—’âöà¯'¶O¯sÜÖ†®ÒwÅñ·[f¬°‡SaiÆœ*K„wXJ­‚jå0¾ÕvtÝ7*Üê¥&¸~1M-"?X*BxW”• wúVÜ‹;«3–ÍÞä‡$¡iHZÒ¤0Uê„íLš¬EÒríög"xtË Ü"2#J0Ԅ‚“ëœädwÏ=ª*䈦9…eÇZNXylí‚= OåÅ`t¶O¹ÊjÍ99„ìwÈB‚BˆîG (ÆFj©&è_¹Y^c¼äDGß-.¼ƒ÷¹'“‘åw9©xÖ»ŒGp›3«—cxd)¹AÅ /!M ¾N99ÎIúSö7õ3öXðÚsP¢2ã‡2Øyž®9°“诵¦î†R¤ª{­ !•(””‚°0r2j½¬4¼éö˜‘!I›/¦ãª[ŽKKOÍ© Û¼ÉÇ ÖAcº5y"3a—R# 2š˜¤´´6‚އcžÀóß9Å N@fÊÝÂãQ/ \ÆÚi)$/8ßÎ|Ýÿë[v¹ÐîP[•my·â+)Cþ´”œ~DU(Z:s¯iUÓî®,6•IK@:¥ïYYW?ÕPOåÛ»`ÓR¢Øƒ*ã"*Ù}å§áêKIا ’ öøg —aoS†”"&ø´l é}áÎÍøï¹Îjõ6×a·Ûæ½.ßofþöITtíY ¨c““ÇÔÖ„Ø7Yzš9v2M¦&ÕÇq/¤(¼PR]q8ÉÚR}1Z·è»²,bÊ[ž-f2ÔQ( H[N©AI’¥çžF{fвJ—¤Ñm¶.[0(ˆØ¸KD+ )ò`ã$ãš”†Å¢MÆ]¶ԇ¤)¤îR1ÈR±ÈÀç5Ý« §cF¶ Xqy´Ä _FÜ’w ¼s‘ŠÄÖž}w›£— Ê”þÙÂZ’®ŠÑµ,”zœ{ÅÞ£>S»= nÊ[ËxMŽún–šl£oU°'q?Á<ñ!SòÞÓq™´É0tU ¡¸¦Ò<ÅI NPr{rjÌ9íU‹}†I“b•tYz\8ëC®uÔNâAOlc±$sбÉe¹1Ýaä…¶âJ“Ø‚0E%!Ïb™mJc[­Ï@y}p”0‚ÚÕÛ~1‚~µ³ð[i~3ÿ ‡ÖŒ†\è'-Ø$ã€>•JgLÜØÑp¬ÍZ¡7ÒtuÐJà áÁ‘·;¶ð¬ñõÅd:nòfé×ÖÛn¿ –y×dïJv«+ `(+”“æì¡LÐZáG´ÜᇣÌëªÀÜÀìVC‰ÁŠÏ¿zØjÓobaµ+qR°àe-$ (…mÆ3}ÅSmz^ï ‹»O¨JLÖ_K;dô•sŠPmPPVìdƒ‘ŒKé;}ÎËb[ ‡¹ârÛ!ÐŒ4JARˆÊB€Üpã±&JuË\nžä(ËšØÚ™ i%Ä¢±‘ÜÔm–%ŽJn"ßkŽÎç yhT0Ñpã8P) Îy5y±Ü$ëh78¬¶¦-¥n:öBP7nÚ’®} J½GŽák½Ïµê8Ͱ˜OMy°¿“½ )Ë‚?ó~ua]‚Ò¸MÂ]ª ¢6¢´0c¤¡*=ÈN05æ6Ÿ¶Çº?sD6U9å…—ÔØ*G‘(ÂN28üê‘;GÝ´ÀŽÐp²Ó’äBûcfý» 8Ø6áX”îÈö¬ðìwµd‡Äuî=…|EÙ'qi,6@F0 £Hà’Iä Ô]™²â‹ežYœQ Åõ–åŠOQI=—Ç$zú»¥l°ÂíPTÔu4ÙŽ­“É)ã5VcMÍi½Á·Úá J’\KPzÍ)Ì”í=ˆI n8ÏÒ´%éKÓºzÝGiÉ1œ´!rSÒl)Yl)8ê‚ {'½HY^£&Ý2Dž‹ ­è’~ñE¡”½°yÇ}ª>ÜT…Smú~éW¿tym¿çÔ¡¸@c-!=TÞQ))!YÀÁç7* IöÈW.—…WI[Ûë4°ûŒŽ+À³Û„×&ü:'‹p·ú)Þ F*ÆOT&ª³I¸Ýb?à›¸ÃC*o ¹EŽ‹¥@‡AÎ8Èäzw5 «åZÅëˆi”G)yÛaq°à« ŽsÀîA=#JY^ŽÄo†EDfŸñÙJPµí)Ê“Œ¶ݲÛ]ð½KlEøLt70“ÑÇm¼y{ÞÕΓ¦îpmð¢?lL˜î\šW†<Î 0à_UIòqÎïÞï['GÞÔí¤ºê”Ó %-ÉQÖRÆÕ,p‚”î˜ìkHé 2†w–›J7¨­E)ÆåSîxïP÷Æ,±- ·s·0í¹.€ðT!J<(¤$óõ¬V[q¯œ¶Â¦½)keÞ¢”RÉJ@N3€2 Ç¿5÷PµsŸ¦œa˜ ™®© µâJ@X9ÜG<$q_¥I²ÅÒJ´[–ôW•oЧb–YI,Ø$ãÊ?*ølÖÓ"Dƒnˆ^’’‡œ,'s©=ÂŽ9zÕbûbº\oˆš–•&?Aã$¤ÀRK¸Hü[ŽøÁâ¼1bºGø²Ñ3—7“#eÁÉjÃájʤFì6ñÞ‚bínÓÖë{ M³Ä0–úR–Ó- qXHQ$'Ðn?ßSÈB[BP„„¡#hHATH6KÌ-3*p›qnNCÍ3â€ÓiRè6Œ”ž©Í^ÐT¤$­;TFJsœlÐz¥)@¨o‰ØäÛ.2KÑ‚…©¹k)@B¸óz_j™ª¥þÇq—j¼·oq–¥½-©QT®F[é±ÊøPL[ik†»{1\‚Ò·ÇJ-)$Ž)#ÐŽk äØþdè¯Áüh´Q¸¶:›1’øöçn{sŒT^°H´ÙãGè,©W-m‰‡-%]Ê—ÿx}HìJµi-pV¹7 ‚‰~5•uvôÉh!iÙê¥ãÀOnh‰}:1-™¬Ø¢ÛºJHnKlÆ …¤ç’yÁäkÖ£:i—a"ÿ—‚ˆþ"8sbrÁÁÚœ”Œð;V•žÙQï2nð‚nRÙé¥-K FÔ…li HÊÏâ<ä“ô¬Ú¢Ölx0¢DK¶õ§€”¸ 1†Â”? 
9Ü{àqß4T„3dV ˜˜ÃøËhO‰SmލIì¬}ô!4Ç ¦²8RwdúqïZ¨†à¿ªfÔL@ÏÛóÛòõ¬÷”ó)l¸B‚¸^Â> ûÖ5×Ó³ŸR¾T†=Ö÷Bö²¤2Ž¢NÏÀœ÷qÛü+ïZœ±÷"NR¶óÀÉ¿/JÓ\IÊiIXmżÏIJ+Æß1ÁÜõ¨'§î*OSp$ž F2>µ!]zs3§xzº3X¦ý¨R”®ŽÅ)J)J)J)JDêb4›wŽaOÎi m8Øá'jÐTµBj˜¯ÊE¨GiNt®,<¼~êNOð§bm*OÙt‡›Ògl¶êÒËÑåu Æâ2=pk¨×4û)D5h½©8™‰#á#Ê®U¿?Åtº e2áoµx›Sì"Fô´ÛN³Ôë8µ¡=Æ9<ŸlûW™z¢,+ëV‰*´~48ß*ØU›·€yÆ>µ!u6Ô¹ Ë›ì4¦]ê±Öx oŒà‘œË5¢mv3¨$¼ˆ©Á 3âÕ…/fÝý-Ø'gÇaP@émW>ûyqè±ÛDfæ3,2KA'=Dà«ÎŒoàŸjÎ.÷ÇìºxÌ͹-JqæcÛÚ¥#Ê¥pHÚOZ–cOØ`@¸ÅChn+­ì’•>¬6×$#$ùÉÂF&¤_rÜQ SÒ#¥¤+ög  $•$¤m9ÁÈ'ŠÐÒnþŸ!“S‘b7"zRÒV”îPÆwcê3Åk½{¸KÓóf[íîBu Ø\ô…!ÄN@B³œÇdf·´ZÙ»xÅînDµ”–Ëê ¼²‚ -çj•´qœ óg´Z˜…&=½kz3€²´™Ky(±9QÚO©ûÌh‚"e =Y ¥c¥ÇSϹJHÆ´>àÖ«r"¤GvÞÔUÊy!«h†ÆíØ9ÂÏaøjr#¬;&+ˆq”å°P¬)ÚF~„ü+YËL~#Õ`/â ÉÜ¢z‰ Úô'îM'‚9WƬ]ÓOܦXÒÃr`‘Ô©.£nÝÜÔA$}x=ësS]gÛµ¼Â™1dHi‡S Q%k’à8@ädrxõ­ØZ~ß,Èí¡õ¢Xòô…¸¥€ nQ$qÇöåg·K—Líû›(Jd--¬…e!HÎÕy™MßÔ6æLÞ²ä6˜h[-È®¡)ï…„ŸàyªwÏÝÓNN ŒÜŸˆ®.Èè2”ÛaÓäIÊÕŒgOa]@( pyÍAK±ÙŒîð†P%)õ:Ì…4¤¶å¸¶†ØJƒ…²­„û`Þ•$æšµº&‡±%ÐòÂd¸\ @ ò+p)Ç5í'iz !¸Ü“ “âÝûÍç+ ;²°Op¬Ô…kÙõ*eê{¥•[•ü„¡ Yé …,“ÎT¢2jÏPìYí+¸*\vÑã#¾V¥¶áÜ…–Ò’“ƒØ¤#ËÛ€qS7[Ï‚šÔ8ð%Ï’¦ËËn8N[ln;ˆÏ=€É5¬­S :‰vu6᥅¡d”§qŠÁÇl“[—[$+›ì¿).‡š q—–ÒŠI¤”‘”’ŠÂ4Õ°\\›Ò{¬²âŠzîliÚ² ‘ÜA[Ç™©mÓqé)ŠÛSEee|ùðŒœ…`jÆ÷Ú ©—!¶¶¤¥ÇÓ¹Ä+bVÏÞÈ)*ʈRTFî{VÌmu,—-µ"R$¸é’âp¡µ!#¨U¸`+ŒO­n·¦-m**˜iæ´–d8‚´…nÂð¯8ÜIóg’}Í]“w¸£:í*6ù}Ω‡%«`o¨ݸðG8Æk*•Ù0žèºÊУ”° N{wï[ñ"1¾c£a}Òûœ“¹dO?¨«ÍºÐ‹+pn’]b t•Lq²µ’HIXP'žÃ>ƒÚ³6X¹tÔŒ[îKЍ²žC!£%öÂvG(¥²rrAì&‘u$ø§‘X€ÈwlÅl ¸[;TU‘È  e“¦í’e±!ö[­%$¾¼:rŽ ÎƒÈÝšð½/i_J˜tµ,,:Ï]ΗœåE(ÎÔ’Fr9ª#N¤7[ ¸Z\1ÕcL¼…tÞ hINä’žËÎA«iïU›…ŽÅbÏ“!†¥JJ÷9=Ô­ç¸Û•îÊ”`gÒ¬ 6„ TI?Äžô©JP*£?PζY®ò'® 1æ–›JÊR²rvïÉíœqVꊙnµN…qfJZv3êÝ+.päœùHÚ“éŒfƒCJ_ä\ìÈ~tG“(KTE¥ ‘Œr•©9;II ž ÅF«VÊ:ÜZÚm§!¦O„t%$©¢Z JÔ¬ànQÚŒàV(V«z`Dn!QŽÛ¾%µ¡õâÎNå+9^rIÉ9¬FÑhù„ÌÚsRz…â2vìêtóØòïÆqÆh+ Þ{²o ÎS[„C°YPL(­‘’w©8Ooë`ó[×y³cÉ´ª(a1dHKO¥Ô¦8ÛÎ×9¬K%ž5¾\+[ï9aL8„ÎqÞžs QØ®Ol”ÄR˜È)û·¦BׂV‘ǯ&—ºµRËw^=gP@ˆ•-´ýÊ€;[J²ÎÒ¬¨‚Žà Öþ´¼=f‡Æ^CùieÇVž؋Q! äþYÙµYÛÔ.ImIø–KåŸHIPÚ\ g1»¹uL ±Ÿ¹8ÓhŽðu¥ºæÄ¥{HÈ…+ƒL@¨Zµ5Öã6ÝÇ¿ ¹wÇC…N­!YÏݤ¥)PÝÛwÒ¶åÞïBÓx¼Æ0D¢ZP›QXèîJVUžIRNSÁäs²åŸMÄz*„ϰÐC!»’ÙÜÞõ) ±¹ ¨ã¸çŠÍ*˧×2à—ÜJ\}§&?‹RR±…¬¶„’;«ó«$5$Ý.méIwóP·ÚRJKÖõ²1(Qþ.ÿJËw½Î¶ÝmQaÉÐÂa¬£Ü+ÚîF{%8^=³EÛôô‹+ñº8ô¸ž¢ÕuqXPìûò=ñžqRÆ5±×!JZ™yËzJ˜}nï-…§nâ¢yÜ=Oz™0«1©®NY.÷!âõÂT¥ Úê“Ô' àdVK ɮɺB¹–‘ ð€ë))KˆRB’JI8W$}*55²|TÎËN¥ßâk(@R·,ìß„y‰äcJ™²D"±AÆžYqou‹Êu]Š”²IQàO¥\ R•”¥”¥”¥”¥ µ\‡£¢ÑÐumõ.L6½§’IÊOÐÔíDêBZmÞ=Çs%ƒ;ÉÚÒ™‚m*Ù‚­3öp´¡E)/r€àg8ϵuJæ_e3Ÿ£4 F”£Éê‚IÚTSϧ5ÓiY[ÍÖßÞ˜¡äÊ} <á@WEŸÄáϦBvþj¨Ç#Ë­º˜¤¼êKÝh¨ðÉh5Èw·>§ËŽj~ó|géø÷”…8¤¥ ©Ãµ8Ü¢22O5ªþ¢i:–¢3*ͧ!'îÛ ’œú•OÐ`úŠA*Rì¸Ðµ*dÇë?1ØoªLp]+XVV´¶¬n aÀ Äm–mÖ7JË-ÊJZðm¬-Õ¼…:Úé%IÎq¹ÆE\¢jGzè›Ë Fn Úk¨ÃŠ{¨ãƒ!°6‚UÊxþ!_nº—¡hnémŒ‰p’òÜs¢¤aA%$d¹œ§ŒPá]ºB¼NÔÍ¥H»[’ñB³ûmêKjIì¹XÏ|çÒn0Ôë RžB^ISa?ˆ¨‘Z‰´j¶&Ùä\dD—¦å.:[ ©Ç(¡ ‘sOZBÎí=J߉ÕvÖ~#¤…µ!Û‹Q „”¨ìh,~ %Dúzñ¥§mØÔsÌdÌ6Ãͦ[ÐÊ·öñŸ7ÞÏ#‘ïSµŽ]­P¬Â+âc&Iqâ° yHJRFH$óŽßZÍP{¸Àžˆ±“¡ º™!A* u8 î;ñHÛîNÿd&¬…tC–—^‘s–¶„ÏÚ#ôÜIR°•q¸wÏçÆkÝ¼ß ê½›²\ Æ-ø b­ƒ­ÖôüYïé¼ÕŽEþÚ̉"L]Å=H‰2 dç`8w>•ö¬Bu0Ye§"-l´_ë²§PT…%¼ŒN{Ÿ¡¤ ükt¿‡]íþîmå‚VµÂi2:½mÅ-ã‡S·'Þœœâ§ìlÞäZÉBÞRû› ˜ ê:ÖFÅ- RBßÓ‘ŽdcT!¹×h÷FÙŽ %·¦]ëpµ))B€ 8HÎw W«F¬·Ë)ù’cÁi܌׉_EKØœ¥x åX"KÒmoü×&ryiðm¡IOH¬qÛ# ïëT–cjIV'>,n0Üu¶ÚWQ K ¸¦”x ÙQ€G5ÐÕ~· ¢máå®A)¦ÒÔ„• ¤)`mI# ö¨Øz© õÙ»¢Žmêm*,;×Ép–ðz™ÊüCXº?©“cŠÜVoJt)÷tî/…Ž’JGr’OŸŒy¯h´Þ¯œœ¨¯Óë’ÛˆHyµ0R§®à”¥‡*=ÅX®ú¥)‰mUœ²ë³ŸS(2Râ î %A@Œ`ßéFõQåÀ‘¤2…©…8—²°êZ+ÉŒìÁÀW¸íJк¿m•zEžêíÆÑp¹€¨ë[KsïI;˜n)AÁ8ãÊ9¬sÑy·Ù¬ñlQîÀ´Ù^ô0¶p»ÊTÖ0”਀³½²jf&¶L+>ô†¢) $)–Û\'pò‡0Ÿ)íÛ"³ÎÔ(ÌÚc†­ê¹OZöœºØH?‡p'‚8É¥‘> ‹u¹¦ÖÝÑ·—"{»d$…* ,”gÅ{Oå»Ò§¤•'bvåYH8"½YeÛBfäHHZT–ÖBU°œyàSæÇ JÒÌì×Ù·û¼Ïú×W¤ O¥¢j¢>úËoÆ JÑmѵXÜ6¸•Ý8ȬŠÓ£y…tDd7.*ŠÂÞK}1»òOoÊ¡þÔ\aƒw (ÃË®·•Œ~Ÿ‰ÏêƒÜÿðuÌ[KYðëXeÖm!C-î;»àg{ŽôŽÄ¥§i8Rì’-¯­Õ%ùFbPJ”]ß¼Ú@8#­gtzWÖÓã‹zÖêQØZÕûå6‚9ÆÆ}ù¨hî·d·_ïPäf^”ˆ‘‡ãJR…l.µQQÜH 5s½Ü.plsg¥…ÇÛ ôÔ@ú m)é«üáNâ;ŽäH%woGÁMòUÈ­å +.¸ÁÆÅ:[é•ç¿ #À*'5ò>“Kù1S>JÜy 4—]i¥”4ßàFÒ¤ žH$æµ\„Ônôˆí-•C†§p)G{ÎŒ!$v;R’x¼*±Æ~Y˜Àz 
Ñ#¹R%¶ã¬5(ïijQ%*ížÃÌš`tKMš®Ù vR¦£'j ‰ W|“ÛÔ’x¨–´s ߟ»·6Jd¹Ö);Ü‚àÁóíÜBt@ö¬:MÛéÓrˆÑÔÖ\ñ²\K©Nã·÷ï.985ö<ž×3¥BB™T8ÛVá*)q÷yå9ÁÚ”Žßפßp‰£Ì[t8¬Þ§Â®ÃÁ¶BÒ²…%[¼˜QVòIVI5/j±Ç¶·1ÜyB#n6 ÎJÊÈR”¯©9?Æ©ñœV˜Ñ±æ·Ñjá=ﴃ °”ç•+.¯n˜žä v¨{¥â}Æá¦&»äuÈñÝS@¸…)EðÐHà«jHXWd‘õ¤o4LUÔäÀ‡%—š‘‡}%¥Hp{ƒïš®ÉЖç!¹2ÝŽÛ’Õ1IJâJ”˜(X) FGš•ýí¡9hƒä¡ 1’Ü•oqCð… %9õóæ²Ü¸³¤ßp[¡ï:vàáK+û­ØZ›$¥;¸Hd$Tóî®k³³nD´ãªâøDnÇ™9IÉÀïå«+XºÈŸ&cóq¤%ä ÐU»o•#w8åY8O…©/LÜì6ÐÚPÊâD'Åá.H*s’w'ùAäù»ÔÕ‚õp¸Ê˜ßŒ†©¨ê†-…6X)^¹Á“‚9Îs‘À­g꟦þ¦ÒñoñYŽ·¶ŠŽÖÚmIVäàå*ING¡ÆEy:Z*®¹>㈷! a¥!‰ (¯nï\íÎ3éPh ˜,ݺ¦*•»û4yŽ$:®™é”¯h%a\„㓌dŠÙ‹k‘pÔ¶‡néeßmm‡š%hwrw­Ø Ü8ì1R/çe¢­÷4š3•ršô™´¶ùJ™s¨Œa (…z¨+~ÏcjÝöÞuÉο!r\vJRTV¬€p8ÅsKÒäÅÔ›å‚Ø-juÄ”âB÷!òͺQ„ùx ômÑÏ]•§»dX¥ƒ)þŠ%Ês g“b‚TžøÎ8Æ2)§²–//N‰p”ÃO­.HŠ…·V#pà' ¢£¤è¸ó#Nnã6T·eUÔ7”ô”T(NÕrNwŸZ…‰v›ZψԆ–d]Cº**é˜íït9ýT‘íŽàòEi[.V?†ê9ÂKæÌÚLdÊZžyAdÎQÔ$''ÅH^m¶&`Å€ÃkQðn-ÐPÚ * )@~/@;V'ôäi7ñt”âÝRP¤6ÉBNäí'pðI&¨·)Q¤¬ñž¸ η¼#Éš°ÌA¿%EÏß-‚=Iàq“S¯j©£YlA×e;Û,©´¥JRZÞw`œ ‘´äȤòY/J± ¸Ë™)þ»MÇ/hSm#ð `q“ÉÉ54üD¿6,¢µ…Æ+)ðw'iÍR#jy‹²¦R¯1Të’à„¤ü?y;Â<à€<܎犄T”+KZ\ñ%weÏuÆ•:ÒA–w)¤ ˆÇ ýÒ}*Ñ6^cé–˜½·=3婦ŸvKq OM:’sǹ ÆMdºé¶îgm©™&;NÈ2¦ÂW•n(!@‚‚{Œr=ê«a¼Ü#_§AˆãsJçÏR tŠÒAR²á8•„óǘc±­Ìñc¼|À§vmÙH•#¤‡’¦’TÒT‚v¤(ŒÇw&§ŸáWs§ÜL¶%µw˜Ä”´ˆï¸„¶µ0©:ˆ0ÙSÀ;N).ˆŒm;ñ¸tölýÔàã#ë4](´ÞdÉ~s¾ËjS0Û *m¤!%^\ŒgãU¹ÚÆúÍšÞú »!>3-–ÎÌtÒ£€œ«8<íòó[ìꛓšŸÀ™:‰–Äu[ЊËke qΦ{$¨žØÀÁäŠÔV¬Í“rtª‹qŠ«Œ±cÞ 5µ-+xYÆSæI#”«#V›ú#¶XöÏ!1ÚqÇ KLä•«$§Éä#УQðïÒm¶@eÜÕ&|yŠHA`ÀS›R£’IH ö­9zÎêÞ¶Í0‡uô¹†’¥º­©Ø’BTOªR¬žé¬ÂÍÖØº^[ó·x¯>ÜלÜò’Gß#bR_¤m  ç“SõD²]&³­nPÞo¡m‘5E©Eçz žÿôð2rs»1ƒ›Ý\›¥™S. ΋>T (l²µ°z“¤(ß±ŒšŽNŒ5·#®²ácyIZvŸ>ÝÄ@NµyÕW™6û¤XɜŶ;Œ©Á!èåàë¡@†Ç8î};V«™ó£öÖÊLT%भ¡”6„’¼î9ªcD.*#|6æó/ É–ãÁ¶Ñ·kKG‘´§fNáž9ç5ZÜdB|<âža;\[Í4é{ïá'rNÒT¥rœ~/ Å~&´¸®ÖÊå\"0¥Oj3³<8[(BšRÉA ¹HîN|½9¬oýkBqP$4q4J=e#€£‘”¨äãÒµ¿Ÿ$ÙЭÐ[€©…¥-^*B¤¯qì¥O(­¥™ÙöO‡9u”‚U•Èl­`+p)Ç|vâ°Ù_ºM»Üœ~K)Rã7G””¢¼÷ÉöÆhëyö¹=rÝy•2]Aan }â\‡<äa_ãY›5nMÓ Ë™×v|°C)–ÚBet‰(*òå<“¸Èâ¾|®Ið‘tž‹|¤¸%2\Vå«nîäàG&¢ïššcÆÙ‚û…":ã2Y+3Ë!{F³“ÅycSÉOÅž—1¤½2 ­ˆˆTë! »#vFÏwYü£qÜk;¶ø·9’ô‘%ki†S• ¤$'fÐ2xjl)-¥+YZ€¨€ ¿Í¿µrÒnI¾»©æÒ#<­ƒvÚŠ’RJI«”žÃó®’…¥Ô%ÆÔ… ÉPìAõ¡T¥( xÓP®ÖÛœeÒÔ÷êðFP¤íÚSÇ¡@<æ¦ê|Kö½7©A:BœÚ–ò‹ªhôË€,œ‚®Gaœv ´À²øm0ÄÉbG‰}Ü'sê=†0àa `$Z©ÒѨ޼6ëÈqç󌀊u(Ø•“ŒðáÎ3Î3Q:.\¸šjeLaHrâ¦XuhZ’ë$’ß9Æw©\mH>¢¢–ýÁ_j%nµÒ“‚’¥¬U3èÚ•JˆÉYÒ®|á»›rÏo™»¬ÇLÊë8ÛAĸ¬åÌ„Êç÷³ØT¬˜)’ˆ‰uÇ Žê ã*R}ÿ:¤èÛ„"Æ¡¸Zf!¸Ih)¶ä<·Ô’€¼¾âs”îþ¨ä„ç¹ÅX/ŒõݰLêº\D¶¸ijKj $§<ý3œTíìN}ÞÙÓ,5¨uñ2—VúX!;Râ’£»ˆÀü$àuoÝ­1îÂ"e¤­¸ïõÃx+;Tœ(Û 5I!õ}ªÉYGQÆöo_Y( #Iü=䤎wΦþÐ~D[C1£¦J—pH,-Õ4—MõJH$ éßÄ/w¸úDÅ‘Ø7Y¬˜ñÑÛNe´-JHÊHÆìds€=y¬éTºÕÆ2n3›·NKÁQ³bîw¨ÝÉ €MQ-/°.V„ÞŸˆ¶Í½¯ô÷ÝB›W]ÝÈGr“ÂNì(÷­ûšâ‘oÊ–J„Oû ·-®ŸŽ˜F“ïØç5gòBÝ'MH“d~ØýæaKÊI.¥†R¤¤~èäÎ3ÅnJ±Ç—q´Î–§—mÞ[YÂw©IÚJ€úý R'?fÿîþâ´»mA63 JÖ¼ŒyŽìn⥯-;ñ«l[_QVÛ»m6¥´¢PÒ;ÉÓrÜúœS$Y¼¶ß>Å&RÿwÒhË‹+ÈVÌœq’jbÑlïµÈzT©.u_}ì,àÂ@ æQ¤E:{Q•.¸¤KÉnC†NÞº³½$a#n;ثƌèkŽ»gñY†¥©JN ð‚®J³Nø¦–”¥@¥)@¥)@¥)@¥)@¨C©i·¤·¥9—S¿þñ@œ }MKUwYþ 'þ+þf™bm*·Ù<Ðq: /ÅG‘÷Šfö•/çë].¹§ÙD¤h­1²Ž”Hò:€ž|å@cøŠétº“RCÓÍ%sCÊ uA¤ƒ±¤czÕ’8‡Ô’¶X¼0ýØÛÛCûºB^(ÃKI#ð«×¸¨}g¥—¨dBy©-´¨éqµ6óem­+’FJJB†xÈæ¥¢Ú“l'Zs,Ňá… ’2œ$Ò á«lÔŒÎ3žS^ßJGŒuöö+j¶žÊÊGg¾íâÚÔf$½qˆˆò :§ÒáöIÎð¨;†‘nFvÛÆ£8e‰iq´C½D¤í ý2}«]­#"*a9ØfÚ}‡›’ÒßiAÕ…©Cr·nÈç'œžÔßj]Â9bNèQŒ•(£q#Ñ÷ÿ•b:žÜ«(¹G}2JéÇq./¨¼a¾¹ç$c“YgÚL§¤­.¥è †ßÂIü_—Ò«cCºì%¢c¶×Hˆ–ÛLR#¨GÎÞ¢3ÉVãŸljG>n.ŸT¨>¦`­;‹n¹?C‚GøÔ_ÌQ|]Á¾” +/ËÚ:I(©=÷öÆAÅa¶ix,[`ÇžÓrߊ’æ Br¢¬$g„Œà`+B6ŽL{üÙéT2Ô•ÈuC¡÷Ž—’CªÏ™òöå$7«XTBàp:Èa0ŠZ–éÇ›n 9Ï¡ø¬­êˆ²·· ‰R•1¤¿†Òi²v…,(Œs‘“å>ÕF½×-¾¬EÌ~Co'pw¦ÎÄlHAß¼gÞ¤v¬JÐC–µ1%’¸±YЧÝgsÉ ¹ÔÜÒ³å$’~öæÅÌ/G“À÷ªë:Âß"ÎõÒ"%¿¹*‹¹¶²I¿ö9ÎO¥nHÓÖǼiºNLBÛyÆ–¤¬…~,ñü*>–ðV[­¹©Ž-3_/!Nå]!„€ŸøjA<7î·¿u‡nb+’äÈe(uØ€@$…ŸÅØ{ùiÔ1î“ÝŠÓ2[ÀRÙućҕlR‘Ï¢¸çõ°õ±§oÌ\Ô†‹0¦-‚¬•W¦0Gñ¨kv”C7™Ó%–ö–Ê#2µkÞ¢ T@'ðàw>´a:-¹ ìùLEh¨$-ç'Ó'Ö´åÞ[‹t‰ ØÒvIPm¹! 
´VRTœç8IçúÔV¡Ò¦]±¨Vg›€ÂK…måxVäíÎR ®=³ƒëO–äªm‰NɈcÚ‚ jK*d6P¤…nÆÕg=³Æ>´ _ºÒ&ÆE¾x— q,­)JžB”RŸ6;¤÷ Öņê‹Õ¿Å²Ã̧¨¶öºr…žÄ‚29¨¨VìO»ÏT¸h›68a+a•$nNí®,¨nŠÞkN[>ˆÈ}˜m„7¸‘è2x=Î3B^\Ôq[½ü5MHáa•HÚ:It£¨'9ÉO=±Øg5§[[¤3)m"Qq”²¶Ù)ßKÇkJ@ÏeÙÇ¡8æn–µ4{[-FeA†‚ÂÝPl nóm ßà Ôm—B*Ùn–Ÿ‹)Õ´†Z[­¬¯p[Š ÜWÂ@ €£U–.¢€ì%H’˜]gÈnRÒÚÉQJ‘‚y ƒÛ4ƒë Ø_mɲ]éôêw¶6©[”œç_ñV¡¦½dnbê„ !ðöðæ·W¸¨aADFÕsÍn?£ä;tê±Ä#)É|0zû–ÇH§~qÜqô¢,Þ-ÎÅ~Sw‹”ºê_ICgÙG8Ƶ$êé“ ˜H]ÀËlº…Æq; '%C<ŸLúÕzݡއkè QÕ-§#­§q çhZV³Ç'„ã«ró¥äÝ!ÇiÃhiÐyÈ[D¸¹“»Ê®;žçŸ¥8‹Ë pжä7àš¸ã…'žQê¬`ÖŒ PÜûsò¢[ç¸ã*BLt¥%d-!iPóc*¿µ».Øãòg:Ü•2©ü*V‘ælå^qõ¿Â«¶]&Ñd*ía2”ñü:ü9HFÅq»vå`sŽ;zÔÂ¥gêëT;${’ä#l–’ó -iC®ƒŽ•O5:û°ÛŽ<âi°Tµ¬á)¹$öG—¡å.0ãÏŠ[0ãÄyoÆ+P ¬­%¾|¹É9ÆïW§XP#…dVx#”TKh™fnìÕÆ2`/޳ޥ!'ú¤“ÁúVÛ—8-?‡gFCÒye u!Nÿ²3Ïðª¸Ò÷Té¸6´Üb§Â/´ÚÚ6R7«pVH<1Ú¼#FÊiÛ)jltxXiÇÚÂÜ «qN7mRO¦àJy æ™;]î,ø~¢Xÿ:JX me]ûe=ën è·ðd³%’H2àZr;ŒŠ©[´S–önͳ*;ȹ¶è}ZRÆå-JN<Ùˆ)Î3ÈÁ'2–5ÂÑjv:&²ä…È‚´)iCyNQ’w(à)Dà‘è1HäžÜ!³1¨oLŽÜ§F[eNµ¢s“ZÖû¸’ÜõKiP¼ŧ Î$Œ…nÈ8 ê.é§$LÕ1nŒÉe†ÛSea(QqÄ£w‘C;óp¢2žqX'é©÷+mòÙ‘P›ƒÈ}µ0ÚÁB“³^nAéŽØ<šŠž]æØˆmL]Êbº­­¼_HBϰVpMyfU®=Éè­Iˆ‰òÔqêz‹PH)ÎOðªs4¯Ä‰¶K)Úä)Ô—(t<•oÞO”g''ŠÉJKøì—1··><¦H—×Òe /<$G<ã>õ¨fVK~¢´Ü#Ë~-Â2؆µ!ç:©ÚŒz“ž|\Vw/VÖ¢±%Û”4GpÓª}!.ìœàÿ ‚^¸¦ÎiÑšfD“%¢–”•Œ¸¤)@ö# àŒæ£_Ñf¹ñ“!µÈY‘µÂ¤uU“´ïʇ¡JòÆj*ÙïL©Q÷œbAŒÔuA YÚ=xWøTU`éEÂÕOߘ•;!ÂmÄdtËhNÏ‘[‘’GppG­T³n0íý/2<^ª¶7Öu(Þ¯a“ɯ©¸ÃTÕÂLÈæ[iÞ¶£zSîSœQ7Ë,‰—VgDráQ\jcÔ)AYHÈÁãv<{Tzô¬×u*îNÏelŽ·Mµ¡gÆöS»fÐeC¹¨&ºXä°ÜÇ®÷ŠöRñ­¹´ŽùÀ;J¿5¶«¬¡Sâ¤Ëÿ1—“÷ßìsæïéT„èû¤ &3°Ÿ-Ü}--µ-–†VŸÞQY#>^1Ú½³åa¬ËeÄ¥ Óíãh?|§²„¡c€V@J²}+["ê›”Ï0Q62¦€I`:’à¿—9¬WË’­0 ±ÉJÒ•„-))Jˆ¹<à‘Àæ±Ú- [äÜ_!§—-r·†ÀRr”§õàwúÖ½æÝs¸éõBa n(ouM+fЭà 9àþõ&ËIȸD)˜²%ÇjKÿæš[+sýNOð¯)ºAT§â¦tc%„ïu òw¶=Ô3?:¯]tÄ«…ÉÉ*•(”ˆÉ“–J–‚ÊÊÁiYòäŸ^ÝëãZf{'Ä2m»âG…Ì„)Õî;”N#ŽHǵ´Ëû-[ØŸmÂ+!•;䀥îÎyÁ#ÍKžõM¥®Q¬²â36!~LÄJRÝK®„”œ•”IìjàÛÔÛ¿m½³ô õJRP£QÅ&Ë[2ÛLWÄ~šÚÃŽ,”„„§ûE@Ýêj«—Ý6»¥®ï3É¢Cn ZR6c±ÏtzcƒA#d½D¼Û‘2*ö ¸¦T•—¢• à㠃؜֪õ<_ÑiWX:· )woÝ—B:…°sÛH=±ÈÍkÚ4á¶Û#FhA*Lßàèš8Æò@'=‰=ëTi5ƒ—ŸÚš[É’Z Z·jNpH 8$œQ›óWå2¸ò¡½)uH”‚[Vv¬rpÓÁÁäVk…Õ¨2 °ãRf9ÒC£(A=·+Ó>•g°Ü"Àº¢ã"ß2lä«t…2²¢i*ü gÞ¦%ÀT†`#zc<ÛÄ%8IÚ;éEžÍjÎß×iKOõ¥7Ö tÊÒ²ŽùÈJÎ1õ­‹ÍÑ6¶ã ü—d¼#´ÓnRŠJ½HÂO­BÆÒ«gY»|/ÇÊÖµ•¥²RT€Ò•œ'g·ñ“Ô–6¯­AfB°ÃCëO ¬-8A󟥬=XÜçZnÝ|•-òö„#¤ ÔŒ+r‡!HPã=«ìY?Y‰=q"‡‚¤¡¬¶¥´ ZAÎr0FH@5GIƒ2Þû.[1"·%æØâÔ6¨ Ä(g>£>µ¹#N\Wn»ZÚŸ6É¢I@[ . 
[binary image data omitted]
celery-4.1.0/docs/images/monitor.png0000644000175000017500000043575413130607475017337 0ustar omeromer00000000000000[binary PNG image data omitted]
ŽSmücõÈ›éEOiSËËÌ«™ïÐþ!ʵêt5iÿÅþb±¿Ø_ì/öû‹ýOnûû|>´'Ÿ|Ò0#5+®G¾“P\ÝŒ@íɱÛí¡uf„úZ3µ·Ç0ŒN…lfÚ\aVµ¢šë0Ô^Óæµj^Ìë£+Dôwµâ¥§§3vìXƇ¦i444bèþà§Yâ ¾â,0«…AC¯2BJxÖ¶ixuä5(upMw(œ9rQÕÌÈ‚¿MaFp4VSuE„ý¢¿Ÿôx<öîÝ˶mÛhmmP÷ôÃJ}8© ²ú] ý`Uoõµ8Ñ=Âjû©j‹ž6g–St®§ù]} ¨ÓÎÔ½´ÿb±¿Ø_ì/öû‹ýÅþ'·ýý~?šËå2ÔÀB³‚{¼:Ú;ÔÑÝ¢9=Š!ä¨G­läKƒ2[ô‹þ¤?>>ž¸¸¸Ð&%‚ ‚ ‚  Ã@;\Vfh—7ÓŒÐaèÍPFý etQSÖ¦+¹XhͰèp0Ìÿ+qÚ©œà5f:šü±ÐYIC#¸.Cý¢_ô‹~Ñ/úE¿èý¢_ô‹~Ñ/úEúu´C¥¥†¦©QøŒÉœ¦øjÎH#x ׎iáXÂç´ð† †¡L`ÐÔtƒE` ,8!œn(W–©¿E¿èý¢_ô‹~Ñ/úE¿èý¢_ô‹~ÑßI¿®h‡êÕ†FHd'‰@¹¨Ûë{˜^oãHú}>•••¸ÝîˆW5 ýG‡èý¢_ô‹~Ñ/úE¿èý¢_ôŸ ú­V+‡ƒüüüÀ®ÿD¿˜öozÐ"‚kÁ ³ŸÌîp/ˆŽ:j£‚pTfo…ò;øËì A‰;ðU£«¥ J·Ë Ð¯ë:ûöícüøñ$&&";œ ‚ ‚ ‚ œ<èºNSS%{K9b6»u@ø¿` |3St-tAàz#x:(͈LÞÀÝL4x\Ó CÌ]ЂׄN+¿5s*‚9Â0wC4Ì€èÁx ˆØÅ0P z„–®¿¼¼‚±cÇ’ššJ||¼8ÿ‚ ‚ ‚ ')))X,J¦`èPŠÿkÓ1°Z°7 Ü3 áxÔÑn#Ð ¡Ö7˜B_ôÈéF0´afOu¼ƒ½ÁQv#QÄ |p‚™€ÙQ(#¨Äux tý.·‹äädqüAAANR4M#''‡â’âà&}ýïÿ:ØB *aÕ„"Fü4‚ÿ<î]è:*¼:}ÞˆiSëôÕˆŠ{è÷ù|X,qüAAA8†aðÆKرcÆò•¯|›ÍÖ£kív;~Ÿ?èüCû¿æšÌeÊ6†‘]ëT4-ì™Ð‡`þW©uMäË ‚3¢¤ý•••ìß¿Ÿýû÷3iÒ$† É!/7'Bÿ.å .ì‘~Ã8r!=øàƒÝž¿ûî»1bDO [AAA8J<Ȇ øÎwóò˯°sçN&OžÜãëÐûžù¿{÷îåå—_fýúõhšFvv6sçÎeÑ•W’”œtLþoxÃ?Ea44­) øšÕw6øè£øç?ÿ¡Ãÿîwä忆^) áiõáÌÁ%öu zpBàF MÍ\÷ìQ§@h·‡C‡¦‘••IjjêQëGm¾¨!💽%üã™P\\L\\‹…÷ßÇÃØ±cùêw0º¨ˆ?=þ8 \pÁ…=ÓßC}ôÑNÇþò—¿ðÍo~“|°>önøŒ¦ÔILÞãô{KÃÞ lkJåì飱Ÿ-‚ ‚ ‚0Ø())aøðadgg3a¶oßÑ+çCø¼=ô‡Jaa!•••Øl6|>}ô7näá‡JRRbTü=÷ÎpÍ€®iæáõZp}¼¹Î=äË,ûh9Ï=÷iiéx½^VºŠk¯½Œ@<ºa„~-˜ùD3þÀ=°ŽA ö†Šsµ=ºAsk3V›Íb¡®¶ƒ´”Ô^ëu¹„ <Ʋ–óôßÿNjj*C‡Åjµ†ÞÍè÷û©­­å§?ûcO;ƒ0qÂÐõéïÉÈÿ‘xôÑG{ÑÐÁ»3æ±ø7Ÿc|ÿÌcN»+ö¿{'ó…zãû¤Çø}bqóåËOñÑaóˆιí?™_8â.]Áÿ<»5@ú™7óµ…#¿ý¼üÄ ö„ã$ýîùÚ|{r>‚zÞ~ì¯Ø¯YÌÅ#9á¸÷ó÷G_dô×å_AA„#S\\¤I˜8q"Ÿ~ú)>Ÿ¯ÇSÿƒÿ=öNßúÖ·8pàÿüç?ihh 55•ÚÚZÞ{ï]n¸þúÐ&ÿ½õ ,M0 Ã@GG7 Cm.`À†®cèË>ZÆ /K^^éééœwÞy<øàƒü÷ÿ7>ø çwéééÐÔØHff&ö¸¸^éï }ôQž|òÉ…u,€Nûñ¹‚Ø™0%54Êý»ïð±õ_ÏðªƒÝ†9¼i¥%µ¸ÝnÜn7ž7>Ÿ/ÂUGIi -Áó-žÚ¼áó¸Ù¹£„’Ú–@-ZÚzq^Á_³•÷JjÈÉìÇ?YŠ«J©l è«/þ’7ÞXA½¿ä‚ ‚ Â` ±±‘òò2Æ@NÎ(..îq¡àú¿ºnÐØØÈ!C¸å–[Ðu§ÓIff&«W¯FFÚ[ÿÝôm†aD¾c‚½DmD`Ù²xñÅÉÎÎ&>>§ÓÉâÅ‹IHˆÇï×CNž®anlyÆœª &Ô¥“l„vN¬©® ââ⨯«ƒð€nô‡»F§üõÒË/‘™™I||ô°sìH1±fÑÜó§õÜ2wx·!Ó.ø]3¦‹³^pžËƒÝH̱p¸s¿ö 7Žqôþ¼BùúO!m.cSbŸ÷»ÝXá8ü~?V«µ/ +@âxUêM݆%|ø Œ¿l>}˜Œ ‚ ‚ V>øà¶lÙ‚Ïç ø´AÆOVVVè÷œ9sxûí·yï½÷ðz½ø|~† +äÖ[oÅb±DÄk:þGò?\ú!555\yå•$%%áv»illÄãñÃôt#üê¿úï¡ÙèÖüÞ’/Ö&û÷ïç­·Þ";;›¤¤$ìv;‹/&11 Ÿ×±Ç@èºà´„®ýbóÕy±Žš×÷²X,dɦºº‹aÁn·S__O\\ñÎø.õw…Fuu5eeedddpæ™g2iâDBú}>O<ñ{öì!%%…X{%Iÿ±Lûÿæ7¿ñûHvE͆׸Æõ<üý§WòèWçâ4Ïo}/¾ž§>¸õyšŸ½…–/žáÞ»îä­-ÁüÅã?¹²ëiý™PüÅ3ÜyÖl=òO‡Â»XõÌ#Ì»ÓÜÛàV^]ÿ{®›ž8[¾ŠGîýO &vëÓ[xö«“ÙúÌbîX|'§/ÞÂyÏïä×:øó —ŸûÌiûÞ£.ê>¤µ+JÉ™sx9€»˜?ÿðy†^¿€š·_bS#s.ÿ×y¬ûëüWPÈM-f~aàªúËøëã¯QŒbÂ¥wq×Ó¡ø.¤q鳬­Ò¦rï}ƒÉÖàù¿3æÛ?fËùý'<þoâ,ZÄÿ÷ÀÅ$âfãÛÏðÔ{›‚)rÝ·ïaáø J—=É;ò¹zB O¿¶ç9÷ò‡[{±ÖIAA(µµu¬Yó?þñ°Û»Ÿ1=kÖ,fÍšÕéøÏ~ös<ÈÈ‘##O±|ÀÎþ¯¦i,]º”?þ˜óÏ?€5kÖœœ €ÕjÅf³Eø¹áØ´ù¿AçÂÞº†Ú_3Âo·ß¿?¿ûÝïHJJ"99‹ÅBKK <ðMMMx½ÞN‘;N~òðÃŒ924Ânt¤nYÞ(¯ÃåæpÙá`ïExwDÍbÁf³a·Û±X¬X­VtC§¢¢’ÜÜâãô«%ñMC§¾¾žøøx Ã`ê´©´¶µ…6N0 ƒ—/§µµ•‰'b·ÛÑ4 ¿ßOZZZØ Š~àæÊ>š÷”¸v½À_aÁƒÏ³ó?Ï¥}ÝsÌX4ÍÍëYþíéóS¾ dÙ–¯3œZŠkˆÊJ«™ýƒeÿÎwyê¦ñ$[+CÇM€ä°¯ ?ù=î,bâYs¹ô‚Y‘£Ü>Àõ ?|p'£ÆÌàü+.d|¶2‚\"ðÉï¿ÃÞ 39óÜ 8wr!Öžž7iÞêF¸êôaq·¸ªøðÙ—(ºà6îÍ-åïÏ~Â#ßûgÑÜ{o&o?ñ/½·ùwϽÿßüèñ·H›ºˆï_3׎xü¥§øYû·yôÆñJ|Ï’sÎuÜ»ÈËëO½Å î œÇG¹«‘!.?ŽQgpAá{|XêäÜÛngJv.N`çËòÔ'U̼éÛ\1ÁÉú7þÁkÿï÷ÇÅ#q5UиcOï(dÑw2<¯ûY‚ ‚ ‚0X°Z£õÛ·ogêÔ©½¾~Ïž=øýþØAçÿHþïYgÅ{gÍfcÅŠ@`V{|||0ƒÄÄD<ȰaÃé‰ÿˆ_ ù7G畵ð Ö ]îÍ›7ãp8HMM MKv:ddd„z$¢iiiaó¦ÍŒ1"8 H]îfˆÞ„À,Àaƒ¶ö¶Àº~»-´û!Ag3*Ž9šÍjE÷´µµáp::é×G<Ô ¦ë Áî=»C;*&8ペ…epÙå—sÃ7Ë'¬ßãñÒØØØI¿L/lˆ>[òÔìXú7à.þ߯na4À•ßçóßü›³?É®»ž¤}é߀¬|ýWÌMŒ»îûŒ\--d̽š)“³@ñÈœy¦9òœÌ5wß§¤Èy÷ÞËØÖªö|Á[o=ͧomä¡?Þ¹ßsèlîºk,õûùäµ÷x|í{\zÿï¸bL¢€[î¼ Íl_õ&¯=±–ι‹ÿïÖiÿHçƒÔïú3™§µ˜zÓCÜ=¿üÅd?û 5SoâwwÏÇŠ›9/Qz`?ÍÌ¢ìË@÷Þ}1…óoåÞÒí<ñÉ{ì¿j<#ƒñ-ºŸ.´±ƒ·Ø«8ò˜6¹K=LŸ11À]Ì’Oªpžs_›XÏtñÝ_gû7ÅŠ/qñÈñÁ‹s¸ë—1-£Ÿ+± ‚ ‚ ô!éééÜtÓM¼ùæ›ìÚµ›E‹® 9ÝÝ¡ë:Ë—/çÓO×pùå—SPPÐ)Œ9 ÿHþorR×^{-K—.%11MÓ°ÙlË=¿ûÝïøÆ7þƒÉS&Ñö ,zh£ÐzøÐ&Á]ü˜;gÓ ÃÀn·“Ðé_||%…sÿãlé6­fXp#çw 
D/y™ªn€7”³ÁÇË·ÐÌû¯-<²èc¾2¯ˆxít_¾7΋;øÙÙc&3mÚ,.¾ñ>~wÿ"`+ö4‡Î[S ™6m³^ÃCÿóS÷^]Œ°f0yÖ4¦ÍšÏ­ü;g¦Ñøé+ìhëáyül]µ ç93ÉŽÒçÒòƒ^´Ï‡HÍì807´c¥’3Õçv&$-÷ðà ŒÎ·§u]6>¯ÇL6‚Ôµgcs qg mf iS+Ž¿ ‚ ‚p2~ü8î»ï?Ñuÿýß'8pà@·á›ššøûߟf×®]ÜsÏÝœyæ¬ØM?ûþ¯×çç /äÚk¯%>>>4ËÜãñàñxÐ4ÄÄDâããùãÿÀòeËè¿Çòmg5<AÝxÀÅg0dHßÿþ÷ùÓŸþ„ßïÇb ¬µ¿ÿþû6lX̼êºNuUŸ/øC”)š99žÈåó6›•aÃ#ã5/G·ÛMuuuxÿ> † ÉÁj±„âPõ›3À|û¡H=¸á¡¹Žßb±ðÙš5Œ3&¨GÇдý/¿ô2;wîä¶Ûocô¨Ñ1õ‡¶qPÒí‹Wý=ÉŒŸ½~°™zÀôï½î2`( ödÆO^l¦¶F«®­Ü{Ö4ÿf%ÞïÏÅÆ.®ÒÆCwûù×\tÓ¥lVNµÍn5p›ß‚)¿Е>™Ÿ¼iðš]<ýãY¼ðæt¼Éô`du®žç:1'à±¶µ´Ñ¹ °!?6Õ4ᆘfg;w—iÄ<ï?ħ%0÷ÊQ±/òEn·ïŽÙŸ‘Ȉ¢4øôÍÚ7Àçm’qÚºŽ¯+|f:ʵžNé×s  œçŒïUЋ2AA„ÁFRR·Ür36là…^à?øA—qÿóŸÿdøðá\rÉ%Ýî`ækæìÿ655ræ™grÁ°{÷n4M#33“wÞy‡/¿ü‡ÃAZZV«•—^z Àç-èÒö1t,a79üº€ðëÂß=^N§“«®º ¯×‹ßï§µµ•‡zˆU«VqèСNÿ>ŒÇë Çñ ¼ÀË #2]óºn„^Õze›ÛÛ㦩©‰Ã‡ã÷]×IMMÅ¢ièºS¿ú?‚B»!iiia³Ùذaü±2õ?¬mùòå,[¶Œêêj¶lÞÚ¥~Cé}‰(Ó~dìù7äϬ¢Áåâà†¸sñÇðàLv„+¿|ÌY÷þ– k¨9¸•ÿ{.s¤ÝÝ@uÍAþõÛ_òÐÔXß{ÎÉ|û‘)|¼øN^Øp—«†UOþ‘?_Ÿ?p±ê…Xµ«{zc'L’IÀNòPØòƒgÙPÓ‚ËøëYóö¬)6µø©)-¥¾ÍÛ]Ê×;q/ [·Õ”RQ߆ßï¦ôË7y¯ ÒΚîð7SZZƒÛï§­b+ï,¯Fæìáy mßFJ)dÚ¨#¼â/ÆM?Ü Ÿ1ØÁkoo¤Ùí¦bç¿yê“F˜z>GxÑ@LrFª8¸¯†6·c¸t¦“ÆOžâß;+p»ëÙøÆkìÎ:]Öö ‚ ‚ §uúšó¼®ë´¶¶1~üø#nØÿ×0 ¹¹™Ã‡“‘‘Azz:‡n¸3Î8·ÛÍf#%%…!C†ðꫯð—'žèÒåÿÚ”e±ö ﲯ×ãeÒ¤I|å+_á¹çžÇn³Ñî÷óØcñÐC‘››áߦѹà]ÓÂkâƒÃöuñZØ9Ö”ïꋵÀZ‡ŠŠ eÓ=ŒŒ œNg`jC7úÍ Ôx!°éŸÏçãúë¯ç·¿ý-©©©¼òÊ+ìÚµ‹sΙCö,jªkX½z5[¶n%gÈ|ºŸyóç+Fõ÷3éÓïfç[MŒ_4?Þ8¶àÁç©~ä<’ÇÝ•-\9ïf<÷ƒ@€EOÓ|ñM|û7·²ð‹ú0,zðOœ ë ¿ÿÕáÁî´ ÜõÐW)´öð<°ï‹UPtÃbt:»-xÂf Ìv°‡‡ââ 0íÇ_Ã÷ojç·/=ÅÞ &7u?ùƬØñönÒ˘8—©Îµ¼õøÃ¼å<—ßýáFf}í'Ô{ÿ›·„·‚לsÓ÷¹qrŠiÐØ[;‚ ‚ ÂIÆÆ™4ir§×ö™X,f͚ɚ5Ÿ1f̘#Ʊ'\ý_·Ëòkëj¹ñÆ1 ƒuëÖáp8IJJÆjµ±}Ûvþú׿r÷Ýw‡®íÊÿ5 жmßnX´`À Í܈ xAHvPTbR;wîä¹çžCÓ4êëë™3g‹- _L1Üø­iá n6h Õ!jðàtûð6… ´¶µb·Û1 ƒŒŒLâôЮúGÖ*ó-F`g~»ÝNqq1Ï=÷‡––ÚÚÚðù|Øl6INN&..Ž[o½•1cÆà ®¡î‰þ}û÷1Þü.7H„À+ü}ôQŽDOÃÅÄ碡¥›-™äd[×çã“IVæ–»Zè ™ôdøZhhäôäî&ÿw‹«¥Ä'§GNa|®Z:|1Ïáj¡¡£ë´ýîfššÝø±’’‘ÃÚ)ÍM͸ýàHÈ %±³‡ÞÖ\O»ÛÖ²3{y¾†'¿ù0ÜùwÏʦOð»inscµ&’˜h=ÖÈhnnÃêH$Q)w[°LS:—™ ‚ ‚ œäèºÎï~÷;n¼ñF†Ža¬ZµŠÕ«?¥°°€k®¹†ÄÄDZZZøãÿÈ=÷ÜCvv×ï¿óλŒ=ò˜ü_s(9+3‹—^z)Øà@×u\.•••|ó›÷0aÂÄný_=°Û¿úòÀløÀ:øÐÓANGhmmaüøñÜzë­¼þúëdffrñÅcè†fú50×Õ£Ì*ÐÍÑöð+45¬<§®[0_žgh¤¦¦âru€éé8ü~=TÀ=ѯizðµfÏ„xÜ&LœÀ~ô#^yåvïÞMrrrhÚ‡Ífã´ÓNã†n 1)‰¶ÖVsïÿéÃÿ6'ééÎ^Ÿw&§‡€mɤ§›Œˆø:IH¦K‰Ý¬Ž2ºó¹­R2ºwÊS2H<Úóm•ÔQÈûÈñ75§Å<ÿØ‘‘’ÒyÿGb }•‚ ‚ ‚ 6JJJp8 6Œööv^{í5JKsöÙg³uëVžzêÿqÓM7’——Ç”)§óÅ_pùå—wŸqŒþ¯éÏÖÔÔpùWpøða*++q8Øív’““|Ó@÷þ/h[·n5дpD/HJJ"##°–º¼¼ŸÏw±ôž¸¸8|>_`?•þîâÏÈÈ ¥¥…ÒÒRJKK),,¤°°äädêëëñx<½Žwß¾}œ{î¹Gùï)G=ò/‚ ‚ ‚ D°|ùÇ466pæ™gòÒK/“žžÆÕW_Czz‡>úˆõë7pÉ%ÏŠ+¸÷Þ{»Œïí·ßfÔ¨Q½PÐ=‹…äädüq*++1 ƒ[n¹…Q£FÑ7 mË–-†fÑÌY¡^õC]¯¬b7ÂëÙµÀœ„ð΂¡é ¡sËñ %~å°¹O@8ÖÈu gK§0˜k!އ~«ÕN\œGœ׃Ûí êQèß· Î]Эó/‚ ‚ ‚ œxêêêxöÙgñx¼œ}öYÌ™3§ÓÆûöíã½÷Þ§££ƒóÎ;3¦wß¿þ¥8ÿ}äÿj )))TVV’••´µ´ÑÿÕ móæÍ†E3730‚p`S>óMuZè•u„"0G”…"Œà+ò‚Ç”u á5 æ«öÔˆkÌuûZp}¾š ¡®O›øú•döìsHKK;ÁÕXAAA8Qx½^>\º”aÇõ¹ÿkµX‰‹‹ÃçóâóûzäÿºØ/-b/zBëBë¢«ß Øi4?2’Ð+BCó¡SáóáCFÄõ†š¶þDÉTøúÁ¡ßn£®®ŽÔÔÔ._!‚ ‚ ‚ n***°ÇÅ)£úô™ÿë×ýt¸:”ë{âÿØ"ʻߞj …FºÃ!‘GÇÍWZxc?%`èMÁ s§}͇ 7_I ôn„6Ë‹|m^xcÁÁ¡?33‹Û·PXXÚ»@AAAüx½^*++Ù´i3Æ'‘ ÿWÛ´i“…6bœŽÂ\Koþ0§ D_b˜¢ ¯ÿGž©y&".%­ÎׯÚâopè7 ƒªªJ</~¿Ðéìå/úE¿èý¢ÿÄë×Д÷ÿ >ýƒ½üE¿èý¢_ôŸ8ý6k`J~fV&v›}Àè7 =0ò¯ö&(ÃÜJ"ÁÞ‰€”5óF8YeM|8¥ð93Sè ç4\¨ðF°С`¯‘"Ž "ýšFNN. 
ÄÇ'ÄÐO×úÍxÍšq¢?Z&jQ´~#ôU3Âq)%‘Õ@ùGæ_ô‹~Ñ/úE¿èý¢_ô‹~Ñ/úE?tttÐÞÞHÈÎXP'Þÿ5 °†Ž¡YÌ¥!1áozè"Ý,~#à›{jA1áï‘å®îÊo¡‰šùš¾ (ŒÈë” J#&´Û¿š6èô···ÓÞÖСê×"¯‹˜5Q?ýS‹D¹0Ô»ÌlH§bÅh(‡Ãyý¢_ô‹~Ñ/úE¿èý¢_ô‹~Ñ>a„ôÿ7bÿ`ü¨/À º*LuË|#˜›P§Z0Lø‚@™ª%l ÌkPT@d¿MØ8á° övˆ~Ñ/úE¿èý¢_ô‹~Ñ/úE¿èý¢¿kýÚ† ”5ÿÁÀ0ÌÂhÑç Ì ÝÅÔ)NõˆšùrBóµJžµðzŸèý¢_ô‹~Ñ/úE¿èý¢_ô‹~Ѫë70°˜§ Cé¨ ­3ÐëÌ8 Cé³0:'ª^k(i…‚‡â7ï6T{-Œˆ¨”ŒQ©ˆ~Ñ/úE¿èý¢_ô‹~Ñ/úE¿èý¢¿+ý¶W_}UÞ9/‚ ‚ ‚ ')†a`;”¶TœAúœÛ/¾ ¿%ôÿü÷‡ý-AAAB†ÍbÑÄùAèC,iSAA„ƒa€Íj±ˆó/‚ЇX-–þ– ‚ ‚ !#ÿš8ÿ‚ }‰Eç_AA8Ø,2ò/‚ЧXdä_AA@†!ÓþAú™ö/‚ ‚ $dÃ?A„ã€lø'‚ ‚ $dÃ?A„「ü ‚ ‚  ÙðOá8 þ ‚ ‚ ‰à†'bÚ éé9$ÚÀãk¢º¡º¿ó.‚pÜiÿ‚ ‚ Â@âøOûO:“Ëç\ǤÜaÄ©Çý-”úˆ÷W¼DYoãtÌã×}‹„ÃOóøŠ÷OÉ…ÂÅ—?ÂÔ$¾.Bجñ´T<Ëÿ~Ú>€t ‚p<‘iÿ‚ ‚ Â@"¸áßñqþ†}•{νˆ|ÔÕí¡®ÝK|Ò0 ÓÓÉy5_6ƒwÞú›Z{qbiv°ÅÇ €×iÅ“™œNœâü.Úu°E±€UÓ° (Ý‚ OäAAÇïUi—sWÐño­ZÊëþƒòˆós¸eá7?ŒË/YLå릦§që4Í2F×,è´°üí{ø¢›N kÒåH÷@ÇÀÕàÂëˆ#9ÁqÌät;f úÛÝ´th¤dÆÑÓR5|>Z|Äg:±Sè~Zë=ø5ˆ¾S ÃÀ’àPÊ©ç ˜r0|ÔWµáÕâÈʉ':'î¦VZt’²’Ir][exÝÔTyHÍOÆaC704ã5;¿Ç÷¸î§µÁlGÓ±4«g’¸AQ9;ãmuÑæ‚ÄŒp½2|^šü$‡ºfèh²u ‚ B×·‘ÿsμš$ÀSóÿ»ìE°X"³æ5¼´ÄË×nü6ÙñgqöèWxg¿êþ'“•YH‚ |®Ê›”s!­ZŒÑµn®;.„Ë-În¡Û¿÷»Õ ©™£Iu$»ŽCu‡“Þ]>GK^~/é5ÿâ­’Mº£g0÷âÿd,Ûyïó×i²žþ= m㣕ÏPÙÃØ9×pÓÅcØòùØÝÞßyíI3¹õÂËIöûða û @ÃbÕ°Yãi¯ZÂ’íë{í€)m"gM™‡C?À¶Ò¥ñj{ä#=ãΜL]Û”¶7UÖøó8÷ÌB*+_¢Þ¯ÓvÈEkŠÜŒ¸£ŠïHôxä?é n>÷r,~,Øl6¼ÍålZ·„5udfØT>­ágÂqv|¾æ1J¼®GþMÜzñ6ôy]3ð79ð;:ˆK°{t‚ ‚ ')ÁÝþûxÃ¿Ä ˜œíjXõÙKXºŒ{+÷–ríØB’ãS±hµŒò.7xeøÏÓQ̪U¿`CCŸjÜG¼Îq_]t7ÖOòVÅ0.Ÿ¹€l‡ð6~ÈSï?KG”¼IÿÅ­“'S²þ!–ì‰åk!9¾#"v¡;kôm,švv%¬¿‘];þ··mfrËu÷1Ô·§ßü#µJ°™ç>ƹyÙ”ízŒ6nŸpœÃmWÜMvǧüóÝ'©=†òñÔ¼ÁŸ>z³ïêG°‘7†ñù籺x3MГfrÖ˜1$QÀ˜]¯±¡ÕF"ÇžÅФ-èšÖã‘ÿä¬IŒÊËaÛñùíÛâH&=5‡$¿ ¿=T‡ ðÑÚÚV'6Wr7÷Ù (‡¸LrrFâd$®úO)ÖÃÞ¡¿ý4Æž9ƒlÀhM¤”Æ£J"!um.3³ŸçÓ:U]2§š ¸¨k(ïbop ~tO÷y yWŠî¬1‹¹}ú4Zë·²«òÉY39mH.ã&?@ºóx~C%­> ~²-ŠÆñŒ’ ÀÐÂ3HÚ¼5Ôy‘Z8‹\;xšªi°XqÔåÓJyÍ~X¦PÇòÆΰx^ô!I ž‘9ãÙÜ^Œ¡Mah:´XGƒÅ‚æñÐXÑLs[ –”¼TÒÒíhn75õi©P~ •øB/àÃg ØÌ×ÖAíÁ6œ…é¤%[Ñ=^+š‚ñØÉžDr¢#*žôiØ;\TW¹ˆKŽ'#'g\;k«xá_«ßíçq÷5·Ã¡xê³å¡ z»‡†C­Üf©ù‰¤¥ÙÑÐi¯k©Ñªûº)­Ëëúд÷üœél-[Ó``MšNvðœßütuPy°šÚæÀï¬á9äq¢¹Ú9tÐER”j 5/›Â‚šËþÌ3_XÉ®Q·ç#ÇßUÇžmŒ‘]ÅÙÑÆ¡*ƒœLس«Ž¼ñ… I:òýÐóiÿò,Ùð þuÀ¼¹ ¸à¢3yä5\]ö%ï–U¢wQ·-€îöÐPÙLK[ÀöiC“HMµukûãƒANÑd⢩ÝIÎä ÉÞ¶…ÆP]ë ¹²ƒªCvì Íø‰dæÄ‡–t•GÍã¦ê°Œ‰ÄëE[e3n§ê«éðßÓBU•—Üû|eXXÊBжljèjJ¿YniL;ë^òÜ~¬–ÈË~w1+7.£Ã=í ÿÃÛËk;vÏ/áÓá_åÖYóÈs+Ó‹ïggÙaN+*`DáT>kØ–}YfR‰§1Æia›'ðslaÀÑ/=ðËÌc*èŸÍËUè٠ÏÎfWe-E…“ u+ûÉŒ*˜€qp/ÎÌI vUí¢û1’náÛß¹œ!Á™ÜÕ%oòìG¯“vú·¹g¼…¶Âï<“ÊÃÅÄY-øÛÚÈ÷cm2ûþÆ‹_¬@KºžÅß¹Šì8€v®ù+¯îÝÂÐ1‘ñ^û¥¹×qvá|íXþÎïÙ¦µ?$!9›-¼q¤îî`èäïsß×§‡6Ø,ßý&omx–¶|^ñ؃U2V9ücù¾®¯;Ù²nGÖÙäXA£Ã þ8ò‡M|€-®×ECû%\yÕ…dgy×~—v.#ÔW¸uêjku ÏÏ £~5+v¿ƒsÈõÜ2}»|Ê‚®%ÉäÝÁ7/˜Êç;þÁÁ®â}·Ž7ør|ïk3¨/‚õ=X–Óã{&h›- ‹¥!x°œek^gÄ¥_aÄð)PZ†ÖEݶ¤‚=ý«ÜÓ…dÆl_ºý9žýüclFÁ µ!¾!L›X ÿæýâtnšu&3ò2ù¸¡1˜Íf]ò'.ÉÏÇ ¸·²äõ_R–’€Õçíúþ|/ÿyËhV¾ÿc¶wä³è¶‘×´ë°¹dX‘wó›oLåù÷Ÿ¤þxäMAas\6ü³£òúÚz5bœ2âÒ¶òçùè`Eĵ›>{Š¢E‘—5!–ƒÁôÞ\8ãbû—¿fME+V‹…Ò=+h>m)Cç0ܲóÏúÑcÏ ¨8ø1‹…ØÛ©YBîzî,Òc†É`ëæñDü§Œ8—ÀÓðKví‰ÐÝVúOVçO༂,NÏØŒ§¨€œü$nÛ† =|"qø¨mh$+=—Qà ٹ· GQVp˜ÝëIuË1•Oá©ÛA=³š[„V™ÂðB;õ»–³5.—‘£&SÀ;XsG5ª®ÇŸr‹/»›ï·®Åšy6SŠ®b±C矉$¦MaÑ\h¨ÛDi›\|´T´‘?õ7|eúÚ*ßâ_?ÇŸr5ÿyÙUØZ6²jóÒ‡_ƤÙßå&ýG|iŒ§µàÎÎs°cãß9d åÌéqá ߦæõ_?D7—š„g¤ö·ÎœLsår–:DÆÐ˘vÚ"¦•¾Gó™ÿÅمΘ ‹]”ÃÔóþ»ËëŽGÞ¬@mÝzRófS˜ZHƒ«Ÿ~:£RmÔ׮Ǚ5sÃü ±úË8X±-qà .ã»Æ'íIÄÛ³)Ìk¦¬âm\Ž)ŒÎ˜ÃéC6°×šMœ58o5åuš9Dÿa7í¡¥Û8ã‰OÏüÓ¡­m'Õ®žå¾Ç÷N°]ДAt즲ÆdFZJ2·wQ·»ÙÎ÷/¿[Ûn¾Øú9Öœùœ1ñ«ÜïåK®;¡6ô¤Ïgr”í[CMi.õ³Îdâ„9|ôé{X‚ù’ŸAñ®)ÕGpö„³¹é«?å…W~GE·÷o&k: v V7€…ĤxÒ]n¾ØûGŸMš÷ÛmÃÝe[-‚ ‚pjùïÛ5ÿf\6›#ô_·Ä%§•œÌ,œñ˜=)»%<Íݘîi)`D®óc±h½ºNpïeKE[X›o5»ê®eVfSGP~° ʤü ’íû÷w“Óó±cÃÙÑãUþ:j,M‹ÔÐ]SöYÌøw–|Éœ‚KILJÅÒ¸–2÷eŒLOQ’ÆŽö,FɲnÛ&λˆü¼ÉXö—CÎt†ØÀS·…ýÑÇZ>ý…o;å­0)û4YŒ6×l£ÔZ£Îf¨#[ÎPh]I©Ocú¬s±QÊe¿d[;P²”†YÿÍü ˜Ü´€ò]¿cÉÎݤ»™8™zÑŸÉËBå®§xmçZ4‹Æô©à ž¿û"»;À³­‘ÔۿŨ1³ø"Ø3dÆ3á¬ÿfvâHáðæ7غgÓ }tXz¾ÿ@¯Q:‘Lù][Ù^\ÂG/ÿƒº´±L:ã ¦ Í&/s4ñt¡1Aº({7×·¼ul¢¢m6£ g±a×$§žM<5ìm,eTÐù9b6VêØQü¿”yÚU´ü€ñ9ó(,+àà¾_³§ `' 
)ßgHÆ(ö6ùeE)¥•-$¦O#®y»«72tÄCGŒ³¾êo¬¯Þ×s3õôþ‰aÏn|~À‘ÈéS»®Û×Å–ò;vz€C›Ð¹‡l¯{ʉ´¡Ÿ‰cÎêÙ°öʪØRáåÜüs™h—ú`Ölý5KK3ŠÊ¼Nn;}SGd‘7ìÈ÷¯_Ó‚3°m©_«gÓö2âlâk?eÕîÏáxÖOAA„AŒa€Í¢õíÈ¿9ë}ÈIXvì>pÎMÜuö¼U¯³Ñš€5}&3Ò»ºÀ‰Ý)³hR“z~ð¶ì¢Q‹|ûÀö½›˜•9‡a£ÏÁrè5È™ÏPxk¿¤Økéf34sÓ*MµÅÔt±‹µE³„vûéNMZ)¯®œÆÓX5 ‹VÁžŠ*FŽÈatÞvíGA"øw°¿îK&ø1,s2¹Ú‡$NÄ ”•­í³òéÚ9\]ɤQ£›? 8Àá: ¶ÐÀٌȟ ™P¿oÍ‚ßí8ܶ×þƒ{™?t"ýÒ[Ø{ ‹f ºvòòÓ¡ÓSsðµƒ#Ñ‚ßã²¹ø¦ßr±*'­ˆœÃzD<ÛWþÙ ÿƒIÓ®£hÚu€[Ÿgg™†õ¸­«V–ëMki)®¢¯±øÇWG†<‚ÆÄnÊ¡?òf±´²¯~£ §“Ýø1)ãò íC*Ú¡(ÆçõÔxÃ×UT`|úiÁ_­óL`M½SJÁWEZl=Œ³•Šºž;þ@ì{º{jš%â[qñàoÜKƒ;‡®ê¶æéj© µSm¬þ÷Ïh³'î¬;a6ô·OáôÑ)\qç?¹"t&ƒÓ‡Mäc#x,å³åÀ&šOŸFj|åG¼Ë¥ç4|Á²³hÁWCZãzQæ‚ ‚ §6«µoÿ¦Úݸ˜ˆ3óLÆYß§¸›°…¹c±íÞ\Þ6 ‰úýϰì`5ñqÊö÷~/>ìØ¬^ê;Š˜ €†ÕjÁåêÝuþàu*Þª9à™ÃˆÔYL±¾gät¬À¾«:…$|ÎgNIíkØi³Z-´¶µAV¹C °¶•wþ`ùVü#rÈËŸBš¿ˆD ²jVk=¥µM ËÎð´R3s€*öVTöYùôÕ5ÅøGÍeþà©}Ÿ2«+›8Øècê‹[köbµZHLŽ’pV»iÈŒÃðzHNÉÀf°£ÅY°z m̸ýËÿâPöw¸dä"®+ÚÊeÄ'Ä{yáÿ~IC¦ÜRò¿Âi ;h´,ˆˆ'·`,‡>ûo–v41tÈŒw§O¾ƒóÛ·±²ºõøJ°sMÓÙiêü{™‘k?û9ëÊKii9ŸÅ_½ ð0´`|—·tS…ý‘7‹×ÒX8Óg|8'T”~–©¡©Üñññ@ÞC­¸râÁÝN|Î@£·/z3t.·¿qÚ0,(Sfz`¦žÞCÖ€ûoxðyüº§ÍEÊøkm…ºæ²në¶31 :/z†½#Ž3.~‚‰þ·YyØy‚lh:vy@YñslijÇi—?•§ßHÞø¹$nH&Ë’È6_vMÇãE Pgõu›G»Ãxioñcî qæ=ì>13%ââ,ƒëÕˆ‚ ‚ 'ˆà«þúÖù§uÅÍW29%‡3fžÇÞuŸtpgŽ @UåNª³  «]£©õ Ma39÷¼G(r–ñɧ_GЫëzuUÓbŒU²½ô#Fã´I‹ðf¤‚o;*Û0šþC30r×MPM™Þ«Y¨o©rÈΟ†å@ç7Ô5 'Ðâi h¨ßH…ç| RÎæÌa©@+‡*K°höW”pNî FŒ½Gø›·±78Ø7åÓ?xjvÑÀ\²€šÚm!]eÕeLMþ2Êk6:tx ggÏâªë¾Å›+>Ä>êR.›”­Ÿ²ÏÂ$ Ž@™껋¦¦vʪŸd_Þ#Œ:s1ã_ý.{n朜s¸ê²[xcé2y·Î«¼ŒƒÕ]fg!C g2&y§_úMïÿ?öºFsÍ%ó*VTRåë:¼LdSÆ]@Ù¶jθâ?(Ú±`ѰjàpžÆpG Â{4yAA8É Œü÷ñ†ëv®`Ü™ Iνžk¦i¼µyEd€¸‰,œý2ÚÖðeu;îø´3Ô‚«˜Y¶.ðîù éï§(h/§Æ§Œ [,´×ÝuÑTï_MËè[ȘìÝTºŠú#n^ǯYŽðª?-Jwå&š&M!5ãbßÂ'¥á°™—3gXÐFÉÞuA½‡ØWWMAÞ0Fe® l ¤é­ØDýé3Èʬ‹®(ßÊc_•Oÿ°Êf7Y).Êk…tU×nÃ5v8Öæ]”mÔ^ñ,Ë““™[4“k¯ž €·}++> =ï^ÀfÚÈj~ìN Vo+6¼Kîì˸ಯñê'/²|ßæ:Ÿ[o?WóZ>Ù´ûðYñlßþ …3o`Ú‚û˜T\_ñ&ÛŽPŽ«€O3BåQ~è Ús2ïòŸ1pµ×á·g2|ÖUl]õ ]h´'uSë_a؉ʛfÅìöÀdî²úÍŒM›‹»i šØí:j_e³#‘‰CfpÑyúîóìdóÞ%i·zèÕq`Áªf30¬(çJiìh`Hê ÎNçËíë&ί:½½%z|YÝ Ùc.áÒ1Ácº›úš|¹ù5*,è¦n7xàÓŒÿâœÂó¹õ–@}moXÁ';·Ð|‚êgâ,ƤBSéÚÌý¥ë9§ð"Fç§´µ'0ï’‡˜@;7ÿû}ÝçÑëÉd[ÃL&¿‘;Ǿ&¼8±X ¬–CT5Ö1,{&W^šÆ;ÿ~<¢óAAA`ÚãoÅèkç kØpÙ„À}|uTÖì¦Îå™\ݬaÁÕ¸Õ|±âÿcWðõcNÿ)³ó27•å+8XßAzîLÆfåPºíQ–7NàÆ9‹°Ö¿Å _.;¦ë¢9ýÌG™šž´±iõƒl>âÌØ<Î?ïA†Æõ |ÒÂNéçŽù@CíöV—Ÿ>±y£°­/òúæÏÂqd_Ç­3æaZËŸåõ-kC§Î8û¿™˜êêX³âçw„/ë«ò$’–”þf;ÚNH<ŽøLâ­qt´VàîaìÇ=ßö<ÒÍ4¶¶“ÆcÉÛià c~Hr$ƒÞB«·ýØ£ëã8w*=yî¦NÚ3IsÄÅ<70ꧪ'x+]èé:ޤ<âñÐØZ×Ó¤AA„ ÁÝþû~ä þðÿñJûyÌ¿ÜÄLróf“«œom^Ǘ럣Ìk ªíÝú{Ú=LÍ-$7ÿBróÍÐM(y™UåUXâGâ4] ½Kûh¯‹fÏÁLM?Ú¶°§ÝÒƒÑ>^?€q„ðFG§ô«÷>ÎrãÎ)šFzÖlÎÈ2»©>¼„v|©µn+5¾yäÚ|®ÜqîPM)S‹ð·ìà°;RK_•Ïà ƒæö@ÏDZå£çñxÝ ©Æ–þÜ(1J¯¿Šæö°ö£Õ80ò‹vZÝ}åô÷}œÇçê¦Núhnö@³¡×]Õž®óèm\7¸Û'AA„þÁ0 ´¿¼Çqù >—üä!8,qø¼47—ÐÔÝÂL{.ùéÁðz#ÕÕ%=±:Úë‚äŸö_œ[˜GUñŸXvð`/®õu%˘õñ*Aè £òóú[B¿±¯¼âØ#AA„>"¸áŸv\Fþ#pUQéªRhÝoŽç«¢²F ¯õðÝÍG{@â¦æ:„åx—Ií4Ôo ¬oî­îÞp,å#B9±í‡ ‚ ‚ tÇgÿA…sŸy=ÉA/¸­rGÜèO¡kΦ™‚ ‚ ‚pœ×ü,<Þ6:pÓÞ´Žµ»7ÊšRAŽ iCAA„„8ÿžu¬ür]ø÷ÙK„Á‹8ÿ‚ ‚ Â@Â0 ™ö/‚Ð×È´AAa ù?þ ‚ œBX,Ò¦ ‚ ‚ Ã@¦ý ‚ ô52í_AAH¦ýkâü ‚ ô%VMœAAaà` ÓþAú™ö/‚ ‚ $ ÙðO¡¯‘ ÿAA„„¼êOá8 kþAA„„8ÿ‚ ÇqþAA„„lø'‚p ÿAA„„­¡¶UœAAAA8I1 mÆ F}}=^¯·¿õ‚ ‚ ‚ ‚ЇØív222°eee‘ ο ‚ ‚ ‚ œdØívÐ\.—á÷ûÑu½¿5 ‚ ‚ ‚ ‚ЇX,¬V+š®ë†¬ùAAA„“Ã0-©AAAá$GœAAAA8Éç_AAANrÄùAAA„“qþAAAá$GœAAAA8Éç_AAANrÄùAAA„“qþAAAá$GœAAAA8Éç_AAANrÄùAAA„“qþAAAá$GœAAAA8Éç_AAANrÄùAAA„“[ áÔ¡©©©¿%‚ ‚ Âq'55µ¿%tBœAN(o¼ñ#FŒý^°`AK:!¬_¿ž3fô· A Ò>œ:”——“ÇãÁçóõ·aP^^N~~~Ë8fl6N§«ÕÚßRNuÀKÓuÝÐ4­¿u‚p ÐÔÔĆ N‡_ATÊËËIII!!!‹EVß §º®ÓÞÞ~ÊÔý¦¦¦7òo†¬ùA8¬_¿¾¿%‚0@‘öáÔA×õSÆùú†òòòþ–Ð'X,hooïo)§4Òò‚ œdJ¯ ]!íé…8þBo8¦ü›X,ü~Ë8¥‘ÖGá #{§•°§x[ËÒ>‚Ð'ËÈ¿00èÓ ÿ ÃP~¹9¼k w쥪¾â’È:ŒñSN§(;1êdÚo 2ÿ‘´U`“ 6rF‘îèYÞGœÇ'ï.*JöÓäl©Œ)Ê£»ívÀÛöóó›oc=@Ñ,{ñ&Ž=‡‚pò »yŸTWWÐÔTßß2„A†´‚ tÅɲۿ008Óþkøg7Ž?ÀÊ'à7oìê?PlqCBßãú¨Ë¥§qVmz?ÿù1ûóclmr§ BHM~§ÆK<¡çÈö§MM ý-AdHû BWˆã/ô%}îüû+¶ðçã_ăO-aõçŸóÉÒWyà’¢P¸÷—î ­—q†Ñ£º®£ëú gþ †ÿ˜óÒ›8»Â™î$H²9ŽKYFhìÆ°G•gA8 5½§ÍÍ2ê/ôiAè Yó/ô%}>íßÕTþQt 
çO+À8ÒGpÓwïç­÷ï¥`ý&JÝ×0ŽR^úßRdq;·Ï+\ë¯à¿¾È7uߺ}6Ž`´îš]¼ûÊ ¼úÌû¸ŠfpõÂ˸áÚK)J9÷4\Ûá¼þÊÞ1®hÆ<.¹êV®½xZÄÒ„ž†;°ö þþ¯òþú`_}øëLóÄ.¯ãgoüõi–>ó~èЫÿ|ŒÎTæ|õëÌÊרµò]Þxk)[Ë×SREE33ù ®¸åfŽLÜ÷7—ðþ«¯ò¬__EEÌÈŸÌ…×ÞÀ¢ÙE1GùÍ.ŠŠµoðâ‡p¤BUU w>ôu ݽO;2²wjÐØØÀƒÅý-CdHû BWÈȿЗhº®}µöÛ0 ܇ÿÍœ«>Xt Üq>gLOa^6+€·Û‡Ãံ]ÀŒYý×khßøÞõX(ìW{•{OoéQ| ›žŒ455±aÃ,XÐßRN8²¦÷Ôà…ÿʃÅ<ôÃÇŽ=2á”AÚ‡S‡Ã‡SPPÐß2ºÅ0Œ“úo°¾È߉Œãx¯ù÷ù|ØlÇg+¸hšššHMM=!iõ'1Ÿ†aôýÈ¿£`6΃GW”¼Ïc? <Ï»ú«\zå5,œ”8`#4± Mým®!¯àÅñ/ºú>î» ˆoÿ™'ß/JxàKùøw3xFqü» ÷óüFqèo~à×,íåýßÿ„%%Àûð·óÏà»Ók{îô Sœôy_}›feñåKðÌJ"iÛÆ¯Ç¿OâŒ&q?}â1V¿ôOÃ^ýÀ/8wX&ù Û¹>äøÏã×Ï~—é™.>þ?áÑ%î‚ Û*˜3|[ÈQ/ºúa~ÿŸó¡|-?¿í‡¬V>ö4%W?Æh5Ý!v*v½ÍõŠãßÿâö™y´î|±Gñ9„“ ùÃþÔÀåîÀïmÁjOîo9 AÚ ­­çŸžÜÜ\®¼òʈs+W®d×®]ÜvÛmÄÇÇ—ô Ã`Ù²edee1uêÔÐñ½{÷²}ûv\.™™™Ì;—¸¸¸.ãxñÅÉÈÈàâ‹/îs%%%ìÚµ —ËEFFÓ§O'-- 8ÉëÖ­£­­ÌÌLæÏŸh €×ëÅë*#!y7ß|s—i ýÇqèâIášß¾GêÓæ‡O¾ßéìÊ%ϰrÉ3Ì{à)»iZ¯bv—|Æ3æ÷ñÄC·“Ìž>Œæâ«› –7ÐXòÿèA¸Ý«¶°2ì)õŒþ…—%7?À‹o­ãâ¶ž…»Ü³…Ъ½K~Á¯¾u1`æôWi:ëzÔAö«þÕçqƲŸ™ópV^“+¶wÆf90Z÷ó‹Äk·“Pp Çeƒ¿™Ü‚|ΨktÁðpã^²u-+Ö0ú¾óÄŸh±ÅÉä9—’ìûp½búûžZÊíÓÒƒ¿zŸ œdÈÈÞ©Aeeà ªšzòóÅùz†´‚‰¦i´´´ÐÞÞNBBº®SYYy\Ó­­­eÍš5444••:ÞÜÜÌgŸ}Æu×]‡ÓédåÊ•lذ³Î:+f<ååådggS[[K[[‰‰}÷n¯ööö–øøxvïÞÍš5k¸ôÒKq¹\,_¾œK/½”´´4¾øâ >ÿüsæÏŸßmþº‹³§edâv»ùüóϻݷêht¨#ÿÝ…óù|¬\¹’Ë.»,T›6mŠi«õë×SQQÁ…^ªgUUU¼ÿþû\wÝuhšÖ£ºxÁ„¾ÿíÉ_2ç¬aœ>ëÎ>³¹Ð÷ŸùÖl~ãÖÞö]JvleÝúu|ò䋨ÛÙ¬|ì.Þ>k5Wä)° ~sCkèû%Î&=”^÷¿¸–ûƒ?«¿üG¿žE°þÏwqV¬ømmt´»z®Ey½Ó}7„÷(À:‚+ï›Á’?‡KÀwâì Ÿ7üÝãsHÁÅ9Y³jßÿ5»keÌåÎÌ<Šv”¼Ïc?|ŸÀxþ ®¾;°B"ÐÝö|CsSzŸ œlÈö§^µá„# 탠2|øp8À„ €€C››Kccc(LEE[·n¥¢¢‚¼¼<æÌ™CBBõõõlÙ²…¸¸8Š‹‹IJJbÁ‚dddt›æž={˜4iR§ÍåRRRBŽ?Úº»xFŽIjj*{öìaÚ´ÀàVmm-ëׯ碋. ¦¦†7rá…°ÿ~6lØ€ËåbÒ¤I>|˜Ë.»¬SüçŸ~hæCff&kÖ¬ Å—––Êç¤I“xýõ×CÎWùë.Ξ–‘ɧŸ~ÊäÉ“ùüóÏ#Ž¿õÖ[œyæ™äææ•޼¼¼P)))Ýêu¹\¡éûv»††ÎoŸÑuÍ›7sÝu×…œz€œœÎ;(¥=©‹&^w^÷‰Y: =}¼Û+ÿö{~õûßó«‡ÏÚæЦÍã¦oÜÏ_×~Î{ÏþšyjhWäkçÊ”Þ˾såi ލ8¢Îûýøý~ü~h«êY¸î(**¢¨¨Öï ÚÓÓpá(ιm]on…ãg4þŠÏ¸ùÜ+Yü“ÇxfÉÊÐfƒóŠ"ÃY³Ïæ…ßà×Üun=Kž|„›/ü>Ûš»Oë‡÷?MMÆ'ƒ‘ 6ô·A(Ò>*#FŒ`ÿþý¡ßûöícäÈ‘¡ß---|ðÁŒ1‚ë®»«ÕÊ'Ÿ|€ß笠¸‡ÃÁõ×_ONN_~ùåÓœ={6£FŠyÎétÒØØÈG}Duu5S¦L‰ÎçóQZZÊðáÃ=z4»wïŽ8×ÒÒúí÷ûC¿›››Yµj3fÌà²Ë.cß¾}47wþc0!!¼¼ð¨áÎ;6l­­­Žl||<ðþ®ò×]œ½)£}ûöa³Ù:th§s#GŽ i;¡8º g³Ù8çœsø×¿þŇ~HIII̎ņ†, )))Î 6,b¶Æ‘ꢊ«½C?ÒnäBÓÇÝ3‰dÄíaÉ‹Ñè­igðÜýó‚ëõ­d›Ã93`eƒÕ%{jñA |{5Ñû%g ß,K6ìåÆ]Ãßκ”'¸šü­gá~ý½öP¸›û÷Ï3o&?‡wí¤ÆçÅžT€uý¯{.qëã¡po-ÛÌMãf5°úýÈLûèYÚ½‰³'Ä_õW¼âõÐHÿ¼»ãÇ·CºÃJóƲð®ð4„†’µ|q°ƒÄagñÓ¿Abs Å»×óÂïÂû%+Ùp¨™‰Žü<~ñÔµì¸k1/”<ɯ_:‹ÇnšÔãø&MJéAnað0}úôþ– g|Êhuu9ÇCl©„´‚J~~>Ë—/ÇårGyy9çœsNèüHOOgìØ±Ìœ9“—^z‰ŽŽÀž#v»=äô=šÕ«W³&ŸÏGjj*555>|˜qãÆu ³oß>òòò°Ûídffb±X(++‹é«”––’••ň#˜0aÂ;ÄvíÚÅÁƒ¹úê«À”{»Ý:oŽ~û|>¬Öž½G*:ΞÒÞÞΆ ¸òÊ+ñx:;¿]u–ôTG~~~Ì ÿ¢Ãùý~>LJJ ©©©ÔÕÕQ[[Ûiÿ‚ÖÖÖˆµø>Ÿýë_¡ß£Fb̘1¡´»«‹*^wm >o 6ÙófÀÒçs3Ò†Ž…àÿ’àÖ²¯rǵ³È¤™ÍŸ¼Ê“Š¿šæt€Oy-üúWyvÙ.;ÍÇ»øs§iè‰9§1ÃŒ}Éùëô§¹ev&[ßüKСæMfXáÐ…›6ÛÁ•÷/>ð&<}³‡ZÙýñ‹Üûè‹p7ÿ‰UןߣpŸÝz>·³+yf1ÿ;îin™žÉηÿ“Q™9«ïãì Ÿ'T¼úÊr/˜„:ñ!³ Ÿ‡s Ïþ6rýAÕš¿óðÿŒ6ã¾?ñØí³7ýtŠ”Í+“œQê¼sY0m6g>q7/Þ(ñ•ÝÉ¿ÏZÍð£‰ON6lØ àŸä”• }w»ÝÇ“pª!탠b±X(((ààÁƒ$&&’››áÀ¶´´““ú’’‚Ó餽=0°¤nh³Ùº]ƒÞS²²²ÈÊÊ"??ŸeË–Åtþ‹‹‹©­­å¥—^ ££ƒ]»vÅtþUMÍÍÍëß³³³»Õ²sçNÖ­[ÇW\¥v:ÔÕÕ…Âx<¬Vký³§|úé§äææRYYIGG~¿ŸC‡1tèÐwþâj®#™u€IDATÿI`›»g~r'Ñ1?üÍóHLOà\Å ¿Ùm¸ôÿ{÷[|ëÉõÀûüäÎè gðijq¤ùzΚæãOw±ø¯ü™vN×ÄQpOÜý÷öaœ]‘:lfGCÉ’ÇX¼î¸ïæÐù%?¹™%±Þž#ç]Ag}ýŸsn§½ ¾ÊÙ#¡CéÄióâÒgÞÌ/.yóe?yðY^}¬‡ñ ÂI†üa/BWHû D3räHöìÙCBBB§iÖN§“ªªªÐïööv\.ÉÉÉ455õékújkk©©©aüøñ¤§§ãr¹p»ÝŽu[[UUU\yå•!ç°­­>ø·ÛÅb MÁ‡ÀÚt“!C†D,PøhöìÙÃúõëC›Ú™$&&F,+hmm%))©Gyì*Ξb±X¨««£®®.¸´ØÏÆÉÉÉé•óß•ŽhÇ¿«p­­­BYYYx½^<OÄÛ’’’ˆçÀŒ3‹ÅšuQ]]ÝiöBwu1dÏöÀ6Ç^—Ÿpbéã5ÿ)\ñÈ'<õðÝÄžð8ƒû~ý,Ï}cfðw:7þ÷Ÿ¸$*ÔÕ<ÌÝæ‰ái<#.~ˆWÿôV°]Íc/.劢Ä^„³2óë䩇¿ÚIkѼ›ùÓ«¿ef:½ wö·þʯïŽÊÍŒ«¹YYàžè´âüÆŸú8ÎØäyß›O>ùÃ0(,, u¨lß¾ŸÏÇÛo¿qük_ûZh³»¥K—Gjjj—kÓ{g_ðÑG±`Á JG~~>ÿüç?Y°`eee]†s:L:•>ø¯×KJJ gœqFÌô† ÆüùóÙ²e +V¬@×u’’’˜6m&Lˆ(ÿîꢉ×ÝZ2n¯ OG΄|„‰¦ëºÑWÓƒúbMQ·bûpÓñ ·ùïI~Ž%Î#];ÐËS8ùhjjbÆ ,X° ¿¥œpdMïÉÏ–­_òλ/0bøn¹ùžþ–$ ¤}8u8|øp—£§½E×u<OÄæmGbóæÍ >ü¨¦·GO?<6› 
‹%<ù¸¥¥…æææP‡Ä¾}ûؾ};W\qÅQ•‰×ëíñZÿÁ@¬5ÿG¢7¶òù|èº~L¶m¬ù‚–v;.ƒœ )™Óº ÛÔÔDjjj/bœ Ä|†Ñ·#ÿ§º3y<ò,qžêö„„üaòÓÜÜÔß„AŠ´ÂÑ`±XzåøC`”ýhà˜ÿ®âˆç_ÿúãÆÃf³±cÇŽÚwU&'“ãôÚñ‡ÞÙª/f8x\u¸½ÁN-í8M,ú„ã°æ_AˆFÖôžZØ-ÞcD8eöA8QÄÚ¥¿¿±Ùl\}õÕ$''c³Ù¸øâ‹Cï®#ÿ‹ÅŽaX±Ûã04Ù¸{ #]3‚ 'Ù;µ(Þ ¿%ƒi„S„„ÆŽÛß2$G3ò¢ñù\X­6›êIILèo9B7ÈÈ¿  @FöAè iAèŠÁ0ò¯YœX,ZZ1üò¶›ŒŒü ‚0 hjjâÃ? ½RfÞ¼ydeeM€–/_NKK C‡å’K.!99ùˆç2²wòÓÜ\ßß„AŠ´‚ tÅ@ù÷¸j1ˆG×ý蚌üdúÜù¯©©éï< ‚p”dgg÷kúº®óì³Ï²páB®ºê*6oÞÌûï¿Ïm·ÝFKK ¯½ö·Ür ùùù¬ZµŠ%K–pûí·w{n  »yŸü466Düöz=ØíǾA–pò#íƒ ]q4»ýŸHt݃NímÁß²áø@¦ÏÿþvA¼ìÛ·ôôtÆÀŒ3BïÝ»—‚‚‚Ð+’fϞ͊+ðx<ìÛ·¯Ës}±;q_ ØŸz”•dÄð1ý-C ¦ö¡¾¾†Œ ù[ONÙñ0t~݆Ï'ÝdÍ¿ †êêj222xùå—ùÕ¯~Å“O>IUU­­­Óøív;‡ƒÖÖVZZZº<7P5½‚ tÅ`j¶íXßßá”b ¯ù÷ºÐÀzÝßÑß’„nç_„CGG6l`âĉ|ï{ßc„ ¼öÚk†®ëX,‘M–ÅbÁï÷w{n 0˜FöA8± –ö¡££Õ«?ìoƒMÓ0 £¿eƒˆ>òàõ;Âß½]ÏÐu›M¶œëOÄùᄲwï^víÚŪU«ØµkTTT`·Û‰gÒ¤IÔÕÕ1gÎÊËËihh ¡¡ŸÏÞçóQ[[‹Õj¥ºº:t ¾¾›Í¾??—.]: tÈçñûlm©¤¥90ÛÄüºäsà–öaÛöÍìÙ³k@茟š¦QYY‰®ë´´´È§|vûYRR2 ttõÙÐPÍjÃíòÐÖ®Ç ×ÔÔD{{;ÍÍÍÀÀ¸çç@Eº^A8¡èºÞågjjj¨GX×u4MÃf³á÷ûILLäðááðmmmx½^âããIII¡±±1t]{{;^¯—äädÚÛÛ˜no>Í›èßGúœ4iR¯ÂËçàû¬­oÁÏ3€ÆúR†äŒëw]ò9ð?KûÐÜÜ@cSÝ€Ð3X?ÚÛÛijj uȧ|võép8hjjêw]}6·øÑ-M465átÇa·xcêmmm%'''äü÷÷}x"îóˆ¦ëº¡i²+£ ÇŸ¦¦&6lØÀ‚ bžommåþç¸ë®»ÈÈÈ`ïÞ½¼ùæ›|ç;ß¡­­Çœ»ï¾›ììlV­ZEqq1_ûÚ×hnnîòÜ@Avó>ùùÕ¯ˆø=wÎEÌsa˃¥}Xý釬\õo¾rË7>¬¨¿å JúÎíÂÀcýúõ̘1£¿etIñæ?s¨v8_¬]ÀœY£™wÞ½ý-«ßijj"55µ¿eD`†Œü ‚0pHJJâ²Ë.ã¹çžÃápàv»¹æšk°Z­¤¤¤pá…ò·¿ýÄÄD ÃàÆoèöÜ@a0üa/Bÿ0XÚ—«=ø)z ‰b ;þíõÀðÐ6üÞ¬öä£T8nˆó/€bòäÉLž<™ööv"ÎÍš5‹3Î8ƒŽŽ{|n 0XFöA8ñ –ö¡º:°–µ¦¦’ÓÆNîo9‚pJ0ÐGþ5œÔÖW…~7·j´6í!5kàj>•‘ ÿAD;þ&‹¥Kç¾»sýÍ`øÃ^8züÞ–NÇ4£½¿e ƒ„ÁÒ>Ø­ç *‚pª0W{9ºæˆ8¦[ÖTw!qþANƒé=ÞB頻©ï|¬¶±¿e ƒ„ÁÒ>ïÛ×ßá”cýúõý-¡KÜí¸¼É46Ö{d AœA„À`ÙŽ¯¯ó{]².Z胡}èèh }7×þ ‚püÈ#ÿ&uuÕý-Aè!âü ‚ œËÈž 'žÁÐ>44Ô†¾›kÿA8þ è‘ÿŽ*šÛâ#ŽUTêoYB7ˆó/‚p #{‚ ôƒ¡}ˆ5»E„ãÏ@ù÷tTbw¤DkooÇï÷õ·4¡ ÄùA8 †‘=¡o±ižþ– CûÐÑV×ßá”d ühZgw²­½µ¿e ] ο  `0Œì G®wÞ}ïA™ú(ôŒÁÐ>ÔÖ7‡¾[­ò¦hA8Q ä‘·Ltˆó/‚p #{ÂÑSV&޾pô †öA3ÂXîÝ·³¿åÂ)Ã@ù×}mø}2Å0!ο  `0Œì ‚Ð? 
|»=ÁånÁ rsU±?ÒAÄ$ÌäcŽÜÅöÔÂJÍJtÐú[P5«*öçš<Òn<ë D¡}/SŒùÄÂ(i_熺 Ö®•UiÖüÀ#³ÃDDõæ¢Ìˆwߟ8Yý?Óб»¹Z±XÓM(dk÷Jýò ÏÔ2«Bby…é`4?ùÔ%¤åräš•ªiâFÄ"ΛìØë³j²i¶1¸–²ér)¢f­uåþq˜È"å9 \6è›-SVY¿AâÍu ò2,–ƒ¢ÐüÔOÈø'b Êšoa^íxašÈ×<ÖB VA… ˆàMy±6Í9Ì2¢¯õÏð ÞØÃx)“÷––`¹cû¾7I‚qxì2µšÓ±Ppúå\ßö¹WTEÿ"LŽOCSÓƒÞ%Eyi9‚D8Ž‚‡ÁXÕ¬Ðz£špÁ3 *5Ñ ç•Òdw^ÄÚÜ'¼s–ŸÃ[¯ýRë_ºïx+·~ Ç#jוizå8A^FøÎ`R]ë  R KÐJ t­s'‰zhØßëý‡‘o.²ï·Ø£³²rýu8v£“X$\¿ñfõ¹õ ¼0¯BJÎ9Ú¬Ó–Q-ÅÎKG*¨ª‚äî&AD‰iì¡;¯ôÕ¡~õNÈXº=˜R9ÆÕ`”»Ì…®tA8Läßå¼ß…Éó>ý‡™c3Pc•2(µãÌa‘JÈ.àÚO7a€Œ‚ úŽ¡g¡ ž'›a“§ &Vr˜®kçg9 ÆÃ°¶„e©¶( š)ð}Œ óyB¹­]âÔ 6¶ö74ìR%.â{­h¾éÀ,Bj¡ÔMêEÿlð(jÃ%ò£v°²:Ñ U®…”xÓÙAïÚ@Ì€'°2wS[n̯éý¸ñÊÏ¡¹ÒÒxÙ‘?@3 —”´-äË"hc#ãpcSÎ)@¸7xã?‘KbüìÆ"0JGmw3%†Õ{$& ¼ÞØmxŽYð ´ï¯c×ò9¬Ü¾Úð9¹«þ}lr÷ÒÞÆq&¢/N­üôÍZ7•èh Ó=’bQA:ÄXbrØ_N0’`[«yö­{=c‹5§‡¦23·²¬[‚ÍØ˜I]ë‚p¨È%¨0}½”C©¨¬7'êÖíe Ž×6aêþçA‚Œ‚ úN*[@ªáŽë¹ŽÇ‹¾v1±Ö>. ŒáîkσåÕ×¢€ÑBÿ½ËaÍ»©-­§!ñÛû¾o3™‚^mJˆ2úE\Ø»ï6êEÿ‚ÑÐÐ¥ùåóY‹ ܇@Â3 xvqk°*Öƒ" {F1Úœƒ¢µÞñ NBý_tXŸûä@Ž¥½ƒí´å•F ˆZ£v«H-,µèFç„®±Ž|À›K¼:ðzÿaC\Ù€ÄñX—< Æj4–×ߺ9Y-uܹs­úÜÖlX'=‡3cÖS DËY »)Û™î-íÅ»g… áNÄñýßXpg‹ƒÕkÆôb­•.;ÜÁ”ôÈŲ!p.ÆÕööf&òxÆæPPk:;V) £¤#ö~¿šµ†àõÁ”eI%6÷ð ê2þ ‚è;©fЋ·ãáó*¢¨“v0ŠxbL¨¶è Y˜z«ÿÆÿh†ÅtHBðì4Ørêô^y’ ‘j¨c!Ï0ÿb÷R8MEôϲØÎp¤ŠVÈËŒŒCµ#ÐE/;D³æ1VèNïõ )©Bâx0å謶ô9– Cf›¹YÁŸñ9 ™KH®}¹ïÇR(ä«õþ’Ëc³lÄIJC¢=±zãgÏ`,ŠJç¢[!  ^fÕû7r¢NÍU›g[\ûæß!¹¹Š«×¾íK÷†ZË\Š8#zYôv"Æ\ìl°2–!C.x´±¡Þ[û[Ì~ecЇ^%¦˜p½}´T¹â`œ^…ò½-&æV:¬Ìa"ÿÑ— p.tÞl7 Nã –„œ›ŸLÙŠ¤8àm–Ôþû ÿAôŸr›¿Ù¨ˆ±•î(¿'êú3Â"F³#7Y‹Vòlf ©™‘1ÑÀæÂ]$óÍUVJð"¶â5Õi‡YÆæÉ^诞ŠèŸ @Ó‡ç¸äôeìd%„ ~ È8iýâP q=Ég MLϱ-ˆ¡3÷ýœý)h%¯D@VCp¢?åûÿõùOôýXòrÝoMÖ:¢04FñôNòÂ"X΂cw~]È ƒˆ7ÇæK©Þ¿ 3šg,'ÎÏ@ÛÚÿ_¹ô*Fþ쫉h†[«g÷ÏèêÓ)/r«d9ÝqXÊ™+HËQD"1{tΰ8ãë»Büý-yâz»Ž£8˜´ž+른YDVºcý6òãK0XË×þÑ´Cp86cc6*bÊžÈyÎZ„*H`Ûqõ¡ƒŒ‚ úçz7è0¯vMp+¶U‹o†\Øš{uïqu0‹”è+ Ø"ÆÏžA(jÚ*­¨$"0Xo*q<¶ŠŸÇ©»ÝSm*¢†ê"›œó^r¹ ,¯¬/º(Lx–™¨w{A¸AP]ÔÅ!A%lðàúÕàÈG‘ýe9ŠÀèÏTŸÚÖæþ¨oÇ¡ÊsH—nc#ã@ÀÓÕ Áõ|úì#Hæx”¬ÖË+š!§/Ã1\‡ À˜ÓÑpG–èn¦ú·­í’tµ€äõ7p|l £8>6…;¯~ EÔn.bnðN¤ÀòM@òx¦ÝZ–Îý(j’»›H„ã õþ,3³—† åaÓ[kåý‹¡[Nv˜À°·»Öñ0‘—,è† 0µs¡n¢T²…×h:5¸´ûÈŠEO?œ÷ÙAAÆ?A}gWõúº†\½îåñË[Uuö¤`#üƒoC–÷ýÇ#¾»ÞÉðí›–Dja &†f4ªêæd ®+!S.]˜9¹0Fž½9°}îÑ¿BL0<®þlNeYpì0Jao¿$.‰?qáÅ«’w­²)˜ÎÔ¾ïÂg°ShîØÉM 4òÝÕç[÷þ –у©_Áâ²×q#$ȱžsbÌÉ÷Uüó~”nÞEᘀL®³..+‘D“!Žêý÷!œô:x0#^¶ÕÌì…}ß«« î¾ú5,}ëë¸ûê×03{™„gL%Da Jÿž¬sdåå"ò©KY,$‘N'!™œ¯Þ?! pŒ+9Üaá*`9è XŠömž©'ëŒD«{‡ÃFþCz<"(ê~1aÖöÖ!,[èZvÂaQø Þö0AÆ?A}‡¼Z³x±{Ñ_ay#l-}ó•5ãr&ÊbìÕÁEùBÊ6ÆÏžŠÚ˜ú¯ª*L±æÙçØ <­vG%x˜˜™_‡È²`t–=5ÿ¶Y@:+c<1%0…mÖ[°åJ/ã¤1˜VQÂ`.g#"AWU$ó£ "~€Wç¯2ßËÙ?‚´]xBØ+p,ëóýÿK§ÖJyÆ?_t07êeÓLì—áòXÈ»‚.‚‹¾Ü™!&懦´a؈¿öÀ)·ë‹Hm«Z£Ç½ßzL`{¥;ÑÝN™…WïLDÁ°G½‹Šçˆ2mÞWï?IâÉ{‰AnSN/¦ ÀæQÒmåþ_çF©‰ã‘(uÏÄ:lä?¨æ°ŠB)Ö |µ4»Üæ¯dßëZvÂaˆ¾¼€ÉÞ¤Vý„Œ‚ úŽ[îήu8’Ÿ1¥y”6ÎdÚÒ+±šƒ±²,½±g|.ŸF‰ €c<Ã$©} 3¯gÁ0b%0×îf‡’cp1 ™Œºð^äÜ6¶-ír£^ºì£ sÇîœéî-È8ðÈ'½Ì1ñý ïãâ²:p,Ëá‘w~°ê<Ø^þkèjo¶Y@®à]7ÑhF‘A!Ê ! 
˜xáZ‡£w—Õ«n¬±µ¤ëºpœÖ~‹ÅüuX¬÷^ËÙ zÿ˜$ؼ—Ù·Sn¡EÀD<ãÔMÑ¢çèáE­³ûE1=éÉã°ë„s%ŽGÉxŸ¹>èÃÝ—G¯pÙx‡£µÃp˜ qˆ•3»Á~‘ÿ¦k–+…ÀLjY2J¼Ù©~ÆToƒ7¼ (ÖêŽ8dÇç ”¡ÔOÈø'¢¯˜F*ÂlTDâf¦óëˆo6_èŽg:ï(Ð á;I<ñ¶GQ 8 F’ëºxíÕ70wï8†Å£ cláÛ;à¢1|c=‰ë™®{ ‰Ar¦¨‘"PçƒuT*‰\> 'ÏBñ¢¯"·‘ côˆ•]´K°˜ƒöŒu]Îvò'«|– #0ú3ØÉ5 FøÆß«VÀÆ~¼ú|ñÚoôtÿ3»7°¸êE¼¡0òœÝ=ï˜]+7êߟÌ ÿœèº.>ýéOã[ßú–ïõ›7oâOþäOð‡ø‡øìg?‹TÊË¢²Ü ج÷õÁ•:=L:&Ò1Ïájlµ–‰têÌÛ`ñµê°: H§w »F$S¹áÑÕFF µ¹¸”6OLÜ÷3!u»£5ãw$3<–ðœ÷» £p™Î"©Š"ceu®bb!X39û›Cõ€É%/Õߎ ]éžÑІÃÚ8ÁëÝËlÚùg}ײnà[«›øèçðóg/`2ÁëW¼î–!C-†¡ "Æ‚èZ·¥N®3xÝŒ‡ 2þ ‚è;šÀ˜Ñýɞϫ˜ØÓ‹x:è …0Ñ“§$ì„80N- áêÕ«¸páΞ½™õnîIík˜|vwv³xÛÉc87}ÿéƒDä®×~©ÏzWÙ½·î.ƒÏYH÷¢ygF<ƒàìß¾kЈ™MXL-Ý\Îz5À–Ë#[Œ5­ñßÝZÅÖí«€«/÷®¼ A|ïÙÈž†}°òÖïödßm³€õ-ï;œœ˜Fj׆<ÉáQAÄè×¾9èSÛÀc‰0& ‚ÄÁ€wÍ_½zï}ï{ñÔSOùÞëº.~üÇárVÆÔÔÖ×=#”s“0Å&CÕû߇±ù;XˆÔ:ªÌLß½5N³šÿýÖ‘‡ü4$×›•ÁÂZò2ŠŠòâN®1¿‹¼€¢xFûG?úQ<ñÄ`ËÏh4ŠGy¤úüÒ¥K8wÎUKcPy(œžÃLä­k0ØZº¿rÍ90úŸ0OìY/ò/q9ƒ±pfDa§)ê_FXÞ… Tû ç6÷rZ–‰¹7^„YjŒàåžûÓ߃TWºb¹h¼—þ¿6÷‰®·äÚÜN"•ÚA4‡¼¦c÷”„㎎īßôimÊc‰0¦ï-Áà-Jë"\—/_ÆÜÜ>ò‘\u )ÑE¼ÇbŠG@>ƒ‘€€èÉšÁ2ö¶¦ï}êì÷¢°¤¡x²f Ÿr†/ ~,µƒ»® Æm£{ÁäÌ5äµpŠƒ¥r)vBðÈü`ËçÚa¶ Ãe"`¸h_·k–vGQ¥»™m{#ÿ­¬,ÇA"(‚-•°¶î9pE†¡9È£€˜Ú½nK d ÐaƇ2þ ‚è+Šáb:$!ú­{=_\ÙÀc!¯ÍùˆˆØ+éX~ñ6&DÀqü7jµ˜‚ÆxéÓ¦qÑ—¾:÷±M¥0Ô.GGÚ!“šÃú–7/a.aV£þïSÞIQÿ:,Éû½ÎÌ^@!µ…¢ÒÜH_¾uJò`aÍçþüàÔi^ÈZüÈ?„c)X¼Þ=ñ¿’¶…Õ-Ï 1*†!gà†8¼ã…¿ôéÜ—;¹"B·¯AÖY˜Lk¥>—.]ÂóÏ?ŸýÙŸE,æe•Ød·ˆØîðÔ¢33.Àkiüò Ožþ>ß{?÷AÈ×=GåVT¬¾]{kлß@âöe0aš|øû«ª*Èe3€Í#YnËû¤°þÀDý`tþîIÔBkÉÕ‚„±0ßõ’›ýÔþ`0| Çc\ðÞcãøþ÷>†—Þ¸³$öUäUa‰ÅØæp´× fnÇCøaŒ‚ ú `1ÆÛ]­‡ÛËÙEoáóÄâð,ÎK„1¥ç¶üuÒù¬†lPÀ±0óÛÇ 0º¶]· kƒùY^]@„‘;y ¨8¿‹6„SŸ>1¸Aw=c½Ò}íNc‹¼­•…jÿýxõ‹yc¥*˜ÌO"4òÝÈÌöK]ÙçõÛxëÖmD£qì,°ôXO%ï`fnxê³÷òX"Œ˜bÀœ£¾¿á~õêU¼ð øÙŸýYŒ{ÑhÛ,@×%ÌFEDVçX{˜,äÁŒøçÝÂ[%œv?ˆÇ§>ŒsÁïEñZ-¥ºx®¦ôìÍ×½û ÌÌ­AdY(nkÝ šQ,$QRr@Ðëâq2*böõoúÐÚbú ¯¼§¤ôב›‹˜htUìh^ó߆AÚrq;§€a98%ÇLE¥€\zj±‘‹`2d!²˜ëë¹Ùq-”ŽF+ã2þ ‚è;ãJo…¨Žþ¯ðñÍ L~î ƒ>Ô*wrE$¶·á8!È™šS°\ÈPÀsIŒu8zîöšé7ßĦÀCWc  «X^Ù…aŒâfTÅ™xiýëxÿg†§g÷°M{ôJôÝÅÛØÝªuéX¾s ¯?ïûÌ£g¿§ÊÆÓÌÉó c^{å«XºüJU`=ÿNïÁµ_G±Ãe¶YÀܼ§Žc$lžBT(â©ç†3Ý¿B%²7m[дƒ‡¹\_þò—ñ£?ú£ˆF£Ð4 š¦AÉ/@±Œ Tïß"cówàD…†×mÍAñžŽRºVšd„D8^ä?èè˜ÞNAÅÓ® ×yú!J?2Û/B·'œŠä„ç9c/>¿§S,C8ƒb¾ýºÿÌö‹‡*EâÄ,³ëÁ–#ÿÂ’ˆooìBƒàâ#˜{ó[H$& £ÐŠ»0CaDQèz·¥Ã",ﺌ|ª÷ú%mræ JZïKûú±Ãr4zGñÀ ;<ާ{›ùB÷EÄ:á±DúÝ˸óŽd b£@.³E/ÁvD‹ÛàÓyÔKû¹–Dzàr,\Ë:ô¶‡‘ca †2˜ÈÿêòMXE`gæX¾Ùüïø{oÎ@¼wôK.Ú%¨§87m«A»óÒW°>1’œƒYÒ}ïôìwÀ¼.ðŒ'xâÌ,ŠÒV–oTß·zç:2›«xÿÿ,´’Ží»1!¥°xíßã‰÷ý>xáp­ÊR;×pwq “ÓØ˜g ~_?ø·„Xa¸;•ÈÞ±M¸§ýªó®ë/zã7`š&þäOþÄ÷ú/üÂ?ë8ˆ«$žÕ*‘·®aåÃïlé½[㵺䙕W[úÌ 8–ÜB&GIÝ‚:ÞÖgs©ËµÓà4nEmœŒŠ8ý7Ã×£NäÓØÕ\Xf{Næ…kÿ©õ/aúÔOâÔ“¿ÚÖg …ELï¾ÞÆ~‘ÿ†5‚ã Ä2x÷‰)|®Üõ`<ÄÓ¿úCHï. 
¤k`1ޏ²Òõ}ìÝ2Á°RçíCIÛÆúÜ'°»þ¥êk±±wâÑ·ý_ƒÓ]ßÞöò§°6÷ œï§zvL@Æ?A}Å´Šˆ- O:~¿¸“+âñk«~ CõÚîŠ%y ï߃w=·×öø®eâƒ'¼ÔKל8^/˜•s(Nëûvm³€¹…5XÎÖ ¦CÏã '}ùZçƒABk;P˜¸×ýøò9ϰTv£5Ãß²¤ˆáÜè÷À™)`qÑ‹ð(r_ûÓßÇ~ôçaóìêÅ„ô,n½þ+‡vÌ/¼˲ o•°ù¾³øÐ7þÓÛá­çN®ˆÇG"ˆ®-AŽûÁÿøÇ}Ï?ò‘TþêY¼óe(¡›^ÃtØä3V¤ûäÇjÆÿw¼:¼âw£·¯ {îƒPññÖ?g2ŠE¹ÍU0!Ï z:uc(áuæo!5ûN”RËçaù­ßEfëGþ’k_ÄÈôkÍ9I˜H¬u¿Ô 2?ì¥ÙÁ)ixz<ާMÀrÞ±qKEvûYÊPcf7‡Ëøp ·kT qÛ, ”x7\n ¬+£˜¿Œ+ÏÿNœÿœ8÷O»²-UžÇò­ß…œ¾ŒPü©Îì”öODŸq¾þðEX+žû(§ ŸòRø2»÷a$Œ%HoÌÁµýuš®eÂÑ58ºÖð:cówPûßÛwyu FAµÙ(NÇWñŽŸ%Ã?‚[ Õ¿OŒï¿˜™˜zö|ðÀ±Jæ ç¤ïÁ£¾«úú‹Ÿù$vço „±­ýÀí×¥í´Ûdr—nl"!N`{ö|赿ÆéùC¼±2?L¿ù&ô`k‚{QÁ ƒÄ—oêó+R ¦1†Unñ7’¿˜2¼Îر[wQŠP‹…¶>·»ñÎò›ÛÈMÇðd,€éÏ>ßÖÃÄôo ¡Z3t3Û/a{ù¯aÿ6òO‰þ4Öç>ÙÖ6ÇEx¥ûbÆûGþ×®mÃV‹°óY0r¶"ƒqu88¼,Aq0òõ…v6ßsF³t¹»¥%%m·^ÿe¬Ýýïâ߃BàW°.Ù³X˽9æç!Žý ’«‡/ÿÔ¶o›¬Ïÿ®¿üO`»è¡_Æzáû}Z÷…Œ‚ ú ï½Öu­P©Ù›\Y‚âxçS2òq³¯?|ÆçÈ·¾ •í«¬Û·obmd§GWpáö-2ü¼ vÒ3zÔy b°Q”rbê„·gáhNKc6sܹü ¾ýùÿÖ2±ž'lþé¶o^|>ŽÛnï~볌áøkz¥‚q¨zm=½ ^îò†a$Ø‚(Üú£žà%ãèøàóÃ'ô·IsPÌoµõ™Ôæ HgãÜÜžyõË=æíB¡„B Ç «›X¸öëŒþ d5WU °Ý0òéÖE`m.Û“Zúvjþ›,ÉÕ (ìæå¡û^§%Û½r‰ìÎ˸õÚ/Ádf°ëüväÇàÃà, ©ÅÛà, <Äná8¶Ÿ†x7n¿ñ¿amîmo+³ý®½ôOMÍAý2Örï„‹(  G7…fPÚ?A}E Âû ©xîG_|©ãÃ4 lËÎË=íýšÁ¥2m¦¤mCN_ÁèÔw ´×ÃùÞÂkH!„g·ðÄ—^Á£a'ä:ÈÀA;{æ}xë^-xáÜwB»ÆÂAk†=¥Œ d$œýð'ml¦nãÅÏ|“§Îƒyï‡0âÎŃǾã7î[°¹µŠõMlGGñ}¯ÿ5&7‡?Õ¿žúÈž°³Eã µÙü#o¸˜NÞô¡÷*&ów‘¿µ>û7ˆG÷Þ]Üøö›€ÍÍÍï-]Á[¯ÿ2òÚIH£‡¬ŸÀÆü'ðüßþ$2Û/Ý÷ó¯¼ îöE$þý—U†âø”Ç£!§y¦QN+ ”1±þfÚfÉ÷z7ׯî¼!!”ºnÃÆ·¿q×çዟûm$×¾Üôû]™»ùÅð]_ù+èåökÃpÞÚy¬Ÿ¦_‹o­µt]T¿ýʧáîªÈlf†âx´G[ÝÞ÷w™~Ï8ÔÔ>öw_ø~¶ü{ÚÞFnSŵ‹Ÿ¿ïïgmî°º.bcuÙeÇo>‡ìvn(Ž£çÁÚ àÊÝôø/½òÛHo~7ïÅêÂ}ïyÏ{ …½KG’›7ov|CbY@gΜôáAA ,Ëv%UŸeÙ¶õ:…Œ‚ úÆÜÜFFFpîܹAï AAA‚ ‚ ˆ¡¢“u0صEþ ‚ Ž ®ëbggŠ¢@×õ–?'I"‘¦¦¦:Ng#‚ ‚8Šv n­EiÿAAAqÄ¡´‚ ‚ ‚ ‚x 㟠‚ ‚ ‚ Ž8düAAAćŒ‚ ‚ ‚ ‚8âñOAAAG2þ ‚ ‚ ‚ âˆCÆ?AAAAqÈø'‚ ‚ ‚ ˆ#ÿAAAqÄ!㟠‚ ‚ ‚ Ž8düAAAćŒ‚ ‚ ‚ ‚8âñOAAAG2þ ‚ ‚ ‚ âˆCÆ?AAAAqÈø'‚ ‚ ‚ ˆ#ÿAAAqÄ!㟠‚ ‚ ‚ Ž8düAAAćŒ‚ ‚ ‚ ‚8âñOAAAG2þ ‚ ‚ ‚ âˆCÆ?AAAAqÈø'‚ ‚ ‚ ˆ#ÿAAAqÄ!㟠‚ ‚ ‚ Ž8düAAAćŒ‚ ‚ ‚ ‚8âñOAAAG2þ ‚ ‚ ‚ âˆÃ2 37è ‚ ‚ ‚ ¢70 3ÇøÌ w„ ‚ ‚ ‚ ˆžñÀßxiÐ{BAAAD×y À§×uàí~ À¸0è=#‚ ‚ ‚ ¢#îø€O¸òÿÝZÜ}óÍSIEND®B`‚celery-4.1.0/docs/images/celery-banner-small.png0000644000175000017500000003353013130607475021476 0ustar omeromer00000000000000‰PNG  IHDRX3KÊvlgAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYs  šœiTXtXML:com.adobe.xmp 1 xmp.iid:CA15D7FF9A4C11E6B7A5A5C86626E854 xmp.did:CA15D8009A4C11E6B7A5A5C86626E854 xmp.did:45B92D9B9A4D11E6B7A5A5C86626E854 xmp.iid:45B92D9A9A4D11E6B7A5A5C86626E854 Adobe Photoshop CC 2015 (Macintosh) >F42«IDATxí} |$E½nUwÏL2“™Ý̓…e•å¥ÈKžÊQ÷ìßE7("‚øó¨àA Hðñ¨WQT®W]=^Ï.¬"ŠoŽ¢(‚ˆ‚Xvl²I&Ï™î®û}Õ]Iïd2™$3ÉL¶ÿIOW×»¾ªú׿ÞR̃6«6uË-žq~Óös—Ê>QZò©ÔQBŠCa¶¿"+…°Œ½ø½P€&ÈW.~]eɤŸ²Wx­ÉœÛâäDÒiUI§eÌ–N«2 KÈñ^7úNòšÃ[^»]©KÊúû9H,Ë+ñ"—kw„ó,á[§©ŽƒÎÓð¬Á“ÅÃòZÀÓ‡ça8¸Ï’âPß^ê»ï"ëýkF,Y”X½:“ó™î(–Œ”²&¬b¯ܳdq¨&àìšÎ”(t)'ùߤ3¤AÚîÄøÀÀ#ÐkÆr1™”y+ÚÛs©¢u@Y|æíé<²L¥Ä?ÄîÝyø”ùyzUWg™®ýS–ZQ/ÌP¯\QLމ‘仯JÒBÞȲÚÈåÕNåº ã]žWu¯€º·3´W·WËÊ•)ÏI!¿Êc&!y^2h­«Œæ-[î•ÝÝ`uó®7¯UEõj)ä+„R'·¤l"a ßW¢Xð„[ôµºJïck²ÊWlÛ}Õâ¬òs©Õ^[k‡JÚiË–6{ÈM²\*à>…}2éˆt¢E Œ !ŸÎ=4{öÖ}HÈ¢°Dðtc˜Èu<ÛÐz4ZêºÊ–©·Æœ¶”ò!†PÀÚêzj“ëg#J"ƒ¢ÄÛL¤ãì¬h?Mør+"N|ø,EgˆÀ“Q­D¾¼×ÍïþÔ^§ 0Ð\B"6¾“íø(ÞÀCœ8b|Û °ßçZþ100€oíÆXXæog¨cg+%¾†´RP6õ¯š¤³„²ëíÐ/>9¥Ôë¼áþ-P'ð˜Î”KN&­ÊÎv|g#Ê»ãYK2õ‹F”I¹áüåôw(ý?‹e™e˜ö…&1rÚ:?8_ˆˆ•Ö;ÆÕÄ9…²w–7ÜGžVk¾¬ý³³/—B} þ¬LÛ‚%Ô—¢ÌA›Ìô³Mõ8õÐG«,i] êuéL¢‹mPaÜ㣮?!\ÍÁ-mh3àÒÀg "ÖŸ°©<=j•Iuù+[ת\ª]ØV’‘âÌØö{Bzøœ"}¡Pô' ®ÛŠa®ñ±Â ä7=_ʳÁ<Ýæé”“e¥šl¤“™UÇ(Û¾ÌötH¡vP•ÒCÄŒ AicPQ]ƒÆŸf6dØð>Á‘âÝ"×ùE7)®{Ç ‹BB|66)ßJ¢þf&ñXÊè’‘ø~f)£P!ì)>¦€—…â üŽiö‘ôiÛQ+§ìO³´Ì5”jì_ùíKžR–))Z–<³D±Ì ôŽYç,V`¬±ØP^Œ:‰“»²À&×ö¾é°ì’—5‘Ÿj>î*ùŽT¯A§x-ørù¸1¿…úf(nCCýÆmyËsÒ%ÿwEWW›÷¯FÚ*ÆA©^Ï¡£Š„tHN ²!¾é±·¤¸ãÍ=–°ïÌ®hyW"iwŽýÑ|Ñu=ßcö!Ù >PÓo¦6¦! 
ÅVÇJ‰5+ŽUO]yœ\Õº¿mYŽiBéù.e]Ú(ˆøcž0dLÜ ‘„p5‘Í`ÖPÊ«·m „«ŒJ†––ÛËôÆFÞï[Öi# áŠB$Vüà¡= ª¬Ä@¼ ês \neô/w&ÔïгZµîXà]ê´˜P‚DzÐób:–â F¤Ô¸DL×1/©Ë•0X¿KD« îÃL4\ÌSƒÏR¼ÃѪ2S¹ –9˜v@YÒ˜#j,KuÀ |+à_!C(R‰çã¶gÝi·uœ†Íð©]  1Òû8Ä‘ó~¥ñ!¯Ø'%&€å¡ŽŸ¸fµ$Í×qÿ2àõtÄ!Z÷M‚ø€Ÿ¢I~;f8vU1˜FŠk­~¸ýMϳƽßfrÉ+ +;<8Q,=°]wP6læj\j Íý  ô \­h}ŠX×q²jOˆ²ci¡ŠS€z-±W{2?2*Ž}ÂÚ“y>£üÏâŸ+–š'kq<ÔC¹zU¶ý¨W!ØJ=.£y9 Ú‰X…n5³*Âï#ЭؖhëxÌÈ!ù4c=`ÚˆÉR>¾°cES3£f z_&ƒÏR–%6óÁħaó ²)3Œ·)K& µ|“þGI€ÂVíÊ!h\n@gôc!H&dQx±¼üî›é/ƒïR 6*ô0Çßå´uý/˜SøazBtï9ÙöÂûbíÿ†Ñð!ÿqèVl”ô °Ëqîò´yóFS‚YÜüà›Þ©¤¼ÓGLèÑ*h#!±PU½…êbt]·ßžX=R¸âH‘°[ÂÑ*y™6¬æ²¼ÖT 5Ø:žŽ×‹^ ˉX\ÑÚ¾ÖÉŸcØý%Óôt8U®Ìƒh/„öÖr±b±2ë^2–6_ãd;/†‰87‘÷5 cYÒqñ,ïÈP;ððú ®æ—üÝE:`ìjc§ß7n*½ÿ´ ëð—|_ {%Ž '‰ã®¢oý{hÆIßéDájr!ûŽó>“Φ>kÛÒ)¢ÇŽC°šî*Ö© pÅÕê®x¦èj;ÞrÑ:ÖWÍW°2 ª ºJ Ceö2z…¿mÿ.Ç‘?AºŽCB9”Kýre– „•…µÑÖ•G¢ÒN=tÃ:ÂÊM{åˆB{ôåêD¶ó-ø¦¿åÂ+ç¾ô(|2ýõ}ˆrã¬ýБ‰ê†Àb–© dÕ-)‹â±áA ©ƒ•p 㢎wa¤³XVFX”O„e&î2Æbw P¥é‚Œ‚Nµ”ÏÆˆœéôÎ7Ú]"×~øûóà/ãPÊÛ>ãc,ÂÇTføíiMâšœnÙCOÄÍ;Þ|mvEòmù Ó‹g&ÄT7ö®r-ÂÓƒ/h»ç3b5-ž\y«° £`âIßrË}Ø5ÍnÓi°"èÂïXEìðÏ@i§p•,“VÚ¥0eÁ”ê1TÓ‡¡À.ì4’8®A‰ðŒ…í<î‚öè†x•VÖ —¤”Ÿ>ÊxOq¨ÿvØ ô¡hjšޝo2XÆ•Àñ"š–K¹¬/fÍêû"–)Ë÷RºA ên3"FÅi‹iíuÕ‰¡¨\ h¬[¥BͨG!‹ê«+:QÜ}פ>u—žN~Ïζ ìû­ˆ/õJe6¦Œí’míß+ ÷c1ÿœù±æß8âpOù ñ£Ÿ¥¼‰Â\3lréAØÑEn¯ C|à"L°ã¼OesÉ· A¸ }+—!°SíäØ5¹£ÅŠ–.,nçŸÒ¼\HhÌaiŒQÂúµöiýBük(·('×уò**§îf®˜~ör èq|ö·x¾ü}Øóˆ$jmk²mäP€ö¥ä¹psLXiYyJëë3,¡|u­ë0_¿“ V)Œ1c›È‚Âç«+©vbä3=¦¥–…’¸ ,ìñ½46#üŽ fÄ,ˆyü;¦L±ÿZÀ^ä;ë¨n gr4OýÉ2…ø—ÐPΚ§KãŒõ€<äI4àߣPMý#¦<´§ÝʳwÂU÷^…¡¬¯¯É2¬0Ï~z¯xŒq”™èž °Ýä õjR`  AÄ‚êèÃÍŒ”ð†zoFóÐ =Ã1½@­èa­–”û;–®ÑŒß111K€>ä“äW®’yEœFîÿ?ÝMça°¾Xþz¨É'Ë Ð^RÒd°«PmGZOòê(A€×ÓAÚ–ÿÑÐ@7QK%jmn‹Ä‡!ۤݨ5³îjgQM\è¶(j‰½q$®¿á{ûß·_Ò–k9>ß?ÊQFvšÈF½(1&ÚqT3T3¾Iåì³ÀÆ”¨[º3ö¢~Díÿžñ/úŽšµyGíU«¦[’‰_4nÔ/õ»ü7†›Ô„Ø¿ýHÜ#˜ÆH–Šé¼Z¢Há9¶ådZZ¬Ü‹÷aD`"ÇCwAúúÑ®ßoߺaC»Œ„+b¤a÷¥|9*X+¬òóäÁš«û¼ë‚·’†v«%2 SŽ7´çf¬õBe•—jixºaѯbݧ›|-Ó3í==å±NŒ@uDË”QWç2¶E¼æŠYÔ>Gù Eé½ÃöåsÐÍ?Þ‘¿E;¦ìéCK’Êv<‘ßýw|P€ ½F!ÆÅÑ» ³!¾ßÓߟf\ ñpi6orrí›Ý¡þÀ²O©0Fû”{ŠN[Ç¿atÄ5lQ\h‡šú6\»v±ÆZÂÀ £äð”vN ^Ï뎻dbtLø8“ž~h¯£¶cuMàè•ë‹é5XwµŸ¾îfްÁ—¡T5<ú]§±ÕIXwôö?~Òš·Ž–ú· …+SIN ÓZNÎÕ• s§½½Ã°§+Q)6U|³Jèj᥮r²¯†Pw*/ã ;+¡ºBC}¬Ó–Ž;,~ui¼B«ñ+F F F`Qàîj}Ülkÿ*fRzÊ4ö†_å<_=ö)`½Eì,‘÷êH#» )xE,ª9r5Yâ“B¬ù¥±m¤~t‚ßìt;0=$w:o‡±ö+Ù¯…ŠÓÑ|’¬lפÜâ»3ÙDk~‚­d¹vÒZ¬¨œ^]Ýq¸pì$„­9-l÷,Û²S)GäGG¿ƒë‹zÉžyO¹xnÞ¼ÙÞ¸± Ù¹ÞCxÔ)ݹè±Bx¢uÕSñ>š½õ¢Äé:.vü‹—MÝ(†´ÑB0 [SbèçâçÂÀt࡚UGïÄA }Ô°bŠˆˆh,lñkôì'ŽÒPQÒìc ËûÑB“޳'Ý8*ñ/à÷‡—ŒØ'íä&Þï‰Ë‘žRKûƒÑ+*/ã!€ ¥dØéŠ”Ù5´Ce²x è ÷y0>cDÙP¥À€©ê‹¨PýSȱ‚MHâ‹3´ä¿ÅJâ¬Âÿ £D!‹B“žâó¥u5̹kZïR 혗^ æä+³M ”Ú°ß…^%—‚lìÆï"àcMa;•Wdº 'i  ß™?–“°EÑóχpu«+Å­~F¨ªñ`™ÙÑÀaœn]XlùbÂo5a)ûo3v3æ ýe=Q–o_†],7à|™ èLÕ@ñÖò„W–ÕOçrì"F F F`áh%-ÿÝögbMñ¯Iÿiªú&?W1ÉûõT¡ï<‚ÒLS…\6²>ÑÖñöâp…^‹Eÿ»ÆÙöó0"ñBHAì@kÙ(’\´¯˜ôÕî,\ê³­©Ø¬8\ÜîŠ'Æq¸.§2Gˆ•µ@€ƒž7&r8óª5•Áô ò³*ॗʹ:Ã#£›k{Ãuœ¼R\ áª'Q©EäšÇ"ÆJE¹f­¿#I€¶¶Ö‹nË?BýZaÅ eaäI®{+»ö-*'ã[¢Æìƒ,»å Í”‡ôjôØß¶=¥B…æo0µ|õh“$ŽÂŽ^¼¯r«Þ-•u3¾™V¦Ít¼ƒ–©ƒ0õ¡ÔŠý~21¸k»Xµê©Ê• Ç:˜vcJ­ pÙ„À¢ù¼™œuºå»îÉ-i»ÕÅyð9ê)=Ž© û8‚•Mwœ®€lc/!È÷™‚à ˜—HÚ®ú¤í\A{·ˆ+í}T¸20ia p®4ÓÞ¬bäÉþÐŒ•§VD¿8¼ÌmÏœ‹Ÿéa,j.¼[<‚ öæÖ2}TÏ”ÎùèÇË hÝ|ÊÌln–{™ªÜXÀ–þÒ q!¯Ò³XÌ}FržŽØ”®½F£'J¥ ˜Þ[Chà©íÙç  }¾²M›Ÿ0RÃ(-Ð+ò7ü«µ‰\û…ð‹MFÕwÈb™.n·«±ÏüpT¯g" ÖèÙ’`!7£V®BsJ,~©6žzNj¶ºÉ|×» í\Ç0t>ÒÃ4DÓË"hu#L5Ÿ(i`ú/¿û»0 Û9•'tÕŽ-‹ô´ÔsêÅTcôZ+䵬jeZ¥Òã–øk—ü×pGÚÈ´žÇ®)½ÃQ. 
æâ‹Ó*ªîq4%õ´fñ5Ìk»äÜVã{·dèF­–¡ëF +—qTJÓ •p@#1IˆuJX¸v „ ±•œ…fd‹š5†ŽÁŠ#9l|´IhÓ¼hFá ¦êCPó›£?s,`©‰iaÊKÊKœ ũ§!]LGtp¡’áj‡'&pð¨&Ú%U“ƒ#0J_µýØb50fÂTúúPQ‰ò­×_Íhq* u~áý¨~ëú¦µ¦,í³ªuèáåfÔäéõ•r¦‚±QŒ@Œ@ŒÀ’!ð­©Q§¹FÄ8øDŒ¨_lµx„…uÁŸuóúÎ>Úo6áʤ‰#V ±{w^å:߃©Ñïã›i'ZøÂ»‡`Š4Ø5È©Áy ™N2ÝZ©i*x¬·8‚åø¶H¥Z‹ÒÓ,9 G½&ë»q4ƒìÁ9¥ ˆÆrs:‚¬(¤fÞbÝ8¿3á»ÖÃÏ…Ä'v»¸Ô'ÿëãëâ"S>4¦¬Ú)RªqG‰rF¯ü›ÜᾋJ=iÒoÎ^`ªp÷0Ux ÚÒ 0ŠÅ´V°Ìè§o íΫ qZ2iœn¶a7)„Mmžàî`ýwË]”*ñî:Ôv¥â‚Pq…¸B]¡z8fm¾ÿÌ„ƒµ³Ã7kÒgÂwV‡,ÔÃÏ ÁÅF †@­ó?ðoöo †%;꺧¯üëÝüª³„è£>ùæ¼ ¸k$Òí€ç¨K¢xÒùô2S…&¾¡p¥¶c×༧gœ"²lÙŽ{íP@Ã&Û4KÆVü® dÀ–S²>oº‚£Wg˜9cïi ’œrZÿû<íÄØv0 XH\WÐlPïóhÆÄÄ4,fÖ‚Ì.lMt\£jj˜ï LQ»ãOp½—Rãh>ìæû®‚pEýå"\1-Ñß?„H/¤3M·pjPò@ÑyO 2P’sŽ–aq©š,ÑãŸ9#bË)?ë®yØhU,a§;Ñ«nÌá^çT6e†ew† ,Ê­µ1Í”5vjùpM¼Ä~ùG+vÙ¼Ô²,i¿Ðˆéî,S¦CÕ¼ð”9jÝXGÞlŒÕÑo 3‘1Bgý«¸ÐùÄ@¸ÒÖ—“peÒ¯Gâx)4nѬlú‚u®?£ý[15xSèP» Õs~9¸°àDÊ>º8¡‡°–kÁœ30õsÀr\ôìúœ/0ÛNŒ‰²ÇÇpÕ%Ÿ–Ï_ÞT*Ó£¡¥}•xˆ›>øÄƒ!PlÿY— n3à%Çg0´•¨l^ÑñÞ†¶?cÊ镜­Â_L‹€€¾‹ÂUÑä+« wL we&í o„ƒ_ ñÏÕ¸[ÎvÈ8Hd³0n,í þ™#JÝìå{M£5GÇ•­‡œÜ4•-Ǧ͎€iì½bQnc»õ®è:%j¹”)ŽªðÔñ¿¢C¢žcM.G¬ ÿÛ>eÛ4Æ%ÎÇaäþS048D[w¨µóƒ±T%X ,K ñÀÄÞž-Ó/…%#ÁHUùZµu0OuF¯ \§¢/àŒfDùĺ B ØãÅa¡ZQÐ+evná–£Å ÜÆ-ÏüëÀ¦O!7ìØ¦zp]NO³n¡]†%ŽÍA¢%ڦˠ(Årš°ça±×ǧ<ÈyTÉ\[jÔ%Õª nk1ܺ‹Ö’GPÆŠG¯5óë/™´ü ;CÀZ×"ÄÎPήY`Ë©L±n°mÞíõýhÝâä:^ w§B8#Ñ©x h¸‹O¶aáÉåP¿ÂgX¯›–O!îÕÖ0W´è×Ë·ä]… ·ßI`°>ftq¯¥!G±ÆŠƒá™X³·Wa´‹Ïmˤ;’Žìa\Ö‹+¼ÕÃʲ¯’N;И€ *­™ýÚC;³ƒ=£gÓ è™™™Æ¢–á»Å#Œp‡B¼®¨ž)óÑ…«ÅËÊÆ IZ,+ ó)3³¹Y†eJïô'¿ãÃÎ"¦r„1 ­””ü\(*”ëê»ø jlL¬è<‘ÖñÐ^L5DÀyé!_yèæíçý6™r^ì œ(Œ©ÎðØœ†%ÆÝ1ᎊÖd‚–Í:’¥ìüȨjiMžýÀЦ_ÁþuJmCEé=²g2•ŠA† ,àùx(¿”–^|k « ‡»®üDè¦X1,•Ììw¬gyÇãi /N«=Xl'=OŽÿ,Ü‘¢ÝTLUl#°O À]Ð1Í÷ÈÔ4c+ï~§¶Ç…ÚN¶ó64,Ï…´ÅŽQ¹Q¬„ï«wÃìl<ÆÿòÞÆºsF@ŽÝq]΋Ñ<°ÁbÆÅ…ÎPÎÍ…,×#…~‘NæªuL ë<[ZÖ¶ç¿Ñ/冸~î µ y¹Áƒ… ¯Ú`šÆž.§8€ugÈ®ù-¿á·LAÎy:Ìî®QÊL8·¼`4òåÓø‰…ôÇ„m‰p¹å—õ‹#^1ÅÄÄÔ r¶í.øâçÁ¬ž µÅ"ï2Œb ÙÈt}ª8ÒKþ¨Ý ñ{aèÇK|gl´øX2eó;–b†iU®ƒö×ùñ' hq:œwzR·2Á†í¹ž—HX¼Çíúò߸€B-ôP”àº,¥6ãú£ì¼Åê™å=&©™…²Õýè¡8]ž#Ò‹D©ƒ^\@µ(ßA8­íkáå)¿)| lmãÅÞË­âh&âÔßLoNYébÍž„O7¥‚Xè´ê—Æ1‘¯ãê‚+¿³‡HýèƒOavß™$õbŠˆˆ¨'äszcÖ¼6 ¨ßCÏ^÷Â_›Ì¬:öÈ?ËÙ ½ˆ_sA`H4½_ÈNôÛ GæÇÓsq¾v͹{ÆÁÁ£<¡ŸÙQV>(„Ó÷ý±Ñ‚ʦӯr¤ø-F³þ{Çè7Ïxhü¿yDmž<` ‚„­ ¼xüæå´Q XblÏÃè^Hœ½)Ä8 Înϰ‡ ¯µ‚ÝB Z“_:*˜½+fbp#½?؉•111‹…€±*¶Èoþ-l`ØŒN¡¤ÐcWhPu#õ(VOGÀé–[¼Í›7Ú\ì¾õÞ×_›–ö¥£C4R8¶0³Íоé‡nÇBýè7ÕQ*uG³¨ÛrjÚ1î¢ê¨õIÔ‹R4®F¿Ôõ=cǼ£ñ1öø6a›·±_jÆï™üˆÚ5jZ‡P5â=(2Ö~bõªƒ„ëQ …ªHKdC#£®c[©Lº¥»à»Ý㣅AáN<Œ5Z{žm² çhýh×ï·oÅè–K!‹£\U…ÐØ–˜#fÍÀ¡^§4lj&ÓjA¼¼\tu}_ôöòXãʪ‰~éÜqr€ò`\ô›~E‰g×àÊq;Üöëðê‰ÒxEíÇêZ#ÀΦC~'sí×bœê33®Ågbë³…‘=÷hwÁÈü NbíjÐ ÃÆGùoîXáê¡1¿;Ùb>>æaÓ•b¬ÆŸØÎ<Ð׿`á£OþIäÒ¢%™žÏö¹j)‹6£Yƒ#£>Ôv"é¬À±Ç`%¼ŽÃp•wîSO:ô¶íƒ_=ÂÕýËLÈÂOê&p“"Á¹#c ¯“á.)´ÇýÏC"z#´Ù“ã:ßÕ?¹¢ŸE;·ê%pqiè¬\F…zêÛØ†ÎSé¶yÚ©u|&­ædÕœªÉ‹šZs§¦ã‰U=ñbÔ›3ƒIŒOÍ ß^’Š¢*~3!“ÿ†ªüò¢è(>ùùc+F±.€ú-x´;¼cZºâz/ýþÄD÷I[ídê|MÖ÷$p¨%Öò yŽŸºa@Öë8)1V| ±f:²4Ï"jhe äÔ+æÅüË=4š³E,S¾´¸“šÔ<øñ­å/G -1øävÉôMò*yf)±ŽŽŽ8+•í| KG!KíÇß(×X}tC(dqÂõoö>h;–HµXúà2øW.c*U‹…,³LœÅ»ÿÑ_‰G{ÿŽº õº,LíÁt<"pʹú™¯6NMdÌÀÃ÷º®: jž`L!‹ú|J‰åŸ‚ÑBD© A7¦—G{åˆ-\Á—÷ó»¿Œoú[.¼rî›A‚%Ó_ß'ß(Ë“š¤H¿xéK€#A7r1ËTP²ššºET×-ÛW×¢aG†å³´-g¾p«Í•êaLÐè„>죯™™9‹¸l‚|u›ßýI(IÄ9®ŠøgF \½Ù‡áXò¤“7ZÜq æÄ5_ŒP©€E½`G¡o G±h‡¼0¦9"0£€E8]ØÓÓcq]ÖÆcÿû‡²Å:qtÄû Fò‹´l‡ ¦u1ãÊeÔ£[7è‘*TÇN‹Þ¿‰ûvÞ"ïÛ¡§9mham´ø®’ ÙL‹ç½»vÝñàÏéæVqërÌ·@Èêwóý/P@R'ÀP8Ë2Os HU»$bº…Lÿ”ú+n‚ÜPîû<ÌȱøÌÕ_8YrbÚˆÉR>¡ °ã\¦¦¥Å,ˆó«ÑÈà³”eÉ„MlL| §Éø`FÁðaÆÛ”+¾¾yOº™§B·ùXòs xÃbçÓðÃ(fä—Yhp½Ör#ƒ%ß&Í|Gõk’f‚[‘ `é@¿ôû·$ºúr?,_´õÏg|mdĽÑÁ*øD›~S÷°yJy(ÉÕb#C‰—ï˜æ@ÀNFÜQ,~¿ G9ˆß«²«U"‘¢…uÁÜ}ˆl Ù*«a(!þ8EH·µ5™ÊŒÒÍ{7lèq—ÑÂr“i° zØ}ó1œPü}eÛ—˜ÓÁ8xÊ:HÿÐd<|L™¥žyXhF¡J3(€ˆ»åÝ”¼ZìÞÍéV†E{ÚS¼›ƒ.´dбü?AÚ–&æAY5ì”—)€ åG÷þ§Ÿ—ŠP´©¥Š@…p‘—:rú¬Ÿ öêm”©`'g½ÃZÿàÉIœ¤?TK‰GÈ_e´¬9_b±ÊO ¢8ÔwG"×¹íÂFŒò—“Âr¥Þ–lë¸nYí(”¬·ë(¦„%(·ÚœŸ §rÀ–õõ­'}YŸ“uN}?]öü–ÎÝz÷õÏÀN–—aåøÖL"á8ûFà*ëW¬97(@Yàé–óâFÿ!†FÙôjYkÅʶÕ~k*#l›âPï(p¡FÒÙJbV7hIîÎçaãœÃ²gÿB©œ#ÛCáb9™ k’Äõ»;‘ëx6„Ñs¡õ"`q¶ˆ€‹ö’` Î £›ÆŸ’ìÝÐßêzj“ë{„NA¦|5ѯ´|,.—#ˆ²éÉ-…EàY`W¢‘a\‘G– þŽ 
Q¢ªOÕ<”jH\/˜…ÑEî5ô~^I9|ˆã¦ë_•¾Ma\¥ƒ Öè&ToShhBdqÆ⨮ˆ¦ØX¦üv”¹H˜¶ùÝ’Ÿá DùYT»†^iü%vÔëÀˆÉYx_Šg!á—þ­Ä ÒÍ´pÀ(Z±yÄ_=˜×††1G¿z¶­w°ýÌïÁ ðtÚƒÆúˆ?üå´êϲ¤„-ì¾’G³p£‘§9†[/7srúÐc»ˆ#Sv«jMu¨¶Öv•I­ôS-«%‘žH8Iä­doˆé!¼nôä5‡·¼v{(\é¼+ʲÔeE"ØD.×îçY·°sPý§áYƒ‡î>dú0܇!u®Y»=? X\ËEbÙ¦͈%뾫WgRc>ÓàÅ’‘RÖ„U샃'4DAÉ®éL‰BxËÓÒñr¼q±gý¥Ï?ƒD{{.U´¸+wiña|ˆQJü#eʼ‰g#½3]û§,µ¢,fHƒåú£cXWZë(·¬\yòp•ÈÔåÞA li»ãhC– ¥;פlÑV6Ík526Ö÷h-Rûÿpj9ùúþT±IEND®B`‚celery-4.1.0/docs/images/favicon.ico0000644000175000017500000001027613130607475017257 0ustar omeromer00000000000000  ¨( @ O1w U4™S3—V4— V4˜ V4˜ V6˜ V6˜ W6˜ W6˜ W6˜ W7˜ W7˜ W7˜ W6˜ W6˜ W6˜ W6˜ V6˜ V4˜ V4˜ V4˜T4—S3— S2™O1w `:ÉtFÿvHÿxJÿzKÿzLÿ{Mÿ}Nÿ~Oÿ~OÿQÿ€Rÿ€Rÿ€Rÿ€RÿQÿPÿ~Pÿ~Pÿ}Oÿ|Nÿ{LÿzKÿxJÿwIÿtGÿtFÿ`:É [8_zJÿ{NøRû!€Sû"‚Uû$ƒVû%…Xû%…Yû%†Zû(‰]û'ˆ\û%‡[û%‡[û&ˆ\û&ˆ\û'ˆ]û(ˆ]û(‰]û'ˆ[û%†Yû%…Xû$„Wû#ƒVû"TûRûRûzMøyJÿ[;_iCzRÿ"‚Tû"ƒVÿ#„Wÿ%†Zÿ&‡[ÿ&‰]ÿ(‹_ÿ(‹`ÿ"‰]ÿ&cÿ.–jÿ2™nÿ1˜mÿ-–jÿ(’eÿ#Œ`ÿ!‰]ÿ$‰]ÿ(Š_ÿ(‹_ÿ&ˆ\ÿ%†Zÿ%…Yÿ#ƒVÿ"‚Uÿ!€Sû€QÿiCzjDx†Vÿ$ƒVû$…Xÿ%‡Zÿ&ˆ\ÿ'Š^ÿ(Œaÿ(Œaÿ)‘fÿD£}ÿOžÿO”yÿMuÿN‘xÿO—{ÿPžÿM¤‚ÿE¤~ÿ8sÿ(‘eÿ!ˆ\ÿ&‰^ÿ'‰^ÿ&‡[ÿ%†Yÿ#„Wÿ#‚Uû…UÿjDxlFx ˆXÿ%…Xû&‡[ÿ'‰^ÿ'‹_ÿ)bÿ)Žcÿ,–kÿM£ÿ?lZÿ9,ÿ+ÿ (ÿ +ÿ. ÿ:,ÿ-OAÿ=jYÿKˆqÿQŸ€ÿE¤~ÿ.”iÿ'‹`ÿ(‹_ÿ&ˆ\ÿ%†Yÿ%„Wû‡WÿjFxlHx!‰Zÿ'‡[û&‰]ÿ(‹`ÿ*dÿ+‘fÿ(“hÿH¤€ÿ2VHÿÿ;'ÿB-ÿ>*ÿ9&ÿ=(ÿ>)ÿ:$ÿ0ÿ%ÿ:-ÿ>gWÿ@’qÿ*’fÿ(Œaÿ'Š_ÿ&‰]ÿ%…Xû ˆXÿlFxlHx#‹\ÿ(Š^û(‹`ÿ)Žcÿ,’hÿ)‘gÿ9¢yÿ?{dÿ# ÿE0ÿ?+ÿ&ÿ- ÿ:,ÿ.!ÿÿ ÿ ,ÿ=)ÿ >)ÿÿ8lÿ-—lÿ)dÿ)bÿ(‹_ÿ&‡[û"ŠZÿoHxoJx$^ÿ(‹aû*dÿ*‘fÿ-”kÿ*•kÿA¥ÿ+WEÿ7"ÿF0ÿ/ÿ2kVÿH§…ÿC§„ÿC¢ÿ@”vÿ8}dÿ+[Hÿ6)ÿÿ1fQÿ=§~ÿ*“iÿ,“iÿ*dÿ(aÿ(‹_û#Œ]ÿoJxqMx%aÿ*cû+‘fÿ,”jÿ/—nÿ-šqÿC¤€ÿ#L:ÿ A)ÿ C+ÿ"J8ÿM±ÿ8­†ÿ5¨‚ÿ8ª„ÿ;­†ÿ?°ŠÿC¯ŠÿE¦ƒÿ6~dÿ;—uÿ3 wÿ.—nÿ-•lÿ+’hÿ)dÿ)Œaû%Ž`ÿqMxqOx&‘cÿ+‘fû+“iÿ-–mÿ0™qÿ/uÿE§„ÿ#L:ÿE-ÿB(ÿ/_LÿJ³‘ÿ5¨„ÿ:«‡ÿ8ª†ÿ7¨„ÿ5¦ÿ5¦ÿ6¨‚ÿ;­…ÿ5¤}ÿ1vÿ0›sÿ.˜nÿ,”jÿ+’hÿ*Ždû%aÿqMxsQx'“fÿ,’hû,•kÿ.˜oÿ1œtÿ0ŸwÿF©†ÿ#O<ÿH/ÿ G+ÿ._KÿL¶•ÿ8®Šÿ;¯Œÿ;®‹ÿ:®‹ÿ:¬‰ÿ9«‡ÿ7¨ƒÿ4¤~ÿ4£}ÿ3 yÿ1uÿ/šrÿ.–mÿ,”jÿ*eû&‘cÿqOxsQx(•iÿ,“jû.—nÿ0›sÿ2žvÿ2¢zÿH«‰ÿ#P=ÿN2ÿ K/ÿ/bNÿNº™ÿ9±Žÿ=²ÿ<²ÿ<²ÿ;°Žÿ:®‹ÿ:¬ˆÿ8ª…ÿ6¦€ÿ4£|ÿ3 xÿ1œtÿ/™pÿ-•kÿ+‘hû'“fÿsQxsSx)—jÿ.•lû/šqÿ1uÿ3 xÿ3¤~ÿI®Œÿ$T?ÿQ5ÿ O2ÿ0ePÿP½ÿ;µ“ÿ?¶•ÿ>¶”ÿ>µ”ÿ=´’ÿ;±ÿ;¯Œÿ:¬‰ÿ7¨ƒÿ6¥ÿ4¢{ÿ2žwÿ0›rÿ.—nÿ-”jû(•hÿsQxuSx)˜kÿ/—nû/šrÿ2žvÿ5¢|ÿ4§€ÿK°ÿ%V@ÿU8ÿ U6ÿ0gQÿPÀ¡ÿ=¹˜ÿAºšÿ@ºšÿ?¸—ÿ?¶•ÿ>µ“ÿ;±Žÿ:®‹ÿ9«‡ÿ7§‚ÿ5¤~ÿ3 xÿ0œtÿ/™pÿ-”kû'•hÿsQxuSx*™lÿ/˜oû0›sÿ3Ÿxÿ6¤~ÿ5©ƒÿL±ÿ%XBÿY;ÿ X9ÿ1iSÿR¤ÿ@¼œÿC½žÿB¼œÿA»›ÿA¹™ÿ?·–ÿ=³‘ÿ;°ÿ;­Šÿ8ª…ÿ6¥ÿ4¢zÿ2vÿ/šqÿ-•kû(–iÿsSxuUx*™mÿ/˜oû0œtÿ1Ÿxÿ4¤}ÿ3¨‚ÿK°ÿ%ZCÿ\=ÿ [;ÿ2lUÿSĦÿA¾žÿD¿ ÿD¿ ÿC½žÿ@ºšÿ?¸—ÿ=µ“ÿ:°ÿ9­Šÿ6©„ÿ4¥ÿ2¡{ÿ0uÿ.™qÿ/—nû(—jÿuSxsQx)˜lÿ0™qû4Ÿwÿ:¥~ÿ?ª…ÿ@°‹ÿY¹™ÿ*]Fÿ^?ÿ]=ÿ;pZÿeβÿSÈ«ÿWÊ­ÿVɬÿUǪÿSŧÿQÁ¢ÿO¿žÿLº™ÿH¶”ÿE±Žÿ@¬ˆÿ<§ÿ7¡zÿ2›sÿ/—nû'–jÿsQx-‚dx?©€ÿF©ûI­‡ÿL±ŒÿNµ’ÿNº—ÿfÁ£ÿ/^Iÿ_?ÿ_?ÿ>r\ÿlÓ·ÿYͰÿ^гÿ]ϲÿ[ͰÿYÊ­ÿXÈ«ÿYŦÿVÀ ÿT¾ÿR»™ÿO·”ÿM²ÿJ®‰ÿH«„ÿC¥~û<¦|ÿ+€bx7ŒoxH±‰ÿK®†ûL°‹ÿN´ÿQ¸•ÿP½™ÿgÃ¥ÿ/^Iÿ`@ÿ_?ÿ@t^ÿmÔ¸ÿ^гÿaÒµÿaÒµÿ_гÿ[̯ÿUÈ«ÿOÅ¥ÿWɧÿVáÿS½šÿQº–ÿO¶’ÿM²ÿL¯‰ÿI«„ûG°‡ÿ7Šox9ŒqxL´ŒÿN±ŠûOµÿR¹”ÿS¼˜ÿTÁÿiƨÿ.]Hÿ_@ÿ`@ÿ@q[ÿ~ÛÀÿWÓ´ÿ[ѳÿ[Ò´ÿ\Ôµÿb×¹ÿnÚ¾ÿÚÀÿ„îÿoƪÿUßÿU¾›ÿSº–ÿQ·‘ÿN³ŒÿM¯‡ûJ²Šÿ9Šox>‘sxQ¹‘ÿQ¶ŽûTº”ÿW½—ÿXÀœÿXàÿkÌ­ÿ6cPÿ[;ÿgIÿ Z;ÿ{°ÿäËÿ„àÆÿˆßÆÿŒÙÁÿ‰Éµÿ{­›ÿ^ˆuÿG-ÿdš‡ÿkÔ±ÿU¿›ÿX¿™ÿU»•ÿR·ÿQ´ŒûO·ÿ<Œqx@‘uxU¾”ÿV»’ûW½–ÿ[Àšÿ^Å ÿ\Å¢ÿn×´ÿKoÿD#ÿiKÿcDÿ\=ÿV€lÿazÿXmÿAnXÿ [>ÿT0ÿZ8ÿ`=ÿ1VDÿwǬÿ\É£ÿ\ÁœÿY¿˜ÿW¼•ÿT¸ŽûS»’ÿ@‘uxD“wxY˜ÿZ¿•û[Á™ÿ^Äÿ`É£ÿa˦ÿhÔ®ÿoƪÿ:+ÿO/ÿgHÿjKÿdAÿ`=ÿb@ÿfEÿgGÿ]?ÿE(ÿ+ÿ6+ÿf²˜ÿdÑ«ÿ^Åžÿ]Üÿ[À˜ÿX½’ûW¿–ÿD“wxF—yx\Æ›ÿ]˜û`ÆžÿbÈ ÿdÍ¥ÿfÏ©ÿfЫÿr߸ÿm½£ÿ(H;ÿ(ÿ7ÿA$ÿ@#ÿ9ÿ0ÿ/ÿ%F8ÿCrbÿ`¤ŽÿsбÿkÕ°ÿdͧÿcÊ¢ÿ`Æžÿ^Úÿ]—û[ÅšÿF•yxJ™{xaÊžÿbÆ›ûdÉŸÿfÌ£ÿgШÿiÒ«ÿkÖ¯ÿkÖ¯ÿuã¼ÿyÚºÿg­—ÿY’€ÿUˆwÿWŒ{ÿ\–ƒÿfª”ÿr©ÿyعÿ{äÁÿvå¾ÿlÚ²ÿiÓ¬ÿhÑ©ÿfÎ¥ÿdÊ¡ÿbÇÿaÅ™û`ÉÿJ—yxLž~ycÌ ÿgÊ ûhÍ£ÿiÏ¥ÿjÒªÿlÖ®ÿnÙ°ÿpÛ³ÿoÛ´ÿrâ»ÿzêÄÿ~ìÇÿ€ìÈÿ€ìÉÿíÉÿ|ìÆÿwèÁÿsã¼ÿoܶÿnÙ²ÿnÙ±ÿmׯÿkÔ«ÿiЧÿhÍ£ÿgÌ¡ÿeÇœûaËžÿLž~yS•~YlÕ©ÿdÌŸùgÍ¢ûhÐ¥ûjÓ©ûkÕ¬ûmدûoÛ³ûpܵûpÞ¸ûoÞ·ûoà¸ûpâºûpâºûpáºûpáºûpà¸ûqà¸ûpÝ·ûoÛ´ûnÚ±ûl×®ûjÔªûiÒ§ûhÏ£ûfÌ 
ûbÊùkÓ§ÿS•{Yp¶š¶}ß·ÿzßµÿ}ã¹ÿ~å¼ÿç¿ÿêÂÿ‚ìÅÿ„îÇÿ„ïÉÿ…ñËÿ†òÌÿ†òÌÿ†òÍÿ†òÍÿ†òÌÿ†ñËÿ…ðÊÿ„îÈÿƒíÆÿëÃÿèÀÿ~æ¾ÿ}äºÿ|á·ÿyݳÿ|Þµÿoµš¶ÿÿx§•W‚¹¥tƒ¹¥q„º¦r„º¨r„º¨r„¼¨r†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr†¼ªr„º¨r„º¨r„º¨r„º¦rƒ¹¥q‚¹¥tx§˜Wÿÿÿÿ¿ÿÿý@€€€€€€€€€€€€€€€€€€€€€€€€€€€€@¿ÿÿýcelery-4.1.0/docs/images/celery-banner.png0000644000175000017500000003332613130607475020373 0ustar omeromer00000000000000‰PNG  IHDRX3KÊvlsRGB®Îé pHYs  šœiTXtXML:com.adobe.xmp xmp.iid:CA15D7FF9A4C11E6B7A5A5C86626E854 xmp.did:CA15D8009A4C11E6B7A5A5C86626E854 xmp.did:45B92D9B9A4D11E6B7A5A5C86626E854 xmp.iid:45B92D9A9A4D11E6B7A5A5C86626E854 Adobe Photoshop CC 2015 (Macintosh) 1 —÷ª2XIDATxí} œ$EnDfVUwUWõL £ —"‡r*O÷÷œY×sfEAüy *좋Ò¨à®øÐ§(ϧ¢ës{`QŠæ©B©ã¡ó,<«ñdñ°¼ñìÆó(<`Iñ;¨ï, ï¾ïëýkF,Y”Xµ*“÷™î(–Œ”² V©_ íY²8Tp6Û•©N!å$ÿ›tÆ4ØnA >½f,“IY°¢£#—*YUÄgÁž.Ð!ó#%þ&víÊÇ Ì/Ыº:Ët˜²T{½0“VÉ(%ÇÅhrXˆãei!odYmäòj§r݇…ñ®Ì«×Òv‹ƒƒ;B{õ{­XqHÊsRȯʘARž— Zë*£Á†yÓ¦ûeOO XÝöàÛÖ(G½^ ù4R§´¤l"a ßW¢Tô„[òµºJïck³"d•¯Ø¶ûªÅYéçR«¼¶ÖNTiË–6{ÈM²\*à>…}2éˆt¢E ŽŽ #ŸÎ=<»qó~$dQX"xº1Lä:_àÀô¨¥‡­u]e+Ô[cN[JùC(`mv=u£`#J"ƒ¢ÄÛL¤ãì´wœ.|¹'>|–¢3DàɨV «Þïæw]5…×é 4—ˆïd;¯ÆûR<À‰£!Æ·MHõ€kùë!d â[»1–ù[ç™ëÜôU¤•‚²©Õ$å`^mÒ,žÒ/>9¥Ô½‘MP'ð˜Î”KN&­*‘íü"»QÞÏZq`ýb§eRnG8¿G9Ýê§ÿ;"p±,³ Ó~£Ð$FN[×'ç ±òzǸš8§”g{#»ÉÓjÍ—µv¶ëÕR¨oƒ•‰#ãAb[Щ„úB”9h“™þ¶¨^£ºáh•%­ ¦7¦Ó‰n¶AÅ OLŒ¹~A¸š)"7mh3àòÀg "ÖŸ°©<=j•Iuû+Zר\ªCØV’‘âÌØö{Bzøœ"}±Xò E×mÅ0×Äxñæ‡ò7¾DÊ?cž®ótÊɲRM6ÒÉÌÊã”m_f{¤P•„?¥‡ùņ ¼1¨Ú&™ÍlȰ'â}¢#ÅûD®ëónR\öŽíEñÇgc“ò­$êoé\úˆ’‘ø~fé#R1S|L/ ÅAùÓl" èÓv VNÙŸfiykXJ¶(tó€OÇ’§”eJŠ–%Ç@íË ôŽY×Va¬±8P^Nu‰“²À®í}!Ò!`Ù%/k"cÒ|ÜUòߩހNñšùó[¨c†b«0nkòWtw·É ÿZÄ¡mÖ8(Õï¹â£t4+ïJN ²!¾õ‰·§°ým½–°ïζ·¼7‘´»ÇFKþX¾äºžï1û€F‚Ôô›©iQHA±Õ±RbuûsÕ3W/W¶h[–ccšPz¾K€…`k¸ùgž0dLÜ ‘„pUÈf0k(åµ[¶ÂU/F%CKËíezc#ô-ë.€´‰´: Xñ‡‡ö(¨²kñ6Dl¨oÌ)pp¸•ÝпÂ)¨ß gµjݱÀ»Üh50¡!=Œ *;PLÇR<ÁÈ‚”º‡84it¤¤.?TrDÀ`EüŠZf—¢@Ãý—|`¢ábž|–âŽVU˜Êm°Ì<в¤1›@ÔX–ê€øVÀ¿B†P¤<Ͽٞu·ÝÖyF6Ão¤vX$Ähÿ“GÎùõ¦ó+% ÀòpÇO\ óZ’æë΄9ðz6â­û&¯‚ø€Ÿ¢I~f8vÎ b0$×Zý`Û[ÿΞð~É%¯L$¬ìÈP¡T*y<`»î lØ(ÍÕ¸Ôþšû@éA¸jo}†XÛyŠêHŒ²ci¡ŠS€z-±W{2?:&Ž}âšS{ £übñâYË@Í“µo<ÔC¹zU¶ãû¨× ØJ=.£y% Ú³± ÝjfU‚ßG¡[±%ÑÖùn˜‘Còi¢zÀähbÚˆÉR>ŒH`ÇþЦ(fFÍ@õþLŸ¥,K&l惉OÃæ dSfoS–Ljùf…7ü’…­Ú•Ãи܌ÎèÇBMÈ¢ðbyù]·"Ò_ß%&¤2lTéaŽ¿×iëþŸ0§ð3Éäè`D÷ž“íøx_¬ýŸÂ0>ä?É™»1%}3ìrœ»2õõm°1%HÅm¿õ=ò½½5“8vd° G« „ÄBUeô«‹ÑuÝ~{bUöhqpûÑ"a·„£U>ò2m XÍ/ d?x­©j°u¯ý–±2¸¢µc ±ÿÃ¦§Ã‘¨JežD{!´oܰTˆ‹•Y÷’±¼òz'Ûu1ôHĹᙺŽiü#P?*Õ›ú…û<_È£È/C>†ì’ò½Ö)ð‰¼¯QøË’Ž‹gy—@†ÚŽÏ€×q5ÿäï.ÒcÿãP;•ø¾q3Û;ðOÛ°>É÷µ°Wæz’8î,ùÖ¿†fœôN®&²o;ïSélêÓ¶-­‰Ñzìˆ0«é®bÚ W\­~pûóDwÛ!ð–‹Ö±¾j¡‚•‰XPUÐUb*k´—Ñ;(ümv;Žü1d©ã‘PåR¿R™%aeam´u周´SÝ°Ž°rÓ^%¢ÐÆ^ }¹6‘íz;¾éo¥ð*¹o=3O ê÷å;­ˆÿ–5û²L%«¹á4¦2ÃoOkÒÀ×äôÈ^z"nÛþ¶²íÉwæ ¦ÏLˆ©nì-\åZº„§_Ðv/dÄjZ<¹òVaAGÁÄÓ4Þºõ@ìšf·é4Xtáw¬vxÈç ´S¸JVH +íR˜²`Jõªé£P`vI× ÄAxÅÂvwA{tC¼Ê+kÐKRØS+ÄgQï+ Ü {>MMSÃñõM˸8^DÓr)—õŬY}߇eÊò½”nЂºÛŒˆ‘AqÚbZ{]ubè*ˆëV¹@3êQÈ¢úšD{×ÏJC»î™Ô§îÒãÏ©ÂïÚÙŽ/€}¿ñ¥^¹lÂÆ”±½4ÙÖñÝâÈóÏ›kþã!Žô”ÿ¡?úYΛ(Ì%p0Ãn85;ºÈí•aˆBÄ‚é÷·Ÿw]6—|ç0„«Ð·J›1Õ@Ž];V´·tcq;—ø”çåbBcK{dtœÖ/µOëã_C¹Õ@9¹Î^”ÿÓP¹8u7“pÅô³—SD¥ø>6y¾ümØóˆ$jMk²môp€öR¥ä¹ps\XiYyÊëë3 ›<Ô B¬Å|ý.X¥0ÆŒm6"w Ÿ¯~¤¤ÚÏô˜–ZJxGÎ/Â46#ü† fÄ,ˆyü?¦L±ÿ.êI?òuT·„39Z þd™B üSèG(g-ÐÇ¥qÆz@ò4FG¾F¡šúGL-ÜÓ w«ÀãCC¾Go(”‡•ûE½€ùêJ¨ÿq£½zä¼étÑ…gû—`qþéh#Eybz¢òŒIG»x8Uø*<´Sm:hO—WÿÜV ¶)å‚ü„på«GKÂy?ÌIt«ùW4Bb“Ø€Ñ+.h?ï|¬·º(?¤G®è ¼ÇN½˜jŠ€ž_fÁ¢öU8B´ÖÂ6xJU\‘δìùîᙳïbô׉Þfd:åȳ»‰t÷ áþ@È*•YVN-dâõ_Ò÷Þ_ü}™g¬$ØÝ9^„ÏÚœlþ"¨?ŒG‡¾ñš¤`-ƒ´Ž§]7/¸`”ñhraÁºÆË÷ÿ÷d*뤈ÄIU§ bo—Ö?ä­t1Êü/CCÛë²2ù¬wÈ5óŸkC0z.ĨÍyóõu2Áíí+ßÂàòµÀÿøÙ Áð%ÃóŒ÷ì|Rý'»òEn~ÏPS`¡€ÒÄd%ô±Ù® ×[ðMžl„QG¦ýgë•v¶ó-^~÷×`Pm:´=;×u°?þ½ä%|SöðEbä)Î ™¶ÚS‚Óœ‰Dá Ó‚§ÂÕÿ.Œk,y˜ 37ðJ;)û£YL C`; WØ-زFtfÖè3¯æ¡v…ýZòF!ä–wé"¼•¿2ÝÖ²gtô±RIý3m*Õ‡ :wÜ’;%søˆå_ ÆÁ1{ºàGb”b0+Ž0¹Ã»O…+ @¬´e4T3˜ï˜pó»?¦¤ÅÝ(º™ä_P ê‹ï8š¬Håq1v›ãmyfÚÎôÞˆM½b2Y3š 8–óC@¡ocsÚÄzT¯²düe8M^¦T”‡/™´ÍõfÚ×-APú…›¸g^æÿ=-¸¸”cC?ÙÇ©ýí¾ñð#×S…á®BF³œ3ÀÒ`]=~L¬]îB·”º†ƒè7ý‹§Ù„b× 
>Ø”æ{Å%z±îŠç\ݯzyèàçÒ¹¤ƒS°u[¡ÁS”ôCuùC³r½ø»*L°¶öØ{"åd±cðpd!§Ž1È2-/£ùZQóóƒ†ã8vk:e·eZ>¹LÚá÷ž‘‘;„WzÉQ+6n„«ž½ CE__“eXažýô^ðã(32)Ð=AΓ{ûÏ5)0Ð… bAuôaˆfFHJxÃý·AÈê‰á˜^ Öô°VKÊË?×hÆï%@€>òIò+#? ³øjéûÿO7@Óy¬…ƒ/–¿jòÉJ´—”˜.ì*TÛÆ“¼:JàõtÐÁ¶å_èF"j©L­Ím‘øF+Ñ@ÔšYwµ£¤‡º-ŠZbïAëoøÞö×m—´åZNÈŒqÔƒ‘³™gL´cZ.#cÆ7©’=cؘ²uKwÆ^Ô¨ã¿Ñ3þEßQ3£6悔jÕtK2ñ‹Æúå~WþÆp“*ˆ;ŽÆ=‚iŒdi¡˜Î«%ŠžƒË3--Vî'ÆŠC¥’û("°‘㡇;!}ýpço·m^¿¾×]FÂ1Ò°ûR¾¬Våyò`ÍÕ¥ëÝ‚·’†v«%2 S…,¬õBe•—iixºaÑÿ$ÄÚO6ùZ,"¦gÚ{zÊcꈖ)£®Îel‹xͳ¨}ŽòK¶÷î„/_ÈõYðŽü-Ú1eOZò°T¶ëÐB~×_ñA‚ö…Gï*Ìv]„ø~W|šq5ÄÃ¥ÙP¼ÕÉuô¹Ã?‚eŸraŒö)÷”œ¶Î¿ÂèˆkØ¢¸Ð5á'Rêb1ò$Öîh%‡§´sjð¦ûÞxÔÄÈø%…±qáãLzú¡½ŽÚŽÕ5E€£W®7!ÚÓ«±îê}ÝÍ<`ƒ/9B5< ©jdì;8Nc³“°îêyòäÕï+÷o W¦’œ¦µ’œ«+z#ýý#°§+Q96U|³Jèjá¶¥®qò…×C¨; •—qЕÐ]¡¡~®Ó–¡;"~uy¼B«ñ+F F F`Ÿ"ÀÝÕú¸ÕÖñ̤ôVhì ¿Êy¾zìSÀ2zû4²sFÞ« ì*¤à°¨æÈÖd‰O±úçB<Á¶‘úÑ~³ÓíÀöÜé¼ÆÚ¯fš¾(J\L;DóIr°²]“rKïËd­ùá"¦Ù-`’jÒÛXQ-œ^]Õy¤pì$„­y-l÷,Û²S)GäÇÆ¾ë‹zËžu_¥pûúúì ºQÖy¯‘z •¢;=VO¬\ùLïcÙKQ/Jœ®ãbÇ?yÙÔ-bX-ºuÄOŒÉ\çgâgÂÀt࡚NïÄA })Ô°bŠˆˆh,lñKôì'ŽÒPAÞ! dÙ F#*uœ=é^ê¨Ä߃ßYA0b@ž|¬“+|ÐW !å–ö£W8(TžPÁ¦‚”>’a‡+Rf×`ÐU@Æâ¢7?pÖ!ÀøÌ‰`a{ùpXg±Öb øÊ…ímØ1¸¢­[$Z^´g ÃK$lÛÇ5rc£Å÷žyó‡µu$jÉÍ ¥RXWÇoŽŒöôô@°ZïB¸Š ³xß4FZ˜r|ùlĘw’ÊD  ~B¡_å•*°0¿#6 |•Ãý¢àE}>ì¬y½‰æã¦Mã¿}‡€æORX{$—‘ Ï ¾Ì·R<ž¥‘)èôârg¬å®BÆ•<9::E½`fA‰àŒ¯ñyÐ<<™Yy¸4—~оn_¨‰šZ>ÒSƒù'xf"gC~EÒøwC:í¬tK~p·`E«±fmè8¸¢#÷ Ü ˜ÒÓƒÕŒ¢…ö°†Ý.¹¼åY¼áˆì›¯§¥Ô)¸YAJ T½X–„ƒE§ÖÏÔ6úá›®ÒC •W¨ Bh[êŽ0Êå•f!) ÂÁežp|'v€à…Q2Îùš'8߇~?_¬X±– P-Â|Šÿcbb‰€òô!ÊF€*ïßú"êETçr¬`’øü íù/G±’ÊWÿ+Œ…, MzŠÏ—Öµ0ç®i½K1´c^z4š“/Ï55hPjÃ~ÿ5z ”p\²±¿kˆ€5… ìTnÏtCNÒ@5¾3,'a‹’çžDvãͬ®·û½¡ªÆƒefG‡qºµa±åwTˆ ¿UÁRö_fìfÌúËz‚Ã1ýËÑ+¹çË 3UwBÖžðŠÁ²úéàüCŽ]ÄÄÄ,Í¡ÐÍ?޽p´üì4Nñ¯Iÿ)¨Ý“Ÿ«˜äýzªÐw^Ai¦©B.Y—hë|Wid7…^ƒEÿ;ÇÙŽó€ÅËB,´lI.Ú×à@QW/ õÙÖÌÚ¬8\ÜîK''p¸.§ Gˆ•µ@€ƒž7.r8óª5•Áô ÊvUÀK/›iuFFÇn<¢mã—8x•¸ ÂUo0¢R‹È5DŒ• Ðɵ"üŽ$ÚÚZ?º- õk…ƒ”ÅÑ=\÷Vqí[$TNÆ·L?þŒØXvËš))<èÔØÎöæ0âä‹Q¡Bó7p­1ËW7Iâ(ì‹÷s+߇»!n ÓÄ´™ŽwÐÒ"u¦>œj?àÇ…¡Û¸ŽW¹òêp¬ƒi7ö¡Ôºxt‘ÈçÍÔàœ»Ð-ßuOiIÛ­.΃GÈQOéqLõ@ÙǬlºSàtä{ A¾Ï:^"iS¸Ú-mçJÚÛ*®²÷SáÊÀ¤…%À¹ÂhL{°ŠÑ§B3VžZýâð2·=s.~¦‡±¨e¸ðnßùñ®aú¨ž) ÑyξËÊÆ Iù,+ µ )3s¹YîejöÆ"¶üŸnˆ y•žíÀbîË1’ó‚pĦ|í5%8Qâ‘b&¹=ôLóÛPݨ/¦ S…{~áñ†é3H+¼‹._gc%\ëð»Jë‡ë¬" äÔ øÿ %ŽUÍárZu ¶õÓ/J˽`2KNZ Â(Vº¥ù†BÌ]|ÏB°¢Ò‰”,”¾yDëwlÁÔàúýsZ°Jm•4¡ 5rê9{3ø1—6ë ŸeKXÉGü@;ǃwÍÿYøÃ¼ª¹ß±‡‡€*úV8í¤ïë¬G —c™2Ûi£®;Ö- G|óˆá亮‚`õ!<ü$•ûGû6L%žzŠ7WPk>ÇíÐq­ÓжVš*Ô Þaùu¸FçËx÷„XDGñà)½kðWLN «ª„M6ò\\×3`zSÍ€$ŒénÇN‰T"]m«ÂirgxlLXJ†kÜ^ó˜5™‡,ä¬L¬øæ*—™’À³¯bZ(R¾ÜÎu¯KɂǒÉûÅ’«hSØîzO1¿ûÏðÌäçbýÝ7.¦Ü؉„êñ®‡°˜WæTÕXÍ‘,òåxŽû+žVŽeT¦xíÙdsQÕè ÑØ‹2¬ÂÞóÓ°#ê蟚ûòbÉc € ShÏä]øÙÐ/ò¨„سgHåV^ˆ©Âï㛂S4­L3Ó™ÄìÑ[ChùMg€¬Áá…˜d§€£§UwÖ)`=¯€õW ò!BêÅT˜]ŽÕªwbú¥Š¤×ÚštÆG Íes÷ÒÁºÚ0¥*Ânx+¼k(¹Wµ˜ee¬eÄp§%²ÖSŒF‰Ëõ¹Ö)çÅ 6àeØî|9|¸=n†òB»›j¼p”Œu.Nzn ¡§¶gŸƒ‚ô5øÊ6maÂH £´H¯ÈßÀÞÔšD®ãBøÅ&£ê;d±L·[ˆUzGxσO«õlÉÔEÏ•*4§Äé—˜jã©ç¤f«›ÌwNÞfç:?!ê|¤‡iˆ¦—EèêF˜j>QÒ8Àô˸hû;0 Ûy•'tÑ-‹ô´ÜsêÅTcôZ+€ÎƒE-«Z™V©„´Å„%þÜ-ÿ1Ü‘v%2­·Æ±kJïp”‹BÁŸ¹ø¢ÇVu£)¨¤É„j ˜ÖvÉù1¬ZG"öo©¬-þá $”©ffC„EûŽk·@`uUƒG¶hX£¾ EïäHcå$ÑŒÂLÕ‡¡æ7Gæ%XÀþR!Ò”—”—8Å©Âg!]LGt0ŠP4ΰ'qZ»Øî‰ÕD»Ä£jrpDFé«¶[¬fÃL˜J_**Q¾õú«-NÄ¢Î/¼×otK*×)7ûj-zxy€9KЧN¯Ÿ-gfñ 6ŠˆˆX2¾55ê4߈¡€Ÿ¨€õ‹­0°°.øÓn^ßÙGûÍ&\™4QÐNˆ]»ò*×õÏèhßL;ñÐÂÞ•ˆ8À_z× §énÞ88ÉtëlMˆ©†pËñm‘JµòpŽõípdó$r£d/0©a´šÝ«™Ê°®!HÜrêÑ.E^Í„ïbâR?ŸØí¾E >ù__÷-2•Ccʪò(÷¡w䑨9haôÊ¿ÕÙ}Q¹'MúÍÙ Lîú>¦ ¯G[zF±˜ÖÙ¬èÔà-¡Ýµ!NK&ÁÍ6ì&…°©¢Íܬ¿ânÙ@F¦0_à®CmW*.WŠ+Õ•ª—`F€ÐæûñßL8X«a0û1|s&}&|çt8‹…zø9Kp±Qƒ!Pëüü›ûÄ›ƒaɣÎ:…‡„îé+ÿ&7¿ûl|SŸ|sA‚Ü5évÀsÔeNI¼é|v…©BßP¸RÛñÛïi¶Àv€ Ì‚…Âu¥Íõ~f @Œ@Œ@Ã"`f-ÈìÂÖDÇ5ª¦†ùž…)jwü ®÷Rj ÒG \]š,áŠÉ¡èˆaì*¼» gš*$nÁîI!y è‚§( ‹'‡£eXDª&K´ÅøoÞ„ØrÊÏúk6Z„%e v±<ñ¨~u ¶Ê“®Šs*›2ò|OûÇÜZÓÂ@YcW –×4ÀKì÷Xx´b—Í‹@-Ë’ö ˜^àÎ2e:TÍ O嘳¡ÖuäÍvÀèQý¦À0³atÖ¿Aà¤e*\™ôë‘8@ ­š•M_°Îõg´;v Þ:ÔîBõ¼_<¦á¡DÊ>¶TÐCX˵`Θú9:¼èÙõ‹8_`®#œŒÀŒãª;K>+Ÿ>:?ßT*Ó£©_tßgâ¦Ï›9ªÒD€ôðf¶›ƒYöG1Öú+L@¸¯Ù9XðR%m)î áŽËòò/w¦LáBzÿ¾²j–â9XÞ¯B(Õ86PvÜæ%J •ܲg¶^ýÁ±œ×N =õpÛÖÃå‚Û xa´NË¡3 ˜×†lÄ”Ók9[5[˜µ .ö…è»!\•¼ @¾¢P0'(ÜöLÚÙ?âÅÕ¸[ÎvÈ8Hds0n,í 
þ›'JÝæåûM£5Odz[9¹if·›6;¦±÷J%Ù'Æwé]ÑuJÔr)SUá©ãF‡D/<ÇF¬\Ãÿö†OÙ6ny‘–,ËŠR,§ ëq{}|*3<€˜G³™kKú§¤ZÄm †[wÃZòÊXñèU£f~}â%“–߉agXk[„ØÊÙ5 l9•)Ö ¶Í»ÜáÝ?œB[\ç+áî4gÄ#:Ï)EÜÅ'ÛpæP¿ ÂgX¯›–O!îÕÖ0Ïjѯ–oÉ{ŠwÀI`°>ft³â^KCŽb—†Â3±æn¯0‚e—ŠžÛ–Iw&Ù˸¬Wz½ª—•e%v 78#AUZ!2t„væ{FϦÐ/2/2' 3=ŒE-Ã…wûŽ0 ñZ¸¢z¦t.D?®ö]V6NHÒbYíXH™™ËÍ2,Sz§?ùv)0Uz ŒQh…¤¤ägBQ¡R0¸‹OŠ ‰ö®“híÅTCœWöåGnÛvÞ¯“)çån±È‰Â˜êŒ€¾ìucÂwL´&³´°”hΑ,eçGÇTKkrãCÃ7þö¿¤ÔT”^Ñ+{—!S™5#È´€<Ÿ å—òÒ‹o-auãp—ƒÀ•Ÿ ÝÔ+†¥’™•Ïõ,ë¡$†§ÕœÉá{ž,ý$Ü‘¢ÝÌšªØ0F`¿@€» cš'†o‘©iÆVÙým µl×hX^i‹£J£X Ìb½fñÿ+{ëÎ 8FGnÁPáËÑ<°ÁbÆÅ…ÞPÎÏ…,ן£Å‘NæªuL ë<[ZÖç¶å¿> åú›¹~îJµy¹Þƒ…Y*^µÁ4=]NqëŽ]ó;Z~Ão™ò¥÷l˜Ý[£”™p„oYÅhä«§ñ; )Ç…mp¹å—õ‹#^1ÅÄÄÔ r¶í.øâgÁ¬^µÅ"ï2Œb Ù“Èt_Wí'Ôø½8ô޽üöøXé‰dŠ‹çb)vqVç:hm‘Ÿx ‚§Ãy§'ug'ذ=×ó ‹÷¸ÝôPþëP¨‚ …Š\—¥T®?êÅÎ[¬žYÞc’šY([=ˆÓå9‚ ͱH”:èÅT‹^ZNkÇxy*ÏÏö<Ø ³÷úAd”ÙÙø18¸#:®[!ñ+F F ¾h>‡ÌoB“~o8=RÞ¹#ÃÁš"%,ÿÂ0:´ÂêËeî»Õ§6Ø/;âóO£mþFª…mvÜì›øf÷éAR/¦z"@>§7öà`Í€*ñ=ôìu×þŸ¸ÜöÈ?+Ù ½ˆ_óA`H4½ŸËì„#óãiŒù€¸P»æ\Œ=ãáàQžÐÏì¨(T ÂÆéûþøXQeÓé×á@³_c4ë?·}ãÌG&þã°ÇTßä[´ l­‡àÅàû–ÓBF-`‰={@÷@âèM!Æap.p{Ž=R|M¨½ èÖ¥°¾tþT0{ VÌÄàFz%þ+b'VÆÄÄì+ôˆU©E~<ð/aÃÎ`”Øèp %…û{Cƒª¡¨G±z:NÜäõõm°¹Ø}óýoº!-íËÆ†‹h¤plaf›}ÓÝŽ…úÑoª£TîŽfQ·•Ô´cÜEÕQ=ꓨ¥h\~¹ê{ÆŽyGãcìñmÂ6oc¿ÜŒß3ùµkÔ´¡jÔ{Xd¬Ī•‡×£,@ U‘–ȆGÇ\ǶR™tKOÑw{&ÆŠCÂ-<Š5Z{žm²!ýpço·mÆè–K!‹£\U…ÐØ–˜#fÍÀ ^‡§<lj&ÓjA¼¼BtwOô÷óXãʪ‰~éÜqF —"£žÆE¿éW”xv ®\÷¹#m¿ –_M‹WÔ~¬Žˆˆ¨5ìl:äw2×qV‹|j†ôZ,0¶³0Šõéâèžû´»`d~'±v5è†aÆcÐ(mîxñÚáq¿'Ùb91îaÓ•b¬ÆŸØÎÐ׿`áãOÿAäÒ¢%™žÏöY·ãÕøJ›Ž‡Ñ¬¡Ñ1j;‘tÚqìÆqX ¯Ý3 Wyç>óäÃïØ6ô•s!\=¸Ì„,Þùt+¸É‡`ŽÜ‘±‰×€Ép”G'&üÏ¢`¿ÚìÉqïr¡ ZÓˆ#Wô³dçº_uW—…Î*eT¨§¾…mè<˜n›W ZÇgÒjÞHVÍ©š¼¨y 5÷pj:žXÕ/F½13˜ÄøÔ¼ðíå!y¡(©Ò7*ù/X3ò ò¢è(>ùùc«gÉ  ~;ín„¸^ç ¿=)Ñsò¦!;iŸï¢ÉÁúžµÄZ4ÏñS7 Èz'%Æ‹ƒâ±§î§Dk¼«Yð^–ï8‹4Iq]×+x#£.Žpñ{e[Û‹„“ø)*=”#XËdºŒ@GîÇñqßx„¥’0£§ Á`6Ú¹ÎυرAìháP}X?hƇ~jáJ*¿jõtÒ_ÁôôÉËOº¾õ•ˆ~¬ŒˆˆØ×?:<&mÊ—4w«½ ­ý›“mÇÀ ÝE…°Ê®bÝY˜lÞqòÝ%N¾þ¹}?ÁÁ=—¥Ûع×=£fìÍšèF3ä!ÇnýCŠ'w=Œ‹P®Ž:w²bЊ÷MR@°öŒL¬LgžÅÙ׃@Èš¥ÊÑRã‘ o}#UܽÇÂ[.d‚ÀyN9þq"³ây°GFÂQ,Ú¢Nµ1ƒùÚ'ÛùAW·@?ƒ‡n‚p¡ˆïF$}ZŒöã|®IáLk6åŸo›«ˆ(’ˆM½ú HU"ï‹áFè›rU/¼èo“–]ÛÞXiÖ£zbcðiê2µÈ Ö¸žü¿à8 ÷N㔸«£XïÃ3Ø-2øý×9ÞIÚ°¡ ŠgžÔwMßÝŽlkOž“*š†d. “ÕSáØ)ñÈÓw‹d¢Ut­\#\—å½f°sX,98¾¯Äk¶~ã”Ã3gßµUô¢²õ2›™t-õÿ‚ÓÇQ†/#©Ô Àäº))_ .ÿ›D¶ó[ØЇNÅÝ¡@a*kZ“mc‡)Kœ®Tþ\¸9þ'6š{Õd;–HµX\ÛB!«RÆÌZlcN(dÙ˜eâ ׃ÿB<ÞÿWÔ©×eá+˜.‚GN%/ ‡Ÿù9#Ó|‚¡ð‘'û]WŽbÊŒ)dQŸO9±üS2Zˆ($èÆôòh¯±>há ¾¼¿”ßõE|ÓßJáUrß zd°L}Ÿ`@£"Oj"q4ü>ÀK_ ºi”û²L%«i ©[Duݲ}uvdX>ËÛræ G±ÚpžÏ{˜,¢Ñ }ØO_323s>qÙpbßGÐ xE¡àíhË%–>v<´Prcðk\x8=Α, åÇ“¿í¼[LGƒÑ,èjA+X 4¿9ÿˆÞÈx¡€üßÑñVѽܘO0š4>°ÓîKpœÂm`d$d3uˆÍË™>¿YG*aD*\s ¾ºÀÍïúôHÄ9®Šøo?F R½ÙáXò¤“7ZzÇ5Ïešy+ØQ(Å[ÂQ, aä…1Í,úÃéÂÞÞ^‹ë²6<÷? [¬“ÆF½Oa$¿DAËv$Öj ˜W. 
Ï3*±õ(z¤ À±Ó¢ð/â[Å“»·ëéCNZX[`-¾«¤b6“Æâyïžw=üSº¹]ܾó-²†‡ÜüÀ+Ô¥Hj …#°,ó4§€T5p°K"V¡[HÀôO©?ã&Èõ¥‘ÝŸ…>óõN–Š˜ML1Yʇiì8—©ii1 bÁüj42ø,eY2aŸFÃi2>˜Q0|˜ñ6åŠo£oÞ“n¨Ú|)®ïbXì|~ÅŒü2 ®×Znd°äÛ¤™ï¨~MÒLpg%X:Ð/üö퉞c¾8Ëmþã™_Í»¼«àm>øMqÂx{(É<ì’ %^¾cZ0_H8Qtǰøýåp0N|?\¬Ì®R‰DŠBÖc<Š›æB6‚ÊjJˆ¿BI·µ5™ÊŽÑÍûׯïuy1ôzÜ]¸àè5¶C¦‹eÐsó»?†Š¿§lû sOYé?Ú£‚Œ‡)³Ô3ëÍ(T J)Ü}(?ï¦äµb×.N·2,ÚÓžâݤp¡%SŒû*ñ¤mibôUÃNy™r¬P~tï_qúy©eA+\ÔÛpÄsft䨡YJ ÊT°“s)ã1gØàÉI¡Y‹j)³ŒüÕXFËZóe«ü¤ !JûïJäº6£]Ø€QþJr@X®Ô;±£ðKËjG¡d½%«›Vƒr«Í«Dsk•€­èä'QŸ“õN}?Cöþ–ÎÝ|ï™sÏDn¿ ƒ('´f Ç‘¸ˆØ7WE¿bÍù!@ÊO·lœ7ö71<ŠÝ¶éU²Öˆm«üÖTFØvrr‚j$ݘ‘­$fuÓ‰–äž‘|6Î9"»ñgJõâÙe+\€ÉLX“$®øÞ=‰\ç Œž ©â€Ó!ZØ¢m-í%‘‰aÍ:_Zx"¶”dïÅ÷f×S7ŠñÝÑdzÁWýKËÇâr‰³;&{rK!dxØhd—F$ÆQü¢Qbw㩚‡ÒA ‰ë³ð/ºÈ½†Þ/Ü+_ª 0 âŸéúW¥oSWé`kô *‡‡·)44¡` ‘½ Ê<º""˜bc™ò;Pæ# `ÚJtKÁM{ŸFö¼,ôÈðJã/±£^'FLÎÆû2<‹ ×ø»ôo%†Àn˜Eë/6ø+¡óÚPÀ0æéWï–u¶Ÿù½8žN{ÑXõ»?‡Jõ|Kú'¢ja÷•<–…õ,ìEÌ3ØzE¸™“Ó‡ÛEU »Uµ¦:U[k‡Ê¤Vø©–ŒÕ’HNy+ÙbAz¯[|Ç¿þÈ–s¶…•λŠ,OMV$2Œ€IärŽpžs*ÊéñОÕxØh±QàîC6 ÂÁØEË5kw¢çG‹k¹H,Ûô¯±dÝWbÕªLjÜgº\ X2Â(ZÁ*õ‹¡¡†öÂ$›íJ‰T'xËÓÒñr¼ \vNAéóÏ ÑÑ‘K•,îÊ]Z|b” G™ƒ2oâÙHïL÷)KµWÄŒipý1u¥5òЇ¤<\%25E¹w Ûv bpmÈ2¡t×ê”-Ú*¦Yc­FÑy~¼©ýÿ 1Y!ÐÒ¿IEND®B`‚celery-4.1.0/docs/images/worker_graph_full.png0000644000175000017500000030270713130607475021363 0ustar omeromer00000000000000‰PNG  IHDRe%6‰e±sRGB®Îé@IDATx콜×uæ{&çœó & g€ˆD3­`EJr¶e?¯¼öZ»Þ]?íj£ìµ½^g¯´¶öQ%QV$)&d ‘H€È˜ˆÉ9çøîw‡ Î &tOWuWU—?r¦««nøßîa:ç|'h`jtJØH€H€H€H€H€H€|N H‚ƒ}>*$     x@€FÙü…H€H€H€H€H€|O€F™ï™sD     x@ ôÁoü…Hà§/IKkǃ×ü…H`qe¥²uÓêÅOâ»$@$@$@ó Q6/ d/ß×ßÿÜÓå]g¼šH€H€H`^4ÊæÅƒN%pìÔ©¬ª—¢²uêÔer]$`þÞiª«”'Û+…ùY†ôÉNH€H€H€&@£ìa&<âPUurüÄ;’ST"ѱq]%—EÆÕyd+K‹dßÞ-ÆtÊ^H€H€H€æ%@£l^,<è4½ýòÂw_•¤Ô IÏÌuÚò¸0”Â|k*nITd˜|ú“Ú7;#  x˜²‡™ðˆÃL©;Ld“$ùÅ«¶:.‡Œ'Ð\_-ý=Ýò•Gaüì‘H€H€H`e³pð… ¼~ì¼ÔÖ5KÑÊõâÄ%rM$`ÞîN•GV-Ï>µ_rs2 ë—‘ ,L€FÙÂløŽܹW#'O_–Üeã€q $`ñÑQ¹_~[Ö­)•];6˜7{&  ˜E€FÙ,|á$==}ò%Ÿ’ž)©éTŽsÒÞr-&Pa¾Õå7%66R>ññ#& À.I€H€H€"@£l!2Yµ~›r4Ù3,H\>±ùq¹Bç{wltD^ýÁ·äÞwe°¿OÊÖm–]‡ž’‚âé´S¯þPî¼Y>ûëÿJ’Sguñšº®±®Z~ù_~E±›žr»N¿ö#©­º+éY¹²zãv9ðÔÏKHÈôŸ xípÝ'~éKRu÷º\zë˜4©úVðR>ùñ/HéÚMzŒïýÃ_H}M¹þýÝVsxìcŸóÈÛ©ö×UøcÅ­kÒÙÞ"Ùù+dÓŽGe×Á§f­/Ü9á‘?ýÎ7äèG>#­M réì1ikª×ýnÝ}PÙ÷ØCý:í@—âØÞÜ ŸÿìSÊS–è´åq=$@$@$`[4Êl»uœø¥woÊ•÷nIÉš iK s .3ÕwoÌZK_6:{66:*¿þåÿ,Y¹…ÒÛÓ%òï~SÚÔM61Y.œ~]J×l”ÃÏ~úA¿ãccÊ@ù?²aûžÙ±Ÿ~O^øúŸJþŠ•Ú¸ƒÄ?ýÝÿÐÕoýÛ¯i쫽U÷#—Ïõ[wi£sÿÿù¯¿£<–%Ò§æÖßÛ­Çì—îÎv5÷‘ã/õ ÆùÓ?ü´Ñ¸~Ûn5Ÿ-R~ó=¹¨ÖwíâYùâ¿ùoæíî¹½=zRá{k7=¢Â*7èÐPô[qûº<÷Åß_jj¶}xhPîWÞ•=»6Éúµ¥¶]'N$@$@N$@£Ì‰»kjni—ÿô”dæHBÒlí–?#—¬U ü˯þ¹DEÇ>XƵKg•F•<ùó¿ ßý?.c#ÓÆÍ÷ÿñ/¤£µY¾úW/è¼-\0¤  ?þ·¿)ÿûÿP‹fÀèŠOTÆÌ›³Œ²›ï½£ÏÝ}èi=NsC­êûÊÁ§>!Ÿÿ­óÀàyûäÏäëòyëŸÊ'?þ᜔aô_þö»’˜<ÃwëêEù“ÿ[rîØËòé_û]ùµõU%$qWþã—žÓ´ý3®}ÐÉ"¿¼ø© ²/}åÏdË®úÌÉÉIù¶2O¼ò}ÙøÈÞ3OÎEGÕªPòüÑßkƒ ¯ÇÇÇäïÿøßË›?yAyËŽJÉê 8ì¨699!ÕÊÍJO‘§ŸxÔQkãbH€H€HÀ ìïåò\ò ŒŽŽÉó/¼"Ñ1±ÚK³ìŽ,{áâጘ6Œ¯³o¾¤Œ¨Ÿ`á8Œ¹}á7¥«£U®¿û¶Ê “žò[WÕ±6œ¢ÛåsÇ%>)EÖmÙ©_Ÿzõ259%Ÿüå/=0ÈðB‹J×È‚8³!Ðeá8£Ô~t´5Ïõ«ÿRbbã^=4OÎÕ¨ÿì>üôƒ ÇÀè¹/~Y—Q8ñò‹®Óõ³V ÊLŽÊž{Jy<ùgßQ›ËÅ 8‚=eŽØÆÀZÄ÷tLúú†d•2\¹Pv&0s gœùú¡u} ›ÏÚ»oŸ’Ê9Ꭾ0Axª6nß+{Ž<£Œ˜ïÊe•Cuô#Ÿ•‰‰qyïíÓ²÷è³2-ôÐPS©žÿõÕß}hHZý½³ŽgæÍz9‡©¢Ýð:yÛšëïë.r9·¡ÜA~ñJ©)¿­ßòä\W_¥k¦óÞ\¯ñ395CRÓ³u~ÜÌãNø½£¥Iðï/}áç$11Þ KâH€H€HÀqh”9nK½ ·/¾/×oÜSyd›•îˆÅÎõ‹-j”}°âá¡!ý[tlœòX=¾™–™#©YúœÂ’Õ’SP,ßzSe·¯]ÒF–+t' jm¾¾\Ç\ÆÎý@ø¿݆‡t—‘Q1óv­sìÎèɹ®Î•ñ8_ ˆPÈé±ç{ߎÇàQ­«¹'öm“Õ+gÒv\çL$@$@N%@£Ì©;ëÀu54¶ÊK¯œxi⓳Bdtu¶iYS]¼òâ7%4lÚà„uCÈæÏlYy…úåÖ݇ä©OþâÌ·Äe¬„‡(~oÙ‹JD*†—Î×bð8¹„CàY›) áz…†‘“O˜/ZFv¾¦I)CÎ×  £ጞœëê <ç6Ÿ´·4>Pœû¾_ONLèzd¹9òø‘Ýv\çL$@$@C€É³Õö^èÐðˆ|Kå‘ÅÆ'(åAç<ñ‡BâRX²z£ÎsŠ‹—¢²µ²båô¿Eekd·’¸ß¾÷ˆÞÀ©Ân•ÃsÇ^z(dFÝoò€@¾ÞÕv|RçLA‰ñ½ó'uŸ®÷ðsƒ s„¬åÑÙ")éYÚ³wV­ “3Û{%J²QIã£yr®«HôÃМÙÎ+Aƒš¤SZmÕeLþ3O*v®?Ö)«ä:H€H€HÀè)sÆ>:~/þàMQ‚”GÉ9Ë…dýÚÍ;,Ê‚k”T;r§–j½øóÿð;òµýkòØG?§ê¥èšd¯þóóR¶vó,Aˆr@Ôã¥ïþƒ «½sj}mÛsH…„n”øóÿ¤Œž:•¯·MPÓêÌë?ÑÒöOüüç—šÎCïÇ(íS¯)‘I´}ª’í_ªÁöÙßø}ù³¯ü ù¯¿÷‹òÑÏ}Q{ÆîªZlñ‡@ɳŸþUÝ'çºÆ…Tÿÿò¯ Ö”œ–©j¼½'?yáëznPŸtBC™„ζùÕ_ú˜ÄÇ}¨ä鄵q 
$@$@$àD4Êœ¸«[Ó™sïÊí;URª #»Âú¶ÄYË´|ppÈ´àǨ.0­¬QHæï9üŒ>ýÖÝòåÿö·ò­¿ù#%éþïô1„;¢2díç¶=‡Ÿ•ë—Ïk#0)eZÊ~æ9_þ¯£ê”ý™*Fý¼6Rð^Jz¦üæüweÐyîAJQϾÇ?ªkƒÝ½~E~ûÿÄ-£ ãÂPýwü yþo¾&ßø³ÿ€CZ%†å/ýÎÿ+1Ê£èjžœ‹k`Ì^¿tNKýãuHh¨^߯üîðYˆ&Æ5« ôICu…9¸CJ‹§CAÍ‹ý’ C h`jÔÓè"cFf/$àÚºfù»o|_²ò‹”ô{W8ëW¸¢kUx ïÐ܆ñîŽvmD¹ãe›{ýÌ×ÈIƒ§B*I)é‹«AμФßQôÿéY¹º€õbÃ,vîeþñü† 6 WxÌzº;%#+Oûa‹uk›÷ ¬yçÚeÉÉL‘_ÿ•;Ê«l›MàDI€H€HÀCêá{#=eBãé¾#084,ßúÎ+JÔ#9 2ž«Ä8÷µk7¢câTݶ8×K¯~N häyÕ‡‘CmB$î4OÎEQmüë¤v_• U6ûsŸy‚™“6–k! p<‡¹;~É\ ]|çÅ×dttB JWÛeÊœ' ø@‹ míîl—Ïö)‰‰öÛ<80 €çh”yÎŒWø€À‰Ó—¤¼²N •ú`h¨3BË|€C,B NyŶî>(IJ¹Òim ¿GîWÊãGwKQaŽÓ–Çõ €ã 0§Ìñ[l¿VU×Ë×ÿñ‡’[T¢òˆ¬Fg?’œq ˜“Ûï_–‚¼tùÕ_øh ,™k$  G@N=eŽÚRû/¦o`Pþé»?“D¥HƒÌþûɘO Få‘E†…Èg?ù„ùƒq   SÐ(3+;]ÔE~á;¯Ê¤úYP²j9]ð(Mu5Ò«$?ÿÜÓPkçbI€H€HÀIh”9i7m¾–7O¼-5u*lÝ’Òç6_*§O^èS’þMuÕòô“J~n¦×ý±   ÿ Qæ?öy{÷åÄ©‹’WTf˜´ûŒîù+ 8ŠÀØØ¨Ü¿wSÖ¬.–½»69jm\ "e¸ë[sOo¿¼ð½×$9-SR3²-6;N‡¬Ea¾÷ËoItt„|êçZkrœ À²Ð([6^dI•@öíïüLº •üâ•FuË~HÀ±šêª¤¿·G¾ òÈ"#»N.ŒH€H€‰²@Úm ®õÕ7ÏJ}S«­\+ÁÁ!œ!§DÖ!QæúùÈ3$'+Ý:ãLH€H€H€¼"@£Ì+|¼Ø·ïTÉ™·ÞUyd+%2:Æ›®x- 8žÀØèˆ[ܰ¾Lvl_çøõr$@$@$Hh”Òn[h­]ݽòÝï¿!i™Ù’’Nå8 m §bAS*‘¬ºü¦ÄÇÅÈ'>zÄ‚3ä”H€H€H€¼!@£Ìz¼vY&&&åùo¿"!aa’[Tº¬>x †ûU24Ð/_øÜSHKçZI€H€H Ð( ˆm¶Ö"_zõ´´¶uJ‘ªGÆ<2kí gc=Ý]íÒÒp_>þs%3=ÕzäŒH€H€H€¼&@£Ìk„ìÀï߸'o¿ó¾VZŒˆŠöäRžKG`tdXå‘Ý–m[ÖÊÖÍkný\0  e²ÓXg{G·|ÿ‡Ç$-+W’R3,0#N¬K`J•‹¨V¢““âå£?wÀºåÌH€H€H€¼&@£Ìk„ìÀãã*ìe ˆ’¼B摹Ìç6ºûå2:<(¿ø¹§%,44°apõ$@$@$àp4ʾÁVYÞ_>%]½RX¶V‚‚ƒ¬2-΃,I «½UÚëå;"©)I–œ#'E$@$@$`eƱdO ¸rõ¶\º|CòKVKDdÔgñ0   ImÕÙµcƒlT5ÉØH€H€H€œO€F™ó÷د+liíþä„däæKb2•ãüºÜò¦&'¥¦ü†¤§&Ë3Oí³ü|9A   cÐ(3†#{™‡ÀèØ˜<ÿÂ++9ùÅóœÁC$@3 ÔVÝ“‰ÑùÂsOIhHÈÌ·ø; €ƒ Ð(sðæú{i?üñ éé˜Î# b™¿÷ƒã[›@gk³´·4ʧ>ñ˜R\L°öd9;   C Ð(3';s¸¨rÈÞ»vG KWKxx„ë0’ ÌC`hh@å‘Ý“}{·ÈÚÕô*σˆ‡H€H€HÀÑh”9z{ý³¸Ææ6ÚbV^¡Ä'¦øg•lB`rrBjîÞ”ì¬Tyò±=6™5§I$@$@$`$¿1’&û’á‘QU쉉—ì¼"!X‚@må‰IŸ”Ï}âI æs²%pñm  p$Þ8r[ý·¨ïÿàM–¢Òµ"Ì#óßFpd[hon”‘vÉÙ)§j_“›-×etbÔsç$I€H€H€Œ#@O™q,¾§sï\•›·+¥lí& x@‹è—ºš{ràÑ­²oÝ&¹Ûv[n5_×ÿ§–ʪô5³X|H€H€HÀ!‚¦F§².ÃêZäoÿ÷‹*¬H2s ü8MÖ'011.wß¿,™éÉòÅ_ý¸r*O«“ŽMŽIEû=¹ÓzK†Ç†¤0y…¬ÎX+‰‘IÖ_gH$@$@$°,AÔH£lYèxÑLƒCÃò¿þú•Ü¢†-΄3ó÷ õücJ}íÔÏ)õ“Íæ>ØÏ嬢³µEƆûå÷~çóýPê"Õ•r§å–tuINB®¬ÍÜ ©1iË$@$@$@ö&@£ÌÞûg™Ùß¾[-ÿß?ýT–™’ß&,¹›¢¤åΰ vNèy„„Iþ¶hu>%5£R¸#Zj/Jû¸ßæ¹ØÀIá259%Ýuc‹æö{XBN˜ô4ŒÉÄØÒ}~v˜tÞ·nnU|V˜älŒ’ûì³Û@Ô‰¡¡!ò+¿øQ).Ê]ò²†Þz¹Ùü¾´÷·Iz\†6βⲗ¼Ž' ؃2{ìgiðlÜn¹)×›®JLx¬ì*Ü+)Ñ©Ò9Ô!g«NËÄÔ„”¦–©›ëë’—){ŠöKXp˜%WöÓ›?”ÜÄ<Ù’³Ýù ÉÞQ—=.±™KöÙÜ×$'ÊßÇW=­.yN– ÷ÏIcoƒlÈÞ,k3Ö›>‹Öþmœ5õ6JrLЬSž³Ü„|ÓÇå$@$@$@æ€QF¡s³÷ Ð>Ð&kß–¾‘^íÅX“±N‚ƒ‚¥¢ãž\©»¨CÎ’cRå}e°­L[-[r·‹úòY’LÛ@«ôôIQ²qŒ£B£$*,J:;Ü2Ê2ã²$EñºÑtMö¶$§ÈÐH=7ˆs¼×pYš•¡´»ðQµÎ‡CZ@zl†¤—Õo(ÏÙ™ª“’™(ø¼!÷̪Ÿ)£ÖÏ~H€H€HÀɘSæäÝåÚL%éò« W´ñ…p²íù;%6";•‡4'~éÄ…ÖíÉñžán¹ÕrCj:«$6"N{ÎhœyBç’ €50|ÑûÀYØDà!wž¯‚¤éBÙð–­>-£ýòHþ.©l/xŸ®è«›õåâDˆåßÿžǃGÏÈö~Ó{RÛu_žYóQ·»}íÎË+pû8>9.—ë.HUG…¬L_-›s¶io©/æƒÏÂbñ™„q¶6s½ötÒsæ úƒH€H€¼'£Œá‹ÞsdD7ÀUléoÖ^¯M9[ä†5ôÔÉùš·$."^ª03œYó#eOXÞÛƒ-¬ë¾/0.\¦‘Ûš•,¹Cÿ¡ÁîýÙY—µA‡èuwY^kÚY°G2ã³^RäíU†8> f7Œ±Á ÆÙ…ûçUèçûÚ8[‘R°F³7€ý“ €¾hDvá|òÀ ïæk:^0y¸ÚÍ–ër­ñ])I)Óž’S*\/$(DgG´ð‡ë<+ÿ°Ægµ²£\¢Ãcd}ÖFzÎLÜovM$@$@Þ€§,ä¿ú•¯zÛ¯''èl—S*¿©¾§V6dmÖÊŠÑa1zÉù{ûþY¹×vG¶åîܤ|q­ 6h¦ðƒ‘̇ÆåRýÙ”½EFö¾ÂC¢¦€ˆ‡»-<4\{ØV¤ë>ܽΟ煇Dê¡FÙ|dxŒÜL_m¼"¿€1q°ô¨¾ÙuÝ\Ã9¡ °Ne´A% 2xÐ(nŽíÒîµß‘îÁN•·[·6GR÷0bs•Áànƒ—¬JåJLŒH¶4ÜÛbç! 
#äxAu2öÙª´/Ë àóÖ…ÉEZôzóUõ`¡N?0ˆLXlú|H€H€HÀ‡`”ûp<E¶!ÐÔ×(¯Üþ‰nØ¡ •Ã¥keE×à={íî+2>1.¯|Z7Niƒ,+>GdîæM¹úó÷O¨Cæ'š*N¼2Èâ{Ò`¯J_£S`˜Ù­¡´ê­ ©ÜB—àsã놜3Èõ?½ú#ÚH<]uB}v_|ÆÙH€H€H€¬A€F™5ö³°Üø#ñdù´Çëi¥XœR:kv5]UrìÞk’¤BÁpÃÝ;Ü#|‡Q³§hŸÏÂÔfMÊ‹0°†¹ëô¢Ëy/MŽN‘î¡.™œšœ÷ý…b^0r"jÇ–™$O¬zF^Þ¼÷ªÎõòÇ:àí…’%æ‚ÚqøŒ/Ý/†¢?ÖÏ1I€H€HÀÊÜ“A³ò 870ˆŒ­wë/©ð½`ŽxH‡ŸÍíÒîP¶[±VËž×v×èUÅ©¥òHÞ®¹§Ûâ5dÜ&£ÉÌ–,SSS5EOjÁ +M[)åÊ(C¡d¨Ø­!”ð@ñ-uÄ®¡NÙšûˆ_ x°G¸-J5 ÎÞëw^‘¼¤Oè µH»íçK$@$@¾ @O™/(s KRÅÒ9ûÜÄ|]K ù@3¼;xªv; vÏ2ÈÊÒVÙÖ ÃºîwUku¾™ë5ãwÜð‡(uGxË%l€bÝ[Õ§9d¨Kfç†058¨©å«†:YË5ÊàeˈËT‚·}5]SÇA |þÒÕš ¸0R7|V§¯•Ÿ[ûóJJ…\®» (àÝÐ[ïï©q|  p4†/:z{¹¸™àCÍ1"+UŽÒ¦œ­óc¸ž xÒ`@B 8v¦ò„$Ù6‡l&ûÕZÕyt¾j0ʆdžuÞ=m«Cþ_Ÿ2¨ýòçé¼:Fßm¸$çjÎHÿh¿¬ÍX¿Ðé>;ŽÏû–œí:Tôjã»rZOϊϖ͹Ûôà ŸM„‘ @€ Q èËD¾ ”#â´w" 5xÎVR7¡9Zâ7ÎP˃0Ží,سХ¶:Ž|:˜óy ÍZŒ24xË¢â<7Êû«åñí::“1 ™ðL (ÃÌ*Jž˜ŒÆ¶ôV{ ¯YIJ™¬ÏÞ¤eõg®¿“ ,Ÿ²å³ã•6 €½‹J‚†¼,P¿[ÌATÒ÷Hþ.ºˆ›äSÊk†< ½Eûõ1,}Ñ)ÂÈ„pɶ<߆`F(A•(eÃ(C.ÛrÄUn(A ä>9Iˆ¢4u¥D«Fä+ŽŒÈžÂ}¦óö„=b<±ò!­ñ=j¹.k£þN!¼—H€H€HÀ;Ì)󎯶0ºîûò³[?‘%÷Üx#3È*:îé2ÜôïÈß­/“>©Rö„Ê›ë“c÷^“áñaÓÇôd<ØXŸµI×ó‹L“åoê<¿þÑ>Oºá¹$@$@$@3Ð(›ƒ¿ÚŸ@³ò0àé}so£ì/9¬CCƒÒ½ÝzS®Ô]Ô¡q³éjïÜ?'(ª{PdË¥põcµŸ=õúF…òzø£Á(ƒ²¥7Rë%©eÒ9Ø¡ÿõÇÌJŸ)ï. fÃ0ƒHÕrûö­8¨={ýÊ€DñékJ1ØH€H€H€<#@£Ì3^<Û¢p#ˆ"ÐùJ‰I•§TݱœøÜ%g{«å†¼WY¶æ=¢¥À]\oº*µ]5*‡ì€ÄG$¸;âg•Ê ÊˆÍÔ‚þXŒ²ÉÉImð.wüÔ˜4IˆJ”Šö{ËíÂò×Áè9ºòI ’7P+Ï¢ž(Ôîƒ7zsÎ6-ÀòÒÍéZ~–Ì ’ €…Ð(³Ðfp*Ë#É«·_Ò…wìÕjq”XªÝl¹.W¯h± Hä»Bû®+! }à†ÓI 9r(~]”â/X"ä ÚðBzÓà-Ã^OŽ{Ó¥¯…‡¡Œø<¿©rÌzG¼ó2šÍ á±i„l>Ê ¾YK³Ùò  °-e¶ÝºÀ¸–ºW7yCcCòØÊ§få‚-EFÉåú Úƒ¬÷ÌvNÕ‡Bۻ‖Ã×/ôˆkttèlþ^V‚ aôÖ(Ãàmïo³l¾•‘œ]†YLD¬¿gmÖÆµëµJ#Ê/`ÎjÏ ~°‘ Ì&@£l6¾²0„ªA¶Þ%uÿäêg%9*ÅíØÃM!dï×d¬›u"È 1ªÝP9bðÀíQÂPh[¨½ßøž»ÈMÈ_èǯR‘¡‘Ñ «7|&"B# U`Œ‹ÑÒø­Ê0e›&¦¿+ð :!¿læ¾æÄçÊS«N¢Ã¢užYeGùÌ·ù; Ø‚2[l“½&9:1*§*ŽÉ½¶;J ñQAÝ$#Œ±M×t^Úbž7äÐ4ö4èœ#Ç·z_ðN$ÙÆ3ht^ö‚-}4Êf~VS¢Sä—¡¦Ÿ“Z”2È•>&xt¡ö¼þwbjÂIKäZH€H€N€F™Ã7Ø×˃TѺ‡»åhÙ“R˜´ÂÐ)àFë|Í[’Ÿ-+Óσᖗ!Ó”Ö>Ц½NE)ŶY2Œ²~%ÆbdK‹MמTʦϦŠü2¬ÈÁršÑ‘ˆš œB7ø;è¥fï>_‘ X™2+ïŽÍæ†<&Ü…†„Ê+ŸÖaƒF/ájÃQJŽ;òw/Ú5<E_›¹aÑóœö&>Uþ^rTŠm–§Œ²c2¨B@^U¶Ù_68: (%áĆpÆ'W=«=ůÞ~I¯uâ2¹&  ‡uØz¸?@ÇÅÚ·%/±@´¨À"y^Ë"„î¶ÝÖuŠ®´X»Ùr]…îzÉî·ÕH[·ý%Û‡š$*$Z.—_\ ©ï¥'fH~ZÛc ž–Ñ ô­C½-NîöBlr"rî¶än×!~ø¾:‘öꌗë.șʓêáÌú€ c¶ÉÇ‘Ó$ ø€2~¼&€'î7›¯Ëº¬ ²!k³×ýÍ×ÂÐ+™wÔ)Z¬ááKXì´YïýÍK!--³ŽñÅòd$gÈÿø•?wûb„/ŽOŒ jÙA Ä¨†0½v‡Ôæ2Љ«Ÿâ”RåAª“wîŸÓ"‹‰å¸®±ÛO¨3>’¿KRbRåRÝ;Ò=Ô¥r\÷ DOØH€H€HÀjh”YmGl4JÈM©íª‘…{t½0³¦S©- Êa•Ì¿T»Ûz[ð¤<7Ñ}Åʼn‰ I+N“”û„ý-ÅÁïwÜï‰vÏbÕ^¡¡È·‘FYjLš„ñ;ŒùHÞNyùÖ5#äb9µÁEH“òúWd_ñA‰Hpêr¹.  ›`N™M7ÎßÓ™‘ãå¯ëú@P=Cg³ÄCŽ/B¯kc“c‚¼ª²´Už«N-Ö3ßs‹À2¢°wPPôô¹5„»'%G§¨üÃC S»;¶ÎCð&U¿ïVË %ÌÓe‡)/{ŽPž|bÕ3¦ ³†Þúe÷Å I€H€HÀ 4ÊÌ êð>A%€¼ ˆ*˜Ùz”™(+ÓW[Ä`¡FZIj™™Sb߀j<›}&e0ö(ö±ðf•¦®Ôá}îŸ_ø$‡¼¥aó“ åtåqý Ç!Kã2H€H€@€F™6Ñ—KÀ . 2äe<¾êim,™9~MW• à-rCpó¾T«h¿§eø™7²)k½k¸G yR[ë ãb»ïÔJQWÐé yfPnÝ–»CPXá×,›àô]çúH€HÀh”ÙcŸ,1K„ü»÷š èHÙ‚'Ïf6ÔQzOIà—¤”é1— Å¢{†º¥$^²¥XYíýØpU«Ì`OÖˆFzÊßm`‡:!{· áÍJŽH}w­œ¨xCŽÍF$@$@þ$@£ÌŸôm4vUg…’–>!…É+T¢ü! 6_#¹.ãc²!Û=EGxÉ’cRlU£ËFS§£©Ã°áýGî* ø‹2‘·Ñ¸0Ž ½&cýq'ŒÐÛå Ém5îõßE>xÕ ƒ¥56û€# Ý`¤)þg¸Ûè®ÕB=·å>"5Us 4x ‘®Ã²Q ‘H€H€üA€F™?¨ÛhÌ+õåjãƒáKÙìëWÕR„¬J_ã-d“‚$~6û€ãÔÔ”áásIQÉÂF7>¨˜£ .C,'P°Ž—©C«;+eé\' €…Ð(³ÐfXi*¸);WsFÊÛïÊÞ¢ýZbÞWóƒ{ÓÜ·|ÝÜ×(0$~ÅfOðhE†Eî) „0v9»8²‘»¾.s£öP_oºjd·¶é Þ²=…ût‰€ó5o麇¶™<'J$@$`[4Êl»uÆO|x|XŽß{]«à!Çrâ¾nÈ%ƒ8CN|®ÛC7ö4HjLš[‚ nwÊ}N!Œfˆ}À(ƒØÇÀh¿Ï×dÇ¡¬Š‡"a TE‚¤"9P|DzêäTÅ17žH€H€Ì$@£ÌLº6êõ‰PƒltbT+{Òô¢Ðó¡AÞOƒ ™Üåž¾«–þfŸ{ô\có§q¢ÃbLñ”!§ uÇ»†(ïînAÅ4>2! 
$òç²ÉŒË’Ãek/ë±ò×2œs.¾& 02óØÚ¦g< ]Õé  Öu{b”<¹?òXRbRµ`‡»ãC:¿o¸W+§¹{ ϳ&í)3!§ žäJ±^™ûûŽpRäW¡¸r IäÏ%„ÜVD ë°nD°‘ €h”™AÕF}Bi ²(•σ§Â‡öGC.ÂÝ-íšcç@»þá‹þj}m}rçÄ9ù7'åÕ¯½*ï|ëi«ló×t ·îjTœ«0¬¿¥:‚,þÐèàR§-ë}„0vÒSæ;¨™fÆgÉ»õ-xƒþ1Ud!°ø[‰¨6  0š@¨Ñ²?û@8×ÉŠ7µá@É óÛäo·ÜÔ*yYqÙÍ¡[•1±~›ûå/Ë›ÿóM™›ÐÈP –ÑÁQ9ö¿ŽÉÚÇ×ʳÿñY 0÷kví§×¤·µWýµG=b·ÔÉgÿ᬴W·Ë—^þÒR§ò>:.ŸüÓOz?Ø"=Ü=uWo6n”-2¤)oáÆ5‹!‹gèÅhì×9“á!á†öíäÎàa,LZ!ï+ž¼ÄÔ¨’ó´!ŠŠ´xå2ÌàEc#  #0|ÑŠ6ë£} MŽ—¿® ým µ"t Šgž6t®ÂÞ^ëÍù-å-ré{—¤xw±<÷×ÏIÖê,  –°)z¤H>ò_>"‰Ù‰r÷ä]iºÝäÍPsmTx´^ëÐØákNVÆ¥ñ=G»>k£Va¬é¬òüb‡]© 3„{#”áßl$@$@$`zÊŒ h£> TˆÂЙ*LpoÑ~ÃÃÄχê’ÁîAÉß’ïÑ”!šqü¯ŽKþ¦|ùô_|Z"b"ôõ8þùÿÊËÿåeùâ÷¾¨Ã ]—Ÿ-—ßøîoH\ÚtŽUõÅjùöo}[{ÚÇö…¯A^üW/êœ2ü>·ýô«?•]¿°KvÿâníÑ[ÎæöiæëHCÛŒ–™$=C 7[[ˆZàÁÉõækÚk†òÞð7 I0ÊŽ«*é|Šú§‚ë' åàÿY—ÏÎ6Wº ²ü¤BÙ]ø¨% 2Àkè©Ó¡j¥i+—Å*}†X®A·¬AÕEúÒˆØi£ÊÝ~®ü󙚘’ÇÿÍã 2\›Z”*»~q—O¬º8;oç‘Ï<òEŠâ^@IDATÀ ù…Û ãö4÷àå’-s¾A®Úræ°ä ž€\³-{–3 ²]ž«.{`7.¼×vGrâs—­´4­§37Æ3êäm¡õ¶ô.Ù%¤ò'Ç•ñ¨r¢ÂtØãÜ ó6æMŸSÝ1ë­”¢”Y¯±fHì£_wÚªC«f¶œ9ÌêÀäÈý2ëÆá‹0øÇÈæ9ìMqj©Üj¹¡ž…¨§!lš€+Ç ‡´a¦Ê:°‘ €§h”yJÌFç[Ù ëé丕,ÓK†m@&ê‚OŽûtW£Â%65VˆF.×B ¡‚_Ûù5†sFF´°Ç|ç‡ÇLË´£ÆÙÌEGoZ\úliùåÌÁ›ñ=½ÖLOY‚ò”¡Ñ[æé®|xþšŒuÚh®ê¨øð “ˆ-þg¤áÏ €§¼»ãót4žï3V6ÈÈ¿€§Ì›†ð¡‰I÷¼FÞŒ3÷ÚÒGKµ°Æíã·ç¾õà5Ä<Ð ·êŸÉùªV– }„RâÜ çÙ 2³ùc3Ç_ê÷H%ô1l’§*Z‘yeKmÂïƒ!”é-{DjŽ(%F<$:Yþ¦Ï=<#! °evÚ-7ç D=CfµE,á†Õ•R’ZææŠ> Âf…»-<ªÈÞ_Û«…3ŽÿÅqqT3ÏGhã[_K{ÔPË †ÚÛÏ¿­ºþoÛ…º Ñ/ØZà:lÊO+Ìa±…ÁS6<>lZxòÊX[j±Xú=xËúGútÁ÷¥Ï¬3âyXÉå£ú[U'MûU®–H€ƒÀìÇè±fG¯²µ¿E×!ËMÌ·¤Aøõ=µ261&+’§o6$ZÝ ŽMËÀ{Ó§×&d&Èð„¼úG¯Ê7雲õ[%g}ŽáhºÙ$ç¾yN†û‡å³õY‰Œ›–ì_st\~ñ²œûGõžò–­>¼Z‡4 C]²ƒÿâ 6âu3`…:dóqD¨áoü³ì)Ú'ù‰…óâѱ»m·å–zbÿ±õŸò躹'ÿþ7~G‚R‚$¥p¶ÚáÜóz=>:.­å­259¥ûpyÇ:jjJºº1I1 fêq3æuÇ©Ž)ù³_ÿËeÍù8/^ý¶ì/9ìu¾á|€—öŒ +ûô¦Ïû¼¾Ý|ó±ó±Ó•ÇelrLåQ=açe˜:÷ªÎ y§æœlÏß)¥©Ë+ýaêÙ9 €%¨ûöFzÊ,±ÞM¢s¨CNT¼!i±éòhÑËdXe•Ê%ƒ„tnB¾w‹þàjx>\2çP@óWƒ FöÚÙ^±Åæiû¤Ü¤ÅN1ý=+Ìaî"!+"#*¯ÌŒ†Ð;„GB1)ÊXQ3ækå>We¬•ã÷^<‚gˆía+’Kôß§KuïHdh¤ä%š›3úð x„H€HÀ.(ôa—Z`žÝÃ]Zé+%:U‡ËY{K«;*u>ŠQótÝXãÆÍÂU!ÞU¤ØŒ¦2UÞBlÞȈÍÔÆØíÖ›Þuäð«×f¬×^²ó5oIÇà´ÊªÃ—Ìå‘ ,ƒ€µïà—± @º¹T^FQÜ}Ň,Ž<Ô6ªáésLD¬ ŸŽÍ°§£&ex€<Ä^õÝaóžÀªŒ5Z…jƒl Øž·S2ã²´*®?Ôbžß! ° eVÙ ç› ãªHiTx´ì/>lyƒ Ë«íªÑ¼zF¶´˜té Qf$R¿öa¢§ ‹W!Œô”³ÅIE‚²wZoÓ¡ƒ{Ù­òhÃTèöéª21åûÚŠFË¥‘ €#Ð(³á6©âº0È›u°ä¨ €²Ú}e”()m£réÚú•È’…ØlO ByÊP«Ì¬§=¶fõHýBPhUú©l/×¢´vO׊¿ÓûUD\¸ÞÓËy> €Ã Ð(³ÙLŒèELû`éQñ§¸…'èK10Ò¯óÉ<¹Îs„ºgÌ×p‡—ÕÏ™ö”™g”A†ž2ã>Å)¥º |Mg•q:´'ä4î)Ú/÷»ªåVË ‡®’Ë" XeË¡æ§k ?}ªâ˜ŒNŒÊáÒÇ$*tº¦“Ÿ¦ãѰ¸a‹‹Œ—ä(ãUÚp£#M}͉'[“-ÝC]:dQËzûZnÏ¡‹C£ƒ’›hLm²ùÎMÈÓ Œfæ"Í7.O@‡/ª0]³ÔU*” Œö›5DÀõ‹ÒÈí¤·Ìý­‡"#¢Î)ÃŒÂîsã™$@$àT4Êl°³H oîm”ÅG$1Ò¿‡—ƒ«¾»V«.ºjŠ-§¥®ÉTž² iè©[êT¾oq_„f ò'ÍhÅ ‹–~e†â-K[%ø®SòÝ=¬ø>Zt@?¸Rwѽ‹x €c Ð(³øÖ¾ÛpIª;+u²Ô˜4‹ÏvþéÕ+C ž,3 2~`,6{@ŽšYµÊÐw¬ò–Qì$ŒkùJY¡§åíwëÔá=Ák»³`T´ß“š. ¥8|»¹< X”²Eñø÷ÍÍïëú?» $‡Û±õöé\3C]\0<Šã“ã®CüiCa!Ó% ¨iVƒØÃ¥ yüâÔR©ì¨`y Ðæ&äËÊôÕr±ömécQsÈñT peÝO<9}¿ñ=AÞ ´ÚµÕw×Ixh¸¤Çf˜¾e“S“ a4´¹ À.š™êˆð” (Ñ6c ¬H)Ñá‹=Tô„ìæœm‚R Èf~™'äx. 
8‡2 îemw\¬{[6do–ÒÔ•œ¡ûSªï©•ìø\õ ])+˜ÜP³ !Œ(RÍf_®ðÅ1UúÁ¬ÁðE3ØÆ†Çéï`EG¹Ý;¶ÏYùeõÌ/sìFsa$@$°e‹ÀñÇ[”>_ó–@2y]æLÁ°1áéhëo•œ„\Ãú\ª£¼¤-o¦—e©9ð}ïÀ€ ÑÁ½ëiá«aêÙŒx6 €¯Ð(óé%ÆèÀ©Šc’“*» ö.q¶=ÞnTÅœ“£Sa…¾j¸ÌŠÏ‘ûÕ¾’ã˜@ T‰}Œš¾­Œ²   *0š°wŃaQÉÆeÑÕäÔgórÝ…e]Ï‹H€H€ìI ÔžÓvÖ¬QðøDÅ›ºvÒ£+ njœÐš”bQò Ÿ/c¢ +BÓ¢B£Üd`DúÚúÜ>Ÿ'>L ÃÕ?Þ¶°à0¥¢ižú"B$a˜±V™·;5ÿõa¼ÓrKŽ›9ÿI<:/¶ö”)/‹™-:Þ[ÈÄ_¬}[ÖgmÔO@}<¼O†kU^²ðÐpIŠJöÉx R”\¬•,«T/ɾ8<êl£ 9e¾ð”©DBzËúpÜ%ºÃˇ‰’"›r¶Ê½¶;‚^l$@$@Î$@£Ì‡ûÚ9Ô!g«O)™øbe”mòáȾª¥¯Y+†ùvÔ‡GCNÆŠ”¹ÛzÛ"¯àá#QáÓ9eÃ#Î_Ô’ø&ç”E)OÚ óÊþ tùI…RÓÅFo¢ÄjXâ €3 Ð(óѾ¢8ôéŠã’“.; vûhTÿ ãï|²™«^±V†”Âò÷œÐ‚ƒƒ%,,L†¾ˆuB$ÂÌ™qf^™™”Eeª&kzCz{ÞN-\TÑá¬ØM„aª<.‘aQ²wÅ­šçƒaý2D÷p—Î'K‹M÷Ëøs‹‘BULúvË͹oÙö5Ä>œ¾/çÄÔ„é{¤eñé)3•sf\–úÛ©kLÇé#|eúj¹Öð®8)OÖéûÆõ‘ €»h”¹Kj™ç¹j‘Á0;PìœâÐ áhïoÓõ³üO6s~k2ÖIÏp·c¤¹‘WætOYˆ2Êïe¶a¥”À“Êf”™ÈMÌ×ÅÜÍ%0zFØ;¼»7”P €³Ð(3y?Q‹ µ²`E…Mç°˜<¤_»ohÈÑãFÌ*-!2Q“~_•!pBƒ£ÓsÊ\ņMaTßÉ¡1gççYá3še]ƒÒ;Òc…éØvȵ„aVÞ~WÏF$@$à4ÊLÜËëÍ×t.Óž¢ý~W"4q™³ºF1l«„.ΜÔ.á-sBÖ@ _ÄþMNšˆZZÃã4Êf~WÌø=#6S=”Šb£pKRËTáóX¹F‰|h²  °e&íÔÆP z[ÞGÖ"›òú†{•JXÚ|oûõX|D‚V½„· !¥vn¾¢·ÈlOò<é)óÍ·!/©€!Œ FÂÆìÍZÑ’ùe$@$`4ÊLØx‹Þ¹NV§¯•ÒÔ•&Œ`Í.ºˆ–jA£ óÚ Â~òƒz?vnQ:§ÌÙÞài£Ì9eà _ôÉ×!?±Pz†ºÂhm°LŽN‘« W è] X2ƒw¡o¤WÎTžMÝœ³ÍàÞ­Ý]‡2Êâ#ÅN­Øò³2mµN’›³âÝšSDx„ãÕ]9e“““n1YîIÅSõÐÌ6þ–;?']‡°f¨0Öu×:iY~[ þÿÒÔÛ(Í}M~›& 0Ž2ãXŠKú>&"Vv>j`Ïöèªc ]8µòl×fnÐÓ³³zÙ´§lØÊ˜½ž[pÐôŸ¦É)“²Äwè-ózË–ìawxXUO£lIV<½œ„\¹ÚHo™;¼x X2ƒvÈ%}?¡„ ö+¥Eד~ƒº·E7P™DH•[xH¸ÊÇØ"wZoiá+Ïu¡¹E@}qÔÙFYŒ2xnИW¶Ð§ÍØãÆïlgƒ°B‰…¹{ ê‘Ý ø‹2ƒÈ_ª}Gßlì/>$‰ ´686 ‹F[Ý(þ@½ uÔ®Ô_´å6iõÅgç”ùÊSùÁw• Œ¾ù*dÅe dÝÂh oü½…·ì†Rúe# °7eìßíÖ›RÑqOöî éû¹Øà%Ci2+ž;Ç™¯¡Š‰\ ;Jäë:e÷”ùJèÆ_Dh=e3¿&þÞÙʈ¨ë¾oâ(Õõ:U¿¹eµí\- € Ð(órSzê但ËZÔùÚ`”Av>48Ô Y’R&ïÖ_Ò¹€¶˜ô“Œ WžX¥êïäF—§lÊäœ2 …,>sÊ|÷ ÈU'[û[l÷½ó!ÏFJ‰N•Œ¸L¹Ýró y6 €¥Ð(ób;P#æ\Í}sùû@nù°CèâÌ=Ú”³U¿„Qm§¡´a‡0BmjÊüšrÈ3wvŽž†i‘ÿÀS†ÖÄ<(ÃvduÆ:­ÄȺe†!eG$@$às4Ê–‰|h|HN+é{<¥Üž¿s™½8ç2Ü ØÍ(ÃÍøÖ¼G¤²£\Zú›m³Ó%†GnH(»ÌlõElzDh$½6>üô#§,=6Czê}8ª³‡ÊŽÏ‘Ĩ$å-»éì…ru$@$à`4Ê–±¹¸Q|«ê¤ ïeïŠê™þôSýetåˆKàeÖ7v[аæ%èbßv©]…ðEÕœ¾ˆõùê{©Œ²zÊ€Üg Þ2xÊ ZËf UktŽìÐØ 1²  Ÿ Q¶ Ükß–ž¡nÙ·â e %/cY˾¤{¨K_›•¸ì>üyáö¼2® ¿Wo0FHâ£9ÞS¦Öè‹›v}ŒŒh¦üo@1ÌÛUÁy6c&­Ð¢5í÷Œé½ ø”2qCi±ª³Böí—„H{!.yÉÓ{†»•XB¤mKÀS‚TÜÌ4ôZ?¤*",B‚‚‚”§ÌÙ²øX£/rʾȜ²%¿æ†žQ Øˆ8id£a\!ŽSœRªÿŽù"ì×°‰³#  M€F™ܰCbKÎvA ?Û4xÊìn "Œ±(¥X.Þ?o‹t-‹ïðœ2„/úÊS6JO™ÏÿœÁ[Ƽ2c±—¥­Ò¿XrÀX®ìH€|A€F™›”á :_}F?‰\•¾ÆÍ«ã4ˆ| ÉÜî µË‚Užà…ûç,¿”H¥ÀèôðE_yÊÂUøâÄ䄌OŽ[~ß4Aä•á ϳC *,Z×c£1<Ù ø’27hLŒh¥EÈ?b›MùuN0Ê  ·»ðQiTwÛnÏ^¤Å^E†«;‡‡/ú 9ÂWÑ(öá+âÓãdÄfj±¤¦ÞFßìðÑJT#ÔdûGû¾R.H€œE€FÙû‰ð©sÕ§µ4÷£JØÃUÔv‰ËæíÑ~íep‚Q†MK‹I—õY›t˜* b[µBø"òÉà-3»Aèbf“žÝ?þ–¢è1²Ù\¼}$¾3+’‹¥º³Êá+åòH€HÀYh”-²ŸPY¼Ûz[vì‘䨔EÎ Ü·úFzµò" 1;©íRaŒ*Çèíš³–\V„_tvñh_ˆ|`s¶ªîc…b¾ÿ¨gÅgkeÇ`»ïwðˆ…É+dpt@Zû[¼J.H€œE€FÙû‰›Ô#[“¹N ’Š8‹‡!€™à8ðŠ¢0xCOÜl¹n¹õM‡/:[ßWá‹Ø\ÐfÙüM_²0r¬üÄ’;*ÍÌÙ3+û" ÓÐ(›ƒöRÝ;Ccñ¡é\“9ïóå‡ ‚146$q‘Îùøpuþ¶£`·Ä„Çê²Vþ˜úpvø¢zÄï3µÓ°0 }|ø‘÷éoéJ¡ªTb3Žþf%G§H]×}ã:eO$@$@¦ Q6- nVv”ëZUN É›±\¯…ÈZ\Dœ×}Y¹ƒ m¤CÄ­Ð"Â#T2ç†/NLMh̾*AðEæ”ùç“% Q‰Ta4^RÔ÷ÔšÐ3»$ 0š²ˆBØãrýYŸ¹QròŒæìÈþFúµj]l¸³2lêþ NrËÞm¸ä÷ý„§l||\Æ'Æý>3&€0b´àà3º¨OÔ*cøâCX|v =6CeÍ>/PÊUÿ/Öᡲf®“H€ìJ€F™Ú9—°G–ª™ƒÂÁlîèW…££Ãb|bæÞ¬Ì;+5&Mvì•;­·änÛmór£çˆˆéÚZN­UöÀ(S†}Ñtø¢ò„²ù‡òÊ:‡:(¶b0~¨0ÆDÄjYƒ»fw$@$@ðÍÁ“6²;ˆ œSE‚¢†ÚTlî@øb¬ú~ 5”G€"ç•ú‹> ê蕳ïy ¤õŤødýü»z;å?ýÝW¤¢®ÜÛá{£Œá‹þüà ¯ ­uµ ߆œ„\i¤º¥á\Ù! 
M à²wë/IÇ@»ì+>¨k ØÉý!|1.ÂÙ"óíßšŒuRšºRógÄWEoatýøøä{¯¾  3Ô)ñáá!“øá×e`°_2R¦onç›·ŽM¹Â}ä)›®SFõE}F ~™•L¥@6 ;>Wº;uDˆ ݳK  ƒ´QVÓU%w[oË΂=’™dÒÀé¦o¤O«Ί?\é¶¼’Ÿ-§*މKðäÃwÿ­ «PŠóJäòÍ‹òÝŸ}["Â> _üÞk/Hs{“¬,Z%qÑÎÈôµÐG%ÙÿØzÔ#òÊZé)óˆ™;'ƒkPP…TÜÅsH€HÀÖ(ëê” ÷Ï˪Œ5‚46Ï ìs@å”9]yq!*(j¼·h¿Î×8UyÜ'rêÏìÿˆžÎ•[—äGÇÿYÿ~ì×åêwõï;7î^hº¶;>>9-`‚z¾haÁaŽMñ?#ÆÈP!Œø»l•²F¬É }à;„|X ©Xa78 X˜@@e£ªHì[U§$%&U6çl[˜ßYÀ°ªO†ÚBÑá1 žãô7¦¥ò òŸN–¿iúÍd^f¾¬Y±Vc½~ïšþYQ;C-«?xÏ Ü]9e!>R_ ùÀø£Aà¿OOjLº¨g=ô–™°ð곈´ `Ù% H ²ó5guöPþŽ qNW£z±1l”j,.}Lçk ”ÑU_ˬO“ûžÑ]ÃhAHþ…áòÈúŽRÁtyÊ\Æ’Y<]ýB}m|bÌuˆ?}L 24Râ#¤E¤ 'Fä Þ7;$ 0†@Àe×›®ê'†ûTÍ)ܰ-ÀàØ€6P¿+Ðê´*9*½Ã=r¦ò„öœ™Å$+5[6­Ú"!!!ÚS o%ò¯¶¯ßaÖ~é×e”ù*|Ñ5Žk\¿,šƒJZl:=e&|R¢Sõßkæì™—]’ €AÊ(kè©“ëÍ×d[îÁÿ¤Ø–Ož²È°(z?@ˆz@Kê"­gU‰äܙ՞Øû”LMN÷OYnfžd$;CuÑÅl9eʉQ_4—§Œá‹¾ ½ð0Ê:;L}°±ðèÎ}’¢“¥m Õ¹‹äÊH€HÀæÆ(ƒBÞùš·¤$¥LJRËl¾mþŸþ 2Ê=tqî.$G¥È’#Ò¤j½sÿÜÜ· {š˜&ÛÖ="Áä[9Iàà +_d3T } OL ŒèüÏ Ìž__•šðùý8 Ä>Pþ…H€HÀš’6ÃMÉûÕWõ¤^‹ûÕú&/h$X.—_ôj7b¥4ÛÃnhtHnÕÞСi^MÒ䋇êu8Œ·,͘ftDŒ¬ÉŸÄ0ºw¾™‘ÙRÓQ%=}Ý’¡Ä Lh™YÊ3vsJï¨o±•öÁˆï8»¼W&à{¨K×Xô”=„Ƨ ¥<ðm*¯, Âl†HŽN‘ÊöríÅg.µaXÙ Fà!£ìZõ{ò—?þsðJG¹ÒÒÞ¢Ší{=¥`UÐö›¿ÿO^÷3_ço•ç}s¾·,u,>nºhto_¯¥æåšÌ_ýößK|”ñ…­Ýý~¤¥¦éÏZ_ŸkJ†ÿŒ–h VÿüÝ+mxßÞthÄ÷Æ‘Ë{åÍ\ܽV{åT¸$…>Ü%fÞy©*„‘avÆó…Q/d÷P—.Ômüì‘H€HÀe®¢­«­ò¦_K^[XVèõ¼úZú¤áfƒ×ý,ÔÁää¤DDDHÑ{ÔNË–ì…–â—ãƒÝƒRûn­iž^'?ŒØ0£¾cªlExH¸Sr»Ô*£§Ìm\¦ˆ0»Û-7Lë?P;FÞ+”Z‘³—•¨¸n °,€É)3j¦‚ÌpÐs¤B¿Q[Å~ü@À¨ïǘ’¦w…új¡JßetûjLŽó0eÃcú8ýÃïòÈr d†YÏp÷r»àu$@$@& Qf"\vM$°<0Ê`$ù²!„‘’ø¾$>ÿX³ƒªhû@Ûü'ðè² $D)£lˆFÙ²òB 0‘2á²k å@¡¯ÃÚ5©rnØüKÆ1Âëh”¿ð”uÓSf¾|'%&•F™ À"dhtŸsزK ð–2o òz à ŒŽJDh„áý.Ö!<4Sô”-ÆÈWï!¯¬k¨Ó4Á_­ÃjãÄFÄé)õš§ kµ5s>$@$`4Êì²Sœ' ‘‰e”EútÅðÌAý”Íÿ`”a/`˜±G@eJLª„F™qTÙ C€F™1Ù €FÇG|žSÌðEwл®â"âµú&äÛÙŒ#opTX´ôÑ(3*{" ƒÐ(3$»!0†B!MîãðExʾhÌÑ Ti”ArvÑÊ(˜}¯H€H€üN€F™ß·€ ˜I`d|X¿ŒñCNÙäøÌ©ðw?HŠN–®A†/½Ñÿ?{gÕµîûÜ=!Hwiq«Ð)uAJoÝžö\yïÜûî;ïžu9uJEKi‘Bqw—@HBÜõío¥“Æ3™Œì=ó_s8™Ù{É·~ß@÷—o­ÿò@~q¾µ»e$@$@Í$À ¬™ÙœHÀº µ¥‹Rì.ô¡-_äáÑÖõesz“LÅ>šC°î¶*SVÌLYÝtx•H€G€A™ãØsd :˜2evú $~Îpà% ÊÊË˵sµÒh…ó íëåÇL™ó¹•3"p ÊœÀ‰œ 8mi•»»»Ý…>D¡¬œê‹zù.û„ÀÓÓûʬìÉ@‹ €¾0(Ó—?h ¸<É”Ù[_ »»¹3(ÓÙ·/Ì/œA™•}â«e ‹K‹Q®½XH€H€ôCÀÓš¦”•–áø¯Ç€¸~quv›ž‹ {. ²]$¢â£ê¬Ã‹–(/+ÇÕ“Wqùðe$K‚w€7ÂcÃÑõ¦®ð ö³¬S+·ÊÏÊǹçЪ[+„¶ µrïúìîÚùkH>•l–qí¯oß æŸÏedÎ’)óÓ–XÙ»¸1(³7òFDZk¹©Öcó ˜~á!¿üðó´ÿß3ó-eM p-V ÊJŠJ°øµÅh?°=&2¹N’)§ST¡ ÅègG×YÇV³S²±}îvt½±+Z÷lm«aÒo^z–üëðˆ(Ê-BqA1~šñF>9ƒ¦rˆmUM»¦ü?á?' 
[binary PNG image data omitted]
celery-4.1.0/docs/images/result_graph.png0000644000175000017500000010606613130607475020346 0ustar omeromer00000000000000[binary PNG image data omitted]
ˆF Êâ˜Ï}ï”6k’M€M›Þ4`&ÁGô¼:«Xvôø³>LׅݺæSf}XRBRô`ÐRl*@PfÓŽ¡ZœJ Çʼ˜HPv*"ž›d)‹†Ì‹C™5=¾®+;иOʳg }Úß“àÃÝÖJô{ûÖ‡é¨XKk³ded›‘°µ_#e³$666¬u p›AÙØ¼8[h:üÄ8>Ù´EgP‰at¤¬,kê°Ï¹ygjBšLÉš&;Žm³ePF‚÷¾ú:»;eǾm&uýŽªmf}Xia™,]tž ÆJóËÜÛxZ†€ Ê\Љ4!úº­é‹Ü8:úúÝ)-Öéµ]½]æÆÑN©s(ëYY8_þ²ã9ÖV#…iE¡,zÂe‘àc„¶* ±¥a`}˜ÜšjfY…\qáZˆe¥»ÿ>¶ê*ƒÀÊ&€Ç©L–€¦‹åÇw²ü¹î©t”L·hœ¾¨íÎMÉ“‚ôBÙV³Y +씑àC{ÇùÛáÚC&I‡fLÔÇI‰If]Ø-W|ÒÜGŒõaÎïcZ¼«‹Î~§Õèíï•O‚Ã[AõÝ* éðuÍcr\²[›8j»æ.”—÷¼ õ’“œ;êñ‘8€‘Pý5¼^¯ì©Þmˆm6©ë›Z%3=ËŒ„]qÑZÑ̉žXOè/L‰ Q‚²ˆrs1B# AYœ'>4…Q !hê²Þ4FÉM£G¢+É(•ì”Ù~l«œ_¾l¤Ã"ºŸåžÐźzºD×…m±1ýÚÕÝ%%ù¥²dþÙV0¶PÊ §L¨|NFû ”Ù¯O¨£ ôö÷HñÇ$øˆ8ù˜/ØÔÚ(Ûön5©ëudL|"3¦Ì”ËÎ_cFŲ3rÆ\&' €€sÊœÓWÔ]S¦÷DbCÀŽº¦¬<Çžéà#é55»\6ù@¶Û"gO97’—>éZ$ø8‰Ä;Ž?l²%n³Ö‡é½ÄeÎôJ¹ù²ÛdîŒy’’˜b‹zR ¿AYø¹!ðú¼æT}„”•ÂB$ÐÙÛ!=}=Qwãèáøb¬Tx:Zöî¡õ²°ø´I[c§ >6íúP>±êÆáªÉ¾ èïïªê½Ö´ÄMf}Xcsƒd¤eš‘°Ë.\#³¦Îf}XûƒK!`'‚2;õuA ~_¿9*–…ÝAhqH¤š>ʼíkÊüî3r+dóÑ­µe[äŒÒ%þÝýª >âãâåô¹gFôº\ì„@wo÷‰õa{¬õaÖ}Ä:»:¥(¿Xά<ˬ›R}÷óãµ' ”lÂl-Ðïý((‹‰µu=©\t 4u6Jr|²$z£`H«c­ŸÓyE åÃÃïY£f %)Ì7}¯o®ŸÏ'yYù5!ÁÇEÄ´´5›û‡mÙ³IöÜ-:B6£t¦¬:÷23*–›™±ºp!p†A™3ú‰Z"0 àýh¤Œé‹$<°‘I>N¼Ù²µf“ì¨Ý*§•„w´êù·þbÖ&ýõíß6!ÁÇÉý®=G뎘)‰ˆ™õañ‰2»|®Ü°úf™7c¤$±>,\ö”‹€ÊÜЋ´!ª)‹ªîv\c5~AZ¡ãêÎ {b<2·`¾ Ì*­5fáEŒóÄIÍñ£RSwTŠòŠåÍ_—i%å&z8Ûeëè×¾CU&[âÖ½[¤¡©^2R3d^ÅY}þf}˜ö Œ¿-‚Qâl$àÓ<ÉÖc£ZQNèHÙ¬¼9p ˜?׬+ÛU»Ý$ýòtȾM´Fgt{wÛ¹xÉ%&`Lð¡Ó3R3ͳ]4Š Òõa»öï0·Wm•ŽÎ)Ì+²Öë!óg.”©ÅÓ¢Hƒ¦"€@(ÊB©IYD@@×‹è¦™ÝØ°“@[O«èH.÷(;¹Wtºñœ‚yf ãÜÂùa»¥EB|‚¹ø»›7HjrÚ@‚#u‡åù7ÿ"›wm”ë/½I–.:ïäJ²gX–öëþa[¬©‰›e÷Á]æ5^^2]Vžs©Y¸~oØ؉!@P‡ €Œ. £dúYAfRÖèGás *MP¶£v›,,Zx+(ÓQô¶Î6yîÍ?‹fö{è©LÒ ½àiÖˆÎÙ —†åÚn*´¦þèÀú°êšƒ&¸S^in+ ëÃR“SÝÔ\Ú‚6 (³A'PÆ"Àôűhql$4~jBš„fxu½á»-;¶UæäWJ‚çĨÖðGo¯N_Œ‰•kD½··×ÜË_Ò’çÈ—ÝÂ(»$à«Î@Øw¸ÊŒ†m±FÄê›ê$=5]æÍ\ «–®6 ;XÆC¹AYÈI)ð 0}1¼¾”>~M‡ŸÅ(Ù)çZSu]™fb\T|ú)Ï“ñññÖÍå}âõzEÓñ››Í[£—çv\·ò†ñéÚszz{d×Ö‡íÝ*z“í‚ÜBY4g±,¨XdÖ‡1MܵÝOðA™íº„ !pj‚ÿˆÙ©æY"' ÓK3Ë"wA^IGËtMÙ¶šÍfÔ,Ô™u¤Ìç=±îT2 *–¹\Ö.¿ÆZ¡¯rkGëÀú°]vJ¿ÉNyñÙ—˜õaùÙ¡¿(%"€A”Ä!ØIÀÿÉ­ÄÌNu£.Ñ+ ´t7KeÒüèE²å:uq‡5…Qÿ..9#ȳ‚;L×”~`³ré*¹ì‚5ÁìÒ£jŽ™l‰š¨ãÀÑýf}جisäºKn0÷KKIsiËi8I€ ÌI½E]°tJ’nfZ’yÄ?L¾@KW³™2GæÅÑûB×ÜéýʶÔl4£f¡-óg_ÔZ\vá¹ÄÊm›¥ûïXV×x\4ðÒõa+ιDfO›Ë-¢íEA{p€A™:‰*"(00}ñ£ÔøÏñÉh¶’|èkÓM™wÞ)ßÿï Ë G¦–M•g×}ÎL¡ U¿yÄ#9ÖŸ6ëϯ_{Àü UÙÕ“™š%?þòÝÃ=Ñ}½}½f}ØÖ=[ÌôĶŽ6ÉËηֆ-´¦%.”òÒé$8‰hp1«AÙXÅ8Ið”NQšä*qyDד¥'f Œäº¤©½Ñd% JÂÒœné–ÂÊÂЗÝ+’Ÿ-ú'œ[gs§4V7†ó§,[¯mU[ÌÔÄÝÖú0 ̦—˲³.6ëà sŠNy>O"€v (³SoP‚ˆaúbJiM‡Ÿ™ìÎû“edDšÓ1׋tPv¼±vàþaŽìqœm­»fÅ'ÌôÄô”tÇØQQ@ P€ ,PƒÇ8@À¨>oŸjK£E@Óá—gO–æÒÎ 茀ƒGX£a›L0V[Ìܸ¹ræ|Y¾d…Y¸Ž.BÕâ2 €@ÈÊBNJ„WÀ”õ”…šÒƒè÷õK[w«kGÊ‚†àÀA]=]Òïí—Ô¤ÔAûGû¦¯¿Ovíß![÷žXÖÚÞ*¹Yyf}Øõ«n’é¥3ÌúÅÑÊáy@ÀIeNê-ꊀ% )ñ5¡‚¾ÙaCÀšäCoÑà¦$vpurê›ëäžßÞ-SЧÉ'¯¼}Ô¦è›u}˜&êØ¹»Y6¥hª\pÆEf}XQnñ¨ep àd‚2'÷uZO¬G˜¾µÝo»†k’ØØXÉHÊ´]ݨPäªkÊ=¿ÿ™tvuÈ­W~zÄ Ô5X¶ÿÈ>ÑßkSgÉÕ_gÖ‡e¤²–oD<ž@× ”¹®KiP4ÄÇÆ3R í6êH™Ž’ùolîjSÍ0l³¦>øäýVšÿ>Y²à“ 1ð2º>lëÞÍ&cⱺIIN‘ÊéóeÙ™Ëevù\IŒO <œÇ €@Ô”EMWÓP7 Äy⥧¿ÇMM¢-Б2¦.:¸CTõ7?|]ñ÷f*kbB¢¬Yv•èú0MÒ±çànÙfMMlio‘œ¬\™oÝÈùº•7Èô²®ºBˆ()¢P€ , ;&;_ Þ Êz ʜߑ.i¦Ã¯È›í’ÖÐŒñ<óÚ“òÒúÍ©ºæuJñTùà ¿5ëÃÄ#¹rÞéšõaÅyá¹ïÛxêÍ9 €€]ÊìÒÔ1$x¬ ÌºC,“,Ðëí•öž6ɲ¦/²EŸ€NY}èéeãÎLã5 Ó¤/U‡öʬ©³eíEט@,#•õ†Ñ÷ê Å 0‚²±hq,6ˆ³Ö”é›a6&[ ¥«Y¬[I‘²;b2®ïµúÝúóáŽ÷®®™&}ñõû$ÁZ¦ Èxx€Œ(@P6" O `_)ëêë´o©YÔèM£ãñX7 Èt´ÌkýÙ¼k£ù;cÊL¹déjëFÏså1 €@€AYpŠ@b\¢hÆ;6&[@ƒ2RáOv/LÎõcc¤Þúsß]¿’Æ–i°þ66[õ±õµ®ñ¸ùZU½W[¾óŬôìÉ©,WEl.@Pfó¢z '`eÝ}ÝÃ=Å>"* d'çDôš\Ì^:RšŸ]`þW3ÍÀØÑÕÎ4Æáp؇|$@PÆK èHYw_—kN•Ý& éð‹3JÝÖ¬·§vw­Ô¬?©ÜøÄxÉ.Ë–œ©9sÒó¡ÞQýaµt·wKÅù¡.zÄò4hc]Ùˆ<<‚2^8P ).Éd_ôú¼ÜãÇýç–*w÷wKgo§d%3%m´>ÝôÌ&yû×oxXQe‘\ýWKþŒü ůß÷ºÔí«“¯=ýµPG €! 
( $Å ID+(ÓMß'Ç%GòÒ\ %Ótø$£>¸úŸ®–¢¹E'޳²V¶Õ·ÉŽu;ä½ß½'}û1ùüo>/±q±£–à €î (sWÒš(Б2ݺ¬Q ‚²(ét6Sד%Ä%Hr|Š kgÏ*e•f  ËŸ™/ÓÏž.:­P§8ÝqTJ0Ôž½G­@ð ”…Ï–’›@Rü‰Ñ1:–Í@YØœ)øÔšy1“›FŸ)Èg+/©4AYÓá¦AAÙÁ÷ʦ§7IÍÎIHIâÊb9ç“çHFAÆ ’{»zeýÃë¥zcµ´k1߬ gÉÂ+:Žo@ì)À {ö µBà”ñÖÍ£uñ|goÇ)ãIÂ)`‚²ä¬p^"jÊÖàK·´¼´6ë´_}áWRµ¾Êci¹iòÞÞ“{o¾×Œ¬ùl©m‘û>yŸ¼|÷ËæFÞÓΘfÖýñ»”ÇÿþqÑ:³!€Ø[€‘2{÷µC`D2¦#elL–@KW³LËž>Y—wÅuûºûÌš²}öIJVÊÀz3MÆñâO^”©§M•›~t“$¦&šöêþ>ó€<ýOOËý¢Y¶îÇëLvÃo9ËOÜ Ùçõɳÿú¬Y«VqA#f®xµÐp³A™›{—¶¹Z@ו1Ræê.¶uãôµ§÷ÊËd¤lLýôðW–XωI*8õvöšóRäÊÿyå@ðõÞïß_¿OVÿíê}z`Þô<9÷ös奟¾$Uª¤t~©lùó™½|ö@@¦Çizý•ßX)[ÿ²UÞyô‚2EaCl,@PfãΡjœJ€‘²Séð\¸š¬$º‘lÒ³—Í–ô‚ô“ô>eYeYRq^Å ©‹õûë%>9^ gë0eñó°~_½$¥Húãßç?F¿&$'HÑœ"9ºýhàn#€ØP€ Ì†B•F 5!U޵Õs(Ç rM‡Ÿl%œIôœ˜Vò ¸´À%7/‘ᨡÍյѤ¶×ãÓóÓeí?¬•Åkœ>ãœòé{?-úþŸäÉÿù¤Ùï‰÷ÈŒ¥3dÍwÖHrw˜Àâ `S‚2›v ÕB`4M° 7nëiµ-ípžG dþÌ‹¬) žô’o^"úw,[\bœ¬þ›Õré__*M‡›L2Ôì#äCË™rÚsß2MþÑZÛ*ÙeÙæfC»å'· ÝÅ÷ €6 (³A'PÆ+ ÓÛ»ÛÆ{:ç!0.]O¦ÓgãcãÇu>'M &&ÆYÁœ¥ÙsËsƒ9”c@l$@¢uUA`¬:…±µ[GÊØˆœ€¦ÃÏNfêbäĹ €€ÛÊÜÞôÏÕé‰éÒFPæê>¶cã4ÉGFR¦«F@p¤A™#»J#pB@§/žXS†‘ð‰O4(Ëb¤,2à\@ *Ê¢¢›i¤[RÓ¤§¯Gºû»ÝÚDÚe3Öîñz½Ü8ÚfýBu@œ-@Pæìþ£öQ.ñÑýÉZ»Z¢\‚æGJ@דIŒHfRV¤.Éu@p½A™ë»˜ºY@}xb=f:™›ÛIÛì# éðuÚ¬'ÆcŸJQ@.@Pæð¤úhÂRƆ@$4>£d‘æ €Ñ$@PM½M[]) AYsW³+ÛF£ì' Ó³’™ºh¿ž¡F €N (srïQw,]WÆš2^ ‘ðú¼fT–‘²Hhs @h (‹¦Þ¦­®ðO_Ô7Ìl„S@Sáû|>Òᇙ²@¢R€ ,*»F»I ÓšJ¦o”[º™Âè¦~µc[4(‹‰‰áÆÑvìê„ àh‚2Gw•GàDjr}£lR•‚@ô5¦Sc4'> €„L€ ,d”„Àäèd¨Yñا€¦ÃבY6@­AYh=) IÈJÎ}Ã̆@8ü#eá¼e#€ eÑØë´Ùu:¥Œ‘2×u«­Ôçí“¶žVÒáÛªW¨  €€[ÊÜÒ“´#ª²­‘²öž6éõöFµŸ@‹Þ Ïwb cø®BÉ €D§AYtö;­v™@NJ®yÃÜØÑಖÑ»4v6ˆ'Ö#éÖ}ñØ@@ ´q¡-ŽÒ@`2’ãS$1.QôsAZádTkº\@ÓáGÛM£=1Ó«;Öí±w=â¯õǧÈQ¸ÅÆðÙnv;MF0”…•"˜ ì”a¤l2ä£ãš&ÉG”e^\4ã4ùúÕwI¿¯ØN®©=*o½û†Ìž9WæÏY0ì1nß™•šíö&Ò>@ "eaæ"„_ ;9GjZ†ÿB\!*4»çÜŒ’¨j{¼'^ΜµdØ6¿µñ y}ërzå™rãê[ÌÔÎad' €A”Ä!8A@GÊvÔn¯Ï+L)rB9§Ž=ý=ÒÙÓuÓ‡ë!¦øÔËOÈ«ï½,«Ï¿BV-]=ÜaìC@`Lecââ`ì+›’'>ŸÏ¬+ÓÇl„J@דé–eÓ‡úõôöÈCÏ<(»öïÛÖ|ZNŸ{æÐCø@q ”‹“°Ÿ€fÅKˆK†Žz!(³_ÿ8¹FºžL§ò¥Ä§:¹ª{K{³üò±{¥©µQ¾tãW¥¼dú„Êãd@H›¨Ác. ©ñëÛëÞ ªo7Íê™Å£d‡’=ôïÒÓ×#_»í.2»½@© àFÊ\Љ4¿€Žj®öËWB" Ó³’¢3ËÞ¶ª­òðÓÊ”â©rûUwJrbrHL)@@FÊ5xŒ€Ãt¤Lß@÷yûÞªo'hL‡¯þ¯½ÿŠÜÿø½rÚÜ3ä Ÿø ™^”Ôp™#e.ëPšÝy©ù¢÷°­ï¨“´¢èÆ õ!èìë”î¾n+ÉGôŒ”iœÇ×ý^Þüðu¹âµ²âìKBbI! €Œ$@P6’ ûp @r|Š$'¤H]ûq‚2öŸ«¬£dºe&eÙ±z!¯Swo·üú©ûeoõkºâgeá¬Å!¿"€ 0T€ l¨ß#àpüÔ©k«)txC¨¾-t:lR|’$Å%Ù¢>ᬄfVÔ ‹­-ò•›¾.SЦ†ór” €À€kÊ(x€€;t £Ž”±! ³ž, FÉ«–[½ÖŸo|ò[d¡xñP €@ÐŒ”MÅ8C ?­À¬jín½whêjM ãæmËžMòð3¿’e3åSk?#I îtsÒ6@À‰eNì5êŒÀ)²“s$66VŽ·×”‰§‚Б²9ÁìÀ£^~g<ýêåÜÅçËu+o˜˜¶‚*#€8]€ Ìé=Hý"+z¿²º¶ã®~3=¤Ù|ŽÞvéíïu壽>¯<öüïdýæ·äªå×ʲ3—‡A"@N€ ,8'ŽBÀQi…r°é€£êLeí'ÐØÙh*å¶Ì‹]=]òàï“G÷Ëg®ùœÌ›¹À~øÔ@ ªÊ¢ª»il´ä[AÙ֚͢÷˜JŽKŽ–fÓÎ èÔE½ÅB‚'!Ä%O^q Íõrßã÷HgW§ü›¿!¥e“W®Œ €ÀGd_䥀€ 4Ù‡®9ÞvÌ…­£I‘ÐtøY.ʼxðèùñÃÿ.q±qòM+Ã"Y¤^I\@`4‚²Ñ„x ÄÇÆKvJŽk­q`í©²]t¤,+9Û.Õ™P=>Üù¾ÜýèejI¹|å–¯KFZæ„Êãd@B)ÀôÅPjR6Ðue5-GmT#ªâ4)›]0×iÕ>©¾/¬Nž}ý¹ðŒ‹äª‹¯•ë €v (³SoPB(P˜V$;j·Iw·$zCX2EEƒ€Þç®ßÛoM_tîH™ÖÿwÏý·¼·í“îþ¼Ó.ˆ†®£ € (s`§Qe‚ȳ֕éV×V+¥™S‚9…cÐQ2PÊLÎØç¤]&Ãâ¡cÕrçu_”¹å•Nª>uEˆ2‚²(ëpš=::¦£ÇÚjÊ¢§ÛCÖRM‡Ÿ–.žOÈÊŒTAuMÇå¾Çî‘Þ¾^ùê­ß”â¼’H]šë € 0.‚²q±qÎ(L/"Ù‡3ºÊvµÔ$N%Ûw¸Jxâ’“•+_¾ùk’‘’a;[*„ €ÀP²/á{\$P”Q" ÒÕ×å¢VÑ”H81þ{Ûß‘ÿúíOeÆ” ùÊMV†E²H¼T¸ €@ÊB€HØU@“}h¦¹c­da´kÙ±^>ñIKW³£Òá?÷æŸå‘g’ ϼHn¿ê³oGZê„ €À°L_–…¸C@o’›—š/5VP6-{º;E+Â. 
£d>ŸÏÓûúûäÑg#w~ 7¬¾YÎYxnØ}¸ €¡ ( µ(å!`3¸·n·ÍjEuì, ëÉbbb$#ÑÞ7Xnïl—ûŸ¸WŽÕ×Èç¯ÿ²Ìš:ÛÎ¬Ô @˜¾8" O à¢ôbéèi½ïÁèHYFR¦ÄÆØ÷¿ˆÚ†cò£‡(-í-&Ã"Y0=Ë1 €v`¤Ì®=C½‘€N_Œ÷Ä›)Œé‰d¢ ««‹iÒÌ‹Iö½?Ùžƒ»åÁ'ï“‚œBù̵Ÿ—´ä4W÷Cp¿€}?u¿=-D "šè£ ­P޶‰Èõ¸ˆót¤Ì®éð7ly[îùÃÝ2Ǻô—oú™ó_n´@À`¤Œ—Q PœQ*¼oåÔó™lŒQÐdš8N~_¿™êšeѲ?½ö”¬[ÿ‚\rî¥rÙùkÆÙBNC@À~Œ”Ù¯O¨!(É,•Þþ^9ÞVò²)Ð]šäÊÝm•¿·¯W~õÔýòÊ»/ÉÍWÜF@æ®—­A°)ãe€@¤%¤KzR†i9d¦2FA“iâ8šºÅë‘´Äôq–ÚÓZ;Zå—ß#õMuò…¾"3Ë*B{JC@ÀŒ”Ù ¨‘(±¦0i>‰Kq  èH™f^Ôµˆ“½ÕÔ•?üïÒÙÕ)_¿í¯È&»C¸> €@ØÊÂFKÁØK $³Lš:¥³·Ã^£6¶Ð$YÉÙ“^§ûwÈOùÉÎÈ–¯Ýv—äeåOz¨ €á ( —,å"`3´"3-í°5…‘ ‘ìÿ­oÈ}ý\æW,”/Þð?$5)u¤ê²@W°¦ÌÝH#]@o¬7’Ö)Œ¹³G?#¢N ×Ûkn4îtø- ÒÑÝ!¥ùeƒŒ5;èS/?!¯¾÷²¬>ÿ Yµtõ çù@· 0RæÖž¥] # Sµ¯Ï;̳ìŠv“yÑBw:ü‡ž~PþôÊÓƒ¸{z{äþ'~!o~øºÜ¶æÓdƒtø@ÀíenïaÚ‡@€€&ûÐÔøµmÇöòºæ0Î'© ia#ÙdŸ8²_²3?^·ÖÒÞ,wÿ÷å µÿK7~UNŸ{fØ®OÁ €ØQ€é‹vìê„@˜ôÍvvJŽj>h¦2†é2ëPMò‘æ›F¿ðÖsF''3×|=R{HîµÖ%'&›„¹™yÕ£Ú €Œ_€‘²ñÛq&Ž(Ëœ"‡šªYw*^) gæÅ£uGdǾm¦¹Y¹²­j«ü§5B6µxš|íÖ»„€,¼ýKé €ö (³oßP3Â"Pje=íÒÐY–ò)Ô¹áN‡ÿâÛÏKL̉ûŸí¨Ú.÷?~¯œ6÷ ¹}ígÍH™så¨9 € (›˜g#à8œ”\INH‘ÃŒ–9®ïÂYᮾ.éêí ÛôÅúæ:Ù¸óñù|¦ïlY/ç~\~á•ÒÓ×#}ý}ále#€ `kÖ”Ùº{¨á0S›«eañiá¹¥:N@GÉt W:ü—ßY'±±±Òßß?`óÆû¯‰þõoñqñòíÏ}G2Ó²ü»øŠ €@T”EE7ÓH ”eM•ÝÇwJ{O[X3í ¾*ßÙY@Óá'Æ%Jr\rÈ«ÙÒÑ"6½-ý޲ѩŒ>ILH”9Ó+eá¬Ed!×§@@'”9¡—¨#!(L+’xO¼n>$³ó熸tŠs¢@SWcØFÉ^{÷eéz2ýëõz%/'_V,’Êó¥¼tºèÍÍÙ@ˆV˜v_ω þÑ*@»ˆR7ö¿j­!ꔕ³VG©@d›ýðK¿’çÞ{6²ÃÕÊJÊÄ ™¤úHè3sæHŽÄZz­?=ýé—¡£f"«Ï¼\n½øSc¨5‡"€ à|ëÿß#Œ”9¿iã˜bMa|cŸ˜Y ’â’ÆU'/ÐØÖ(əɒ=åã›&vøôŠ×\¤$§$ôÓ‘„˜Iµþ ·5V7JC[ÃpO±@× ”¹¾‹i à ”d”™)cz#éŠÜÙÃÄÞ Ä%ÆIFAFHËtKa­µ­ni í@@`ÌLâ3' à¸Ø8)É,“êÆîh­@@*@PæÐŽ£Ú„B`jÖ4©i=*ÝýÝ¡(Ž2@@Æ!@P64NAÀ-:R¦Yï5tK“h € à8‚2ÇuF tñ±ñR”Q"ÕMLa *%!€ €c (›G#à:Âx´åˆôô÷¸®m4@p‚A™z‰:"F2+5¾ÞÐW³0²!€ €D^€ ,òæ\[ èÆâŒR9ظßVõ¢2 € €@´”EKOÓNN!Pž=ÝLaÔI³!€ €DV€ ,²Þ\ [ èFO¬‡„¶ì*… €n (s{Ó>‚ðÄx¤,sªìo¨ âhA@¥AY(5)  LË™.ÇÛk¥£·ÝÁ­ ê € €€óÊœ×gÔ°”XÉ>=‰ÖhÙ¾°”O¡ € €à ” ïÂ^¢N FbdŠuϲeNè|o¿W¶¿¸]¾pÄê¶7¶›cŽï=>â1#=qàý²÷­½#=Í~@¡AY1) § ”çÌÆŽiîjrzS\_ÿ¾ž>ùÃßþA^ûÅk#¶Uƒ1=f˳[Fñ¡¬øÚ É,Îx®½¡]^½çU9ºý¨xâ=R2¿DÎýô¹’–›6p @@`bŒ”M̳p@fR–䤿Z£e$yp]çZ Úûæ^©zûäûÑ5T7ÈÖ¿l•®Ö®fw·u˃Ÿ{Pö¿³_ŠfIRZ’¼ûè»rßm÷Éñª±'(˜ € 0H€‘²A|ƒ*03w–|pø]Y2e©ÄÅòkÂÎ¯Šºª:ùÓ?ÿiØ*¶ov°;ÛêÚ¤òôJ¹öÿ»VbãN|†W½±ZúÒCòüŸ—[ÿóÖ`‹â8@8…ï¶NÃSD«ÀÔìryïЩn: Ó­5flöèhê]¯î¶‚ý½ýÃîvgLlŒ¬ú«U™ž7eñYtå"ùà±DG×r¦ä[Ç!€ €Àe#À°hЛH—d”IUý‚2›¿¦ž1Un»û¶ak¹ÿÝýòÐö¹`vf•dIFAÆI‡N=}ª Êê÷×”¤Ã@Æ.Àš²±›qQ!0#w¦k«‘ŽÞö¨h/ù$ö € €À˜ÊÆLÆ D‡@iæÑ³ªz~¸©ÇuJâp[Ûñ¶“v7i½IõЭþ@½Ù•]š=ô)¾G@q”Sˆ‰M¯SÙÜ#š•*µ{j¥§£g QÞ>¯l{~ÛÀ÷þ½½òÁãø¿5_ûºûdÓÓ›$57U gzŽo@@`|ÃÏM_Yœ….¨È›-;k·›iŒ…iE.k]t6§|I¹ì}k¯<ñÝ'äŒOœ!ý=ý²á7†ÅÐé‹Ïÿûó¢Ygœ3C4©È;¾# äò¿¿\FšÞ8laìD@ÊF¤á Ð{–å¦æÉžº]BPæŽ×ÃY7ž%5;kÌ=Év½¼ËV‹Ö,’%7/‘ßÿÍï5Ro"}ÁÈ ÿçyóþ7ÍsiyirÕ?^%z €„F ¦Ý×ã MQ”‚nØS¿KÞ«Þ ×.¼Q< nlbDÚôÓ§~$;·Ié‚Òˆ\o´‹èôÅ–c-’]–-žxÏ)÷y}ft,!5AÒóÓOyìxŸ<¼å°ÌÍž'_]ûñÁy €8RÀZ2r„5eŽì:*@ä¦eO—˜˜ÙßP¹‹r¥° $¤$HÞô¼Q2­ˆ&É-Ï [@öÆr@l.@Pfó¢zL¶@|l¼h`¶·~÷dW…ë#€ €® (se·Ò(B+03w–4v4HCljTè¡-Ò@@è (‹îþ§õ%—š/™ÉY¢ëËØ@@B+@PZOJCÀµš_וõz{]ÛF† €L†AÙd¨sM(0=g¦ø|>9аϵ§Ê € €€}ÊìÛ7Ô [ h:|Mø±»n§­êEe@@§ ”9½©?˜•?Ç$ü¨k?Á«r)@@ÀÝeîî_Z‡@HrSò$'%—ѲªR €D»AY´¿h?cЄ÷Kw÷Ïäp@@áʆSaŒ(Pž3Cbcbe_ýÞá @@àÊ‚·âH°âbãD3~ðr@@B#@PGJA ªfçϕ֮9Òr8ªÚMc@@p”…C•2p¹@fR–¦Éîã;\ÞRš‡ €„_€ ,üÆ\W Ì)¨”Ã-‡¤µ»Å•í£Q € €@¤Ê"%Íup™@iæIMH“]Œ–¹¬gi €DZ€ ,Òâ\—ÄHŒÌÊ›#Uõ{¤ÏÛç’VÑ @@ òe‘7犸F`fÞ,ñú¼²¯ôø®éT‚ €ˆ‹ø¹ ¸F Ñ“hÒãëF5cYÀë‘ÖÚVÙ±nbÉQ<â‘~ë7O®ÇÍ¢M €Œ*@P6* €À©4=þÞºÝRÓzTŠÒ‹OuhT?wý7Ê™³ÎšÁášCò滯Ëêå—KFZæ„ʲãÉÓ gرZÔ @° ”…˜ ànìä“gí6‚²Stu~fèßñnÇkåÉ¿<.ç,+©I©NluF@ ”MS@`d©Yå’–.Ûk·Ž|ÏÈ–=›äÅõÏËu—Ü e…SA@( (‹ÂN§ÉDJ`^áÙW¿W:û:#uIG]çxc­ü÷Ÿ–¥‹Î“³,uTÝ©, €„N€ ,t–”„C¦çΔxO¼ìâ¾eCdDº{»Í ¢ós äڕןô<;@@ zÊ¢§¯i)ðÄxdŽuß²]ÇwH¯·7â×·ó}ö7ÒÞÑfní‰õعªÔ @Â,@Pf`ŠG ÚfçÏ•˜˜ÙÉhÙÀKáåw×ÉæÝå“kïÌ´¬ý<@@è (‹Î~§ÕDL >6^40ÛY»Mú¼}»®]/´§z·<óꓲfÙUR1e–]«I½@@ ‚eÄæRD«€Naì÷õËžº]ÑJ`ÚÝÜÖ$=õ€,œµX–Ÿµ"ª-h< €|,@Pö±@ L‰žD©È›mÒã{}Þ0]ÅÞÅö{ûMbÔ”4¹é²[í]Yj‡ € (‹(7C z*­ôøÝ}]RU¿'*ñ÷r¼¡Ö$öHŒOŒJ €à ” ïÂ^±@r\²ÌÈ­­Ç6‹ÏúMÛ†-oËÛ›Þ”›/¿Mò³ ¢©é´@‚ ( ‰C@ 4ó 
JGO»ìkØšPÊ¡cÕòØ ¿“•笒‹Pcªˆ €‘ (‹´8×C ŠRÒNŒ–ÕDÇhY{W»<øä/eFÙL¹ì‚5QÜó4@N%@Pv*žC Ì/Z(mÝ­²¿¡*äeÛ©@¢ùðÓš*ÝvåícýaC@† (N…} 6´„t™ž3S¶ÔlrõÚ²g_Fªí•Û¯ú¬¤&¥†Í“‚@@ÀùeÎïCZ€€ã/rõhÙ–=›äÅ·Ÿ—ë.¹AÊ §8®¨0 €DV€ ,²Þ\ ,7–o¬•ÿþóòtñyrö‚¥ô7 € 0ªAÙ¨D€ápãÚ²îÞnsƒèüœ¹våõá`£L@p¡A™ ;•&!àôÄ ×­-{ôÙßH{G›¹A´'Öã„n Ž € `‚2tU@ ZÜ4Zöò»ëdóîòɵwHfZV´v)íF@q”S@ 4:Z6#·B6ÝèèLŒ{ªwË3¯>)k–]%Sf…‡R@@ jÊ¢¦«i(öXP´H:zÛeoÝn{Vp”Z5·5ÉCO= g-–åg­åhžF@N (;Ù„= AÔ„4™™;K6×l¯ÏÁ+OüRýÞ~“Ø#5%MnºìÖ‰H  € •eQÙí4{ èhYO_·ì®Ûi¯ŠÔÆçó|wâáã/þ^Ž7Ԛĉñ‰'=Ï@@ ‚²`”8Â*Ÿ"³òçȶšÍÒïëëµÆSøëþ ¿ùÓ¯ºaËÛòö¦7åæËo“üì‚AÏñ  € 0‚²±hq,„M`^áBéõöÊÎÚía»Æx ֬е ÇN?t¬Z{áw²òœU² bÑÀ~ € €ã (ç €@È’â’dN~¥l?¶Åg!¿À8 ÔDÍ­ÍÒÑÙaJhël“_üáç2£l¦\vÁšq–Êi € €ÀÇe[ð&Y ²pI¯™]6Mw¯[WO§©ÛÃO?(111rÛ•·KŒõ‡ @˜¨AÙD9B&àIƸ£v›töu†¬Ü‰TU½×œÞÕÓ-~íiÙw¸Jî¼î ’š”:‘b9@ˆxÄ@Às *eçñí²Åº¡ô’)K'½F»öï0uðy½²ný rÑYK]ãqÙ²{“>~Xê­ÇŸºê3RœW2éu¥ € àL‚2göµFÀµž,,Z,ïT¿-•…ó%-!}ÒÚÚÒÞ,-ƒ®ÿÊ»/ |¯ÓÏš¶ädæìã € 0V‚²±Šq<„]`fÞ,Ù^»U6ùPÎ+¿0ì×é{«÷ˆµ€L$àeˆé=ËÍ^,—_x%éðGÂc? €-@P4"€@¤4Æâ’Óåõ}¯˜Ñ²ìäœH]zÐu4(óÄÄš{§ÅÆÆŠ×šÂ8{Ú¹bÙZ)-(t,ß € €ã (¯ç!€@X¦f•KNÊÙxä}Y>ó’°^k¤Âu=Y¿÷Äͬ§O“+/ºZÊK¦t8û@@q Ä´ûz|ã:““@j~@:½'îãJO—G<éÉî e±A•åë÷É®wIbv¢ä/É—Ô²ñg[Ô)W¦]#EqÅA]›ƒ@@ z¬BG)‹žþ¦¥„M@²ø¤x‰‰ ñ}»ÒNT9AÂV÷S<÷³s%>-þT‡õ\O[tøÚƒ:–ƒ@@ úÊ¢¯Ïi1aЀ,.Þ]¿Râ²CÓž‰üH_X:™B@@ ,Ü<:,¬Š € €'@PœG!€ € €a ( +…"€ € €Á ”çÄQ € € €@XÊÂÂJ¡ € € €@peÁ9q € € ‚²°°R( € € œAYpN… € €„E€ ,,¬Š € €'@PœG!€ € €a ( +…"€ € €Á ”çÄQ € € €@XâÂR*…"€£|ðÄâí÷Žr”Hfq¦WËŽu;¤ü¬rÉ–;ê9€ €8I€ ÌI½E]p‘À/ný…ôvöŽÚ¢Ekɚ﬑Ÿ_ÿs¹ã;ä¼ÛÏõ@@œ$@Pæ¤Þ¢®¸Hà;ïG|^ß@‹^üÑ‹òÚ=¯É÷ß!åg—ìOJOoŸWοó|)˜U0°Ÿ € €€[ÊÜÒ“´‡ Ï-Tã´¼4ó}ny®”Ì+ôœ~sû/n?i;@@7”¹¡i.¨Ý[+üîå¢/]$³—Í6­}üï äξõlùó÷ÿ,UoUIJvŠ,ýÔRYúÉ¥²oÃ>yùg/ËwHZnšœwÇyæo Õž7öÈk÷¾&ÕVKþÌ|™»b®,ûâ2ñÄyÌa»_ß-/ßý²Üô7Iݾ:YÿÐz‰õÄšïËá1 € 0²/NDs@ "íõíòÎ#ï˜ÀÈÁ-Ïn‘ l½ð_åÈÖ#ì0à¥9IDAT2ýœér¼ê¸Üûýòä÷ž”®ø¡ÚxHf]8KêÖËŸyÀï?ÝO×É.úÞrØÓÝÞ-|õ¹÷æ{¥¿¯ßV¿¿Þ\wÿ;ûå?.ýyå¿^‘ΖN|E@B"ÀHYH)&C@GÁnúÑM²òë+Íå[j[äï¦ü<ýOËÝ)çÜvŽÙßt¤I¾=íÛ²ñ©rö-g˱ÝÇäwßúy»å'·HLLŒ9nýÃëå¾OÞ'oüò Yö…eMzàŽdõ߬–Õ·Zââùµ9Ã@‰#e!a¤˜ ¬²,Yñµ—Î(È0)óÓòÓäÌÏØŸU’%ú·­®Íì{õ篚$#×ýËu™>¡A\ù’ryý¯œ«*.¬0 È±ð  €„H€|CI1 yìÒìAA•ÖÀï1÷5@yN¬ÓctÊ¢® ûéÚŸê·ƒ6êØÑØ1hß×1è{¾A@B)@PJMÊBGt·uKl\¬¹1õÐ ëͪuëíúøjÙeÙCã{@@ de!£¤ pŠ@ÑÜ"“qñs¶îŽns_´ø¤øæÄ%ò«rƒ € rÖ”…œ”@Àî ¯X(=í=²á7Uµ£©Cþ~úßËÏoøù ý|ƒ €„S€éKÙ `K]#6óü™òÀg0™ç\^†½ € ÓîëñÁ€LD ¦ï¨tøÚ'R„«ÏÕ€lJü4ñXØ@@@‰9BP(Âc@@@ ‚”1Ÿ&‚à\ @@*@P6T„ï@@@ ”E›K!€ € €Cþ/¨â~Éå nIEND®B`‚celery-4.1.0/docs/images/result_graph.png0000644000175000017500000010606613130607475020346 0ustar omeromer00000000000000‰PNG  IHDRû(Ëí@IDATxì€eE•÷Oç¦'ç@2#ƒ‚°P0,ŠaÅ€ŠaÝ`ØouW×ÝuWw÷3-úéª *¢Da ’†œaÃäœ:çøßy¯ÞÜ~ýúõ}=Ý“úÔÌíÊuïýßzUÿ:uª*¯m {@Ü8Ž€#à8Ž€#0†äaY^”#à8Ž€#à8†€ ¯Ž€#à8Ž€#0æŽy‰^ #àä„@ÿ@¿ô÷÷K_Ÿô ô™?áÄ%ãÍNs Xzlû‡¹ÈƒI”•Œ#¥¥IÄ…‡% C9IGÊâ"yyyæÌµNµ’aɸ]ió-=ñäãÊÏKŒm°CXÊN¦ËÏOÄ‘Æ.õ§»ó¢q_W ùB^Òºq½‡€Œ½‡½ßyèíޞÄÕ×-½}½ÒÝ«v¯†÷i8—Æ÷i8nâÃüÄ…r°É °têï#¬?婟+ßaH‡~M§ÔAÉR$I0ŽPLè¢SÄÄH‡µ•~H~A‚ˆ@D4¬0¿P œ¨]¤WaA‘^¤Sw$¾°0á/*Ôx '=y‹,}ÂüV–¦#Îâ5oqaq2}±PF(gB(ùùRtÒ]=]Ò­vWo—ôôôHgO§vöÝf ƒpA°»4mÈÛݳ+®GÝ=ýJ”@r16¢ÖŽ&/Ÿˆ'áÆ¯Ãk WKÝüÓ¾)a³¸Äˆ7qƒÂ,2Œ¼ü§Òò?b[¾¤Ÿؽ£vH«-_$޲ÌX±É²C qÉøT>’a);)´ M`„ôÉÒ‡µ‚t|‚;½\„"¡ÜTî™–X‚DšdX*N †|VIƒ´%Ä%mòX:“ÐàI¦ ᤳÿZeð_Ã,OÒV¸«ŒH˜=qü#÷1wB’„ä ¿ÙÉg´|1þ@J /EùJ:Šä2R¬î’Â))â*5‚)±8/Ò«TÃ‹ŠŠ,¹£aɼŖ¿ÄHQŒÇñ$ŽÀn#àc·!<0  íìîÔ«Ã쎤?¸¶Æk§OgOúTé!Ý "ÑÝÝmbúlhÑ¡êø²@EÝŒ8Õ¶™Žº@sj:ÿ`ç$H¥S76£VÂCÞt¿¥QA'È=Ü8ã…@Ÿ1’áH#$}JB”¡r2õ«;ER¢î$q©8,•—{ )3)Yo_‚8ey!~ÅÅIÒR\b䤴¸L¸ÊÌV£¤¢ KÄ‘®4•.‘¯TJ±f¹±GM8œ` Ÿ¼½«]Ú»Ú¤­³Mí„»£+A:’î6KÓji-Nã;“R†á ¡Sf´•_¨D@ AèÜ!&Ðð” I€¨m„!¸‘„8$FénG`·0i „i ¶ÓéQ;åº{“ñIÛÈK’Øô)aé×p¤‚F–†y2¤#¥z••(IÑ«¼¤B*J+¥BíDX¹‘’D\¹Å——”kšŠ”{˜¢=x?FÀ Æ>üñ˜hël•V®ŽÖ”»­£MÃZŒ<gi:Z”$tQèêê21nú«0Ÿ\¤sÆJ èÜ µS§Ã‡@aÐpÒ†°A6ù(¸q …€IU +\‘¨tBâ 4½ªŸ¤R%+}=è*©ø%Í )))I’2©,«2‚R IQRYªþ2lü•¯WÒÍT‘›}'{è» ÐÒÞ,ÍÍFp·()hÕ »EÛ5¬©­IÚ *YÈôC4…°b•©ä@Ut@ŠÒ³3ø 4½K1÷ÐÇöÛ8ŽÀð¨ÞK_’ %ý= âav&¿™U©ê×ô½Ý Åíô‚ !)©PÒQSQ#ÕåÕRUÆUeD»Jà 
%Õ„«å\7「ÝÀƒ¦¶F#ÍíMƒì¦öFiи% HÐOˆ¦ŠŠU ² ’ƒ¼¢K0Ø­qsý&ÙT¿ÑÈÃ&uoܱÁ$ìêˆa‹ßÒŠ’(MH#H˜$Bý,¯tã8Ž€#àì«°¤··3!ù@âaRõC@:Ûºt&±¬—„H:fO™#³êf ™U7[fª›]^÷E³× g7@"Öo_'v¬— Û×Ë:u7¨Ž†õÑ¥å¥RX®Seº=n¹ˆ`«4Â#à8Ž€#p "€ä£»]B@Ú‘´{Û{¥³½3µßÑ$Õñ˜7užÌ™:WæL™+sÕ ù`Gä½iö(Á`ã(ˆÄê-«dí¶5²jëJÙ¼c“ÅòËÒò2)¬(•JWèV¶åj+¡ðöfñ{;Ž€#àìk°ñ„£«]ÏwjS[¥½m*õhï°e¹‹™SfÉ¢éÉüi dáŒEF<öäÆdãJ0P´|mã«òú¦²bÓk²qûÛ¿@•+K+•ÿ¿ÏËúgÖFm mƒòÇÅdóË›\œðîL2D!s›+4ž¿øÈ/ä±_=&ç|ùœAeäá<F’‹ßºX­Ö<ôó‡l:ì’ÿ¾DjfÔX1HT TÔ½Ó>yšI4âÖ“Ñ>G¦|qê0‡ FM¶ï•Ëï'ZæhÝc}¿¸õ-×çÍå÷ºíõmÂ4áñŸõ6qêúxˆØïø·;dáÒ…òÞÿxoJÙŸßÇ/ø¡ ­ ÁëçÌú²9¦k¹¸ÝòøMºrôù«‹¾˜q7ÑÁ¿àdQ?¹ý Y³sµÌ}ÃÜØseCžÂŒéµlk‘S>zJŠ\ ›ŠA2èÜhX¦<-#Zˆ C.Âtì1N™Ì]²þág ­³}úQç%÷ýð>ÙòÊHpB †©¯¨aîßLdZ½ F#’àòèÕF³ q¿¾ìuY~çr»¢Qî÷ƾQª§W§ÒÒà„Dcõc«Sq¹8 IO\û„l_µÝt]ª§UËQo?JŽ{ÇqCŠaÿÎoß)ýëEòìMÏJ:Áˆ‹ÉÊe+­ìcÎ?fÐ=xOôBü È<÷üàÛ‰Èpc¤weÚcÍãkLÈE¸-ÏÊs­zt• âÖ“$›º÷àOÈWAQI¸x— Ó·G#}¯LÏ”é÷“)]zäüé럖ZadŒiÅÙGÍ–“?z²ÔάMOžòg»ßH¿ƒ¸õ-ÜŒúŠtÓ ›¬™sìœÔà ¤ÁÎå÷ºmÅ69òœ#£Ù‡¸ãÖ+¤¦Ô1”ßü±7§Èò»üÌÍŸRöË÷¼,¯ÜóŠ ¡ ;ú¼£µ!ÃX?g(×íÜ@ðPZ]*kžY-p†Ï½ã¯†0„`°]÷“¯=a`\Eœ!¥z€!À|÷[¾ð!§CÒ¸±Ñ‚¢m4 ƒ·~ãV9äÔCì qqËDLÉ´çHkÚÒd> 9RFKÞ½Ä6?côñäµOÚô‡qX¸½p¢ÊtèÃDïüèA© BÅýÏ¿tÇKréU—Jí¬DcMç Ó2£!4à×~áZ©¬«”y'Ì“òºrkàÀ…°è¨ ÿ¾ö9â-GÈÁ§l#Ü?Øq1aŠí £e¼zÿ«¦ã÷\:ýgo|V>qÍ'RÄ/”ì8ï E2õà]¢í?<+ÏÎFw˜P'BšhX¨'ѸáÜlìsÕǯJ褴„zɼäŠKdꢩÖÙäò»ˆó½ÒŸg¸ßOzºt?õžz´îYU,Mž|ê ;êƒmÊ£fÁàˆsLhx¢‰Ýl*öƽqH&$ËïZ.‹Þ´È¦!†$Ѐe¿\&ÍÛšM¹*·Ì) ¦X6F±Çœ7xt®¦yK³Ùüùà?(×þåµòƒ ~`+86¿²Ùö¾¿ø¿.–é‡NO¥‹ë@¡ë? ÌU£¤ÄÝŒj®ùÜ5Öˆ÷Õóâ7b:”Ș'üèÕMi›3ýîÙßµ‘h”`0ò£Ó¼ø{g-7&¬!GŸ%SÍ|ö@_bW=¾ÛH†Æó–¯ß"g|î “X! Îdâ¼+ea2Ý—Q†yù\ë‰eÌò±éÇ!ýóE† I×?·^~õ©_É]ÿy—0]·‡ÛÄý^!=öp¿ŸhšLî /lµO­•¥—,•·ýÍÛRIÈ<øÿ¾I¦ßÃp÷Ëåw§¾ñ@tÆh÷ðG42M$ìÖoÞ*è¶Ix\ê߈úÁª0p@_Œ2™â…$Ç­WÜ—g¬šR%Oßø´M• BRó¢8Žô}’tA.˜.yÏ·ß“’öBrPŽšñxÎhùîÎÀà#ŒE3Nm£rZî7±r¬xp…Üôµ›L¡“™ sä4ªGŸ{´ø2¥‰†e*“)æ>Ÿ¿õy™¹x¦)tÚjU]ûäZËIJx¹óÍùñ³Í,¯¼rß+2oÉ<ÙGï9’›†‰e·ŒT¹  ÄÑùXôGNýø©)rAÙ¼ "›ÈC£ñA+>:Ò ñQ;&`Ê”W&™‚ˆcnû×ÛL™–wÉfâ¼+Ï…É´¼0„ñ\¹Ö“lÏî÷Ö¿~kŠ\Æ–i™gnxÆ–I2JN7™ê0irù^¡Ì\?!6â…ÿ|¡)eGÃ÷ l4.ÛýrùÄ©otÚ`…~’º`L²ùWo1}©–‹Ý¼µY*&W˜^RL íÃíߺÝŠæLmÆ­Wäo¯oÊå÷±DÚ©¡ÿâayègYøÙ_<[ž¹ñ’Ûʦh»0ã°¶âiÙ•Ë,ž?ãñœ©ÂÝ1*è7à p‡t3D‚Q]V-—œñ!¹úÞ+¥PÏ IŸ—O/Àýñ aúãüѦ!Ð9 «™™P¼K/?Äã.<.=j¤2ßþÕ·Ë _¾Át îþ¿wÛ¶°œûrÞߟ'7üÝ ©U+ŒTnøÊ rð©Ë…ß¼0%U¡q¤á4¿w}ë]ƒî=’§~m½IÁߦ›ã/Ê®D–žÿ½?¸×FSÑ8ʾôÊK-ˆeœÌK£µN§´cÕŽ”H6ä1åÉ¿¿IPÈd¥L6 šñ™ âv æ¢pþ†${ç?½ÓVž À¸ò‘•ÂòÂЙ Iœ ˆó®aJ&ÑOxþϧ’ÅSö×@±}hˆã¼kx¤Pé&„…铸õd¤oÀ}†ÓÛ R,¤cÁŒT‡sù^¡Lìl¿FÈÙ¾¦ðÅóûc:BÚ×Õ'üÏ?Fo“rg»_ÜßAÜúÙÇÐQg2HGC02•Eß Øü“K½ iÑ©H7‡M/n²%ݼSqYqzóó>qÌîëR9㘳'Hú2 âÈÀ‰k¿¸ëdÍ#«¥zfµ1ûá*vÆÒ=PÖ=½Î,v¦¼ð_/Ì8º‹ÂÄ* F=ÌסÇ)Q&ÊŽL•p³þÙõæ sÉ ¬#ÌÔ9TϨ6‚AÇ%¡¬álÖK=Žô:ƒb(š7ýÅ.åÎáÊ á(šF•MC86bS:h$0ïýÎ{M¿ØŽ‹!±´êQf:ðbDuäÙGÁˆ‹ £]̶•Û†lªµ}åv[uÀ¨+:§½?nÈ»"ò ÁÉÝ ×ñ6}ç]«¦UYçÀs¥›ƳçRO²}ƒp”‹i|¡á(ùa´kœ: q‹û½Â}FúýÐaeûì=‚´â#¿øÈ )ÁKw¾n1Èé~qqë[þ@”Ó ƒÛIV•3s5(WSßNþ‹“‡dmohOýöãÖ+ aŠƒU!ÝÝCÊä÷|óN„Ã|~ÈêMðÇs†²Ý”™QoÞÜ, f,”¿ýÐW†è]DK–`…¯½ÿòüšçäæGn×Y!Õ“«¥bZ…°ƒˆ5Z »w!À·Í««ÂsþqØx.dRrÜ2éЮùü5öþÔuŸ²Žü(>¾ð¿/˜vxX¾8ãÐòÚý¯Ù\+ó¬ÁÐ0¬~tµm®’ ¹ ?Ú猼Ѿg×Ò`XBÊòБ–Ã…ôqlÊİ7At—Ɖkò1Kß¿Ô.óDþ\÷ÅëdçºÝÉ3.&è”Ð)± kt×Î×z]X)”¾cj䶃œŸÿßÏòãaÍ.£ìvòd3Ò»òý,]`{t°ìoú!Ó-„‹•<¢j#ˆ­s©'VÀh€˜Sê‘ ¹C‰¤6nÎå{…Çé÷Ò gC 9¬1ý÷Ç4faUÿH÷‹û;ˆ[ßè€YÒýü-ÏÛòÏ …âÙ–ß½Üö&`õG®†÷FC= $†2˜n¤B¼1qëi;ý0ûm0m9󈙙¡Ž°ºÊ¶À×ÍßPî„ô?ö›ÇRûíIŒé°$²ÙßñxÎHñîÌ€¿WôŒÚ¶µIóÎf9xö!réEŸc •ȦgÏJ0Bb âZ»}E²ìå‡eëk[¥rR¥”OIœG’iôòOT›ï¿/~Ûâ!£æ€ sŠQâ±cebd‚¸<“É¥LVN Luý—¯·•$4ŽlÜEe‰®à8á='ØÆKhÊ#=9ìÌÃl [U£Àsá¿\˜éQ²†ÿÎãm›rvæDÉ’†–ÑÞ³7?k$ ÓH)kY"m%„v˜tœè ÏJ–Û!=mÓHæ¢]NVÊ0R»õŸnµ¥¯ü6˜Æà»žôÁ“²ÚiIêÇ‹w¾(¿þÔ¯åÔOžj«‹èÌQÐä7Ä¿ÁÄ­W!ô­ØlIÙágnºH(y²Ú(l@Çrq¦4™¢DG†¥ùÍVoêEÍx×w¥ãgŽˆá¦6s©'#=§é¨Ô¢¸¢xÐÒá‘òí+ñH` /%äaÚzYnõ×HÏ÷w·¾q?ž‡iϰiÝHÏ'$6c0Þ}¸|qêUÈ‹~mPþ„ð¨MGÇo]á?CúñzÎPþl#¡`¤Ë]Í]Ò®ß|@ûò¹Sçʱ‹Ž—c'‡Ì>tØ3Fâb³Û#ýF­­òꆗååõËÕ~E6l[/’Ò)®*¶N†¥’%U%CÁÒËr¿#à8Ž€#àŒ”®»Zº¤³µÓ¦žº[º¥«³ËÈÜisåð9GÈásÃÔ®,­ý2äs‚‘~ΞNY½e•¬Üüº^+då¦×¥©-1¯V\ZlJˆE•Eb¤C5ŠñŒ´@ú=Üï8Ž€#àLdбczŒ‰žÖsww&¦k*jä 
YËA3Ñë`Y8Cw’.Úµ-Áx`7î#ÓC7µ7ÉÚ­kdí¶ä¥îMÛMÒ¹(­(•ÂòBSXbSIy‰•¹^G&0=ÌpG` À4Ób\ ìÞö^élë´UNùª¢0¥fª.#] óT!sþ4½¦/šòÄž;{¨½B02½`wo·lÚ¹ÑL öz=x­¡¹^—Ã'ç˜fh–%Èsth8c·gD¦{y˜#à8Ž€#°¯"€Нèã`‰è蕞v•Jèô&OÿMª®“¹º[JÌš<;efÞ¸lO¿ï>C0†{ñž¾ÙÒ°Y6ïÜ$›ÕÞÚ°E6Öom [¥½³=•Íȇ’‚ÒÓŽ7â¡S0œæ]šÊàGÀpG`/!€¢6Ž1…‘Àß×Ùgî@"x´òÒr™6iºÌ®›#Ó'Í™“fÊÌɳd†ÚEñv:ÝK¯(û<ÁÈL{W»lkÜ*[u3˜Æm6Ͳ¥q³¹[lÊ…üùùù¦d ù(()°Õ#%ER¤×pšõÙîïqŽ€#à8Ž@:¬êéRÒ $ÂÈ„®Ü`ËyH¢¿?q !SµU“dZí4™Q;Ó¦7pO¯¡aÓ¥¼$ûÊ¿ô{ïKþýš`d’•+ -õ²½y»ìÐkgóN©×k[“‘æmÒØÚ(Ý=»¶°å`7”N! ùÅù¶Â…eFDôÐÂ‹Š•-êúl7Ž€#à8­ïéVIƒVg※¿»ß‘Hôö&¶B¡â¢b©­¬•)ÕÓdZÍ4©«ž,“õšR=U¦ê5©ªn·—ƒî«_â€%qgIm}‹%"­ ænPBRßZ/MJBØD,jŠKtÚE%Ez¾D "b•ŒDí1>Ž<ú îvGÀ;Ø’Ð×=ØN‡ž~=¬Rõ!ºv J¹;›QÕ(y¨«¬3Â0©r’ÔUMVB]gî±^ú9vo=þ%Mh‚^t@ m)»ÉÜÒЦá]vÛÖÑ–š’¡\VÃ))RÒ‘W˜'ùJHaóšÂ¢B ‹º}ynœ/âiGÀ–köCz”0(qˆº l w@¥*…PÒ=c†)‹Š² aYg­‡I“LQSQ+µ\J‚½¯ë@ Òž‰q‚1F8³Ò¥¥£Eš•l4·7ëÕ$,ÇmQwK{‹’F»š;šÉIgWç;«tDÉG¾]JJt«^.HHê*ìvÝ‘!0z€#à  ËÀ)¯»¢nȃÆÛÕ©Péƒ^=݃¥ @QZ¢;ë&RÕeÕJjíª*¯’ªrõëòÍj»ª¥ZIEUY•­Ð8@ Ü«¯ác/ÁŽ„ÒÚѪî„mDÃaoáz’*´v))!_Ô õ(,R…U%FL tñȉJM WbBÂõœ³!/æÆpñ@r"ºCŠ0®R„¾¾„M8…~=ËLj‚Æ÷öô ’,ð|HJ”,T¨âc¥*% ‚ʲJóC pn¶âÉçfÏ#àcÏc¾[wdåLk§’AÒ¦z"¸;4®­³MɈÆAR4¬]ýíÝÉIx¦kPr…täùPMVå“<%,FL4âB˜…Cb4Œ8ö ¤%”é¶#àìŸ!P2À^ FpkçàJ„dÃýÕi„ $IJŽLQd2$”—éÌ “,@*J*¤Býe•6 ßP¡6n¦,*K«öë™°8ÐÜ`è_8ò~]=]ÒÑÝ.¦hÚÍ­DýJ‡’s«Ý®D¥SÃÛ{:,}—nûÞխ˫Ҥ(‘[¤H %‚DEˆäòÂÒa#%3·†¥»ÉcaÄáV‰G`¢"€®?KéÜqcãg‡Ç!n:|.–D*0‚Ìc?còj¼MC$ ÄpØ1(Ö•u{i¦Ê‹ÊÔV’ Ä LÉ„ a—››å•Ä•ëIÛ¤W™ºKŠJ†»…‡`8Á8À>èžxv]íìÖƒs”x@HXî‹’°u/|##¤I’àïÐ<ü=Z¤§§W׋ëÕׯ-àbR¯S=HZ’ä$²¢äC‚Â? 4JD,R’$&FT`;I‚cáÉ8Ê%¨‘MƒòÜÜ·›‰ƒ9 êà“½)jÕÅ6ò­}¹€Ÿ$tðÄ1ˆÆA Ô/Ì~ª…Ý H’#ý}Qv~E…º¿^tæEº«#gN”(9(SRPR¬!éOØI¿¦-Õ8#j³¼ÂP ‘À¿ì9Âë{ô>„€Œ}ècLôG¡íêÕ½õ•t@< 2=:‹mn ñ„+!‰†CrŒ¨èªVþô¨ßÒ&Ý„õ’'i÷ö«WŸ6¢KnEŸ+þF: 2’@d™IÚFF *z ä $”Ç’$ÈîYIƱIú(%9Ì pÍÒ“?§]Ò®ò-’üOdi Ç$ïc^}†!&”7pðü˜t&ø¢åš;‘Õò‡8ž×ò†8ó&Þ!äI¥¥C¦V“ʈ†ã~Ë”ì¼ #}Ÿh¼¹éÐÉc{Â62 aq:u»GÚpGbÇôca>Óz¬î¼ˆÍêƒ"íÀñ—* P·…Y¸¦ÓÎb€p'Â,m4\õ¯ˆ‡HëE<Ã#°/ àc_ø þ {:’Þ>]®ùP»7eãÞu…ø~%BñØ•¨Ÿøþ†[G£Äcs?ËC> £<Ü»ÒõiK«át|ä y"îþdHXè4!ly ¡£$“J:x MvÜIw*õÞ‰@í"Í¡ˆ¤Rщ¦““A~}€&„csAÌx¢„Ô(A\BœI¥4>?™.Aæð#­JØVÒ,õh¸I¶pÓ™[¸ê ©®7¤£Ó77$ é.,H„™®‘æ ~âó“~ˆáC¯Dxˆç~n‰Œ€Œ‰üõýÝøå-¿°Ô¾àÒryRGÀ˜¨8Åž¨_ÞßÛpGÀGœ`Œ#¸^´#à8Ž€#0Qp‚1Q¿¼¿·#à8Ž€#0Ž8ÁGp½hGÀpG`¢"àc¢~yoGÀpG`p‚1ŽàzÑŽ€#à8ŽÀDEÀ ÆDýòþÞŽ€#à8ŽÀ8"àcÁõ¢GÀp‰Š€Œ‰úåý½GÀpqDÀ Æ8‚ëE;Ž€#à8'õËû{;Ž€#à8㈀Œq׋vGÀp&*N0&ê—÷÷vGÀpÆ'ã®í8Ž€#àLTœ`LÔ/ïïí8Ž€#àŒ#N0Æ\/ÚpGÀ˜¨8Á˜¨_ÞßÛpGÀGœ`Œ#¸^´#à8Ž€#0Qp‚1Q¿¼¿·#à8Ž€#0Ž8ÁGp½hGÀpG`¢"àc¢~yoGÀpG`p‚1ŽàzÑŽ€#à8ŽÀDEÀ ÆDýòþÞŽ€#à8ŽÀ8"àcÁõ¢GÀp‰Š€Œ‰úåý½GÀpqDÀ Æ8‚ëE;Ž€#à8'õËû{;Ž€#à8㈀Œq׋vGÀp&*N0&ê—÷÷vGÀpÆ'ã®í8Ž€#àLTœ`LÔ/ïïí8Ž€#àŒ#N0Æ\/ÚpGÀ˜¨8Á˜¨_ÞßÛpGÀGœ`Œ#¸^´#à8Ž€#0Qp‚1Q¿¼¿·#à8Ž€#0Ž8ÁGp½hGÀpG`¢"àc¢~yoG`n¸ç÷òôËO¦RåçåK~~^ÊÿÔËOiÜ8Ž€# '™Pñ0GÀŽÎùím¿–—W/74úú¥¿ÀÜ„]{Ûo,Cå8Ž@&œ`dBÅÃG@N?ñLT\uÓÏdÝæµ)DpFiÜ8Ž€# ¼¶îÄ$S¬‡9ŽÀ„FàŠß~_VmX)¥¥e2¥vŠa±£q‡tªtcÑœƒäò÷}~Bãã/ï8Ã#P8|”Ç8ŽÀDGàô¥gÁ€Plز~ĹqG`8|Šd8d<Üpdñ¢#Ur1UòòòTÁ%Ï|sFœGÀp†CÀ ÆpÈx¸#à§Ÿ”гèïGɳßÂÎ8É¥^=G ;N0²ã㱎À„Gà ‹—JiIi ÜKŸ˜ò»ÃpL8ÁÈ„Š‡9Ž@ ‚B9uÉé©)Ü„¹qG ÞJdCÇã=„À€ H_Ÿôõé5ÐgS¶ï„.`jÂö HÚiîÝ›‚ø-E/ܘh凸NM½!á˜TX2 yey*î'W<.yúÏLÊJ8Ð×ÆÒ¨Ÿ0ÓãÐͺÌMîHéÙÈ+–§›zÙæ^¶Á—ê`GÜyè„D ò ¤ @¯ü‚]ÏÄmGÀØãø2Õ=¹ßpO"@'ÛÓÛ#=}zõv'mÜ!¬Gº5¼¯¿×Âzûz¥Wݽ½ê×<øûô²ü)_" qJ z)‹<Ä'ýýj÷$Ëê‡4˜þB‚<Б“/è4àÞL¹$HF»´ïkD#(¦éPB ÉWR9Q2R˜_(E*!¬°°HýjI‘†ã'ÒÒA^p¯WáIaa"ù-_¡K‘º-=vʭᔡ~’Gà@EÀ Æúe÷ñ÷¢ãïìé”îž.éJ^Ý=Ý Žp¶¦ëU?$A;tìÎnͯa!ùºˆ§£W‚@ÇFóqà°6«$9Ó%Ýøuh­þĨ[è¤s©›N‚¼¸Í&yrôMÜ©ðŽ16n²‘ßî±+?ÏkaQ;™^sZ>ò[–8’Ƀ&I$ÑŒ3‡yíO¸¿y’ÒŠPR‹àÞ•#³ ÂdϤÑA b÷'¹–› KúÍŠJK¸wxæ?ÁšÎòn…í*É¥IÚ©{‘6)Õ a–W;©²(—|úß$=I·Å'ÓYÝAÄm¸B¸¥Mä·û$•_ƒÛÊàÁb¾½%(ÅÅJJ jKIQ‰úK¤´¸4EV -%V¬qŸLWZTjù°CÞD¼æ×0'21>†'sœ`Œ9¤f4¶ÝÒÙÕ!íÝíÒÕÝe6~ ×Î>akÈÆwöh\ˆ×0A Ld3tnŒó i&.ë¨éˆéàéØ (ÐîM¯(! 
celery-4.1.0/docs/index.rst
Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. Celery is Open Source and licensed under the `BSD License`_.

Getting Started
===============

- If you're new to Celery you can get started by following the :ref:`first-steps` tutorial.
- You can also check out the :ref:`FAQ `.

.. _`BSD License`: http://www.opensource.org/licenses/BSD-3-Clause

Contents
========

.. toctree::
    :maxdepth: 1

    copyright

.. toctree::
    :maxdepth: 2

    getting-started/index
    userguide/index

..
toctree:: :maxdepth: 1 django/index contributing community tutorials/index faq changelog whatsnew-4.0 whatsnew-3.1 reference/index internals/index history/index glossary Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` celery-4.1.0/docs/changelog.rst0000644000175000017500000000003213130607475016337 0ustar omeromer00000000000000.. include:: ../Changelog celery-4.1.0/docs/make.bat0000644000175000017500000001646413130607475015303 0ustar omeromer00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. epub3 to make an epub3 echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. 
goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PROJ.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PROJ.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "epub3" ( %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. 
goto end
)

if "%1" == "xml" (
    %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The XML files are in %BUILDDIR%/xml.
    goto end
)

if "%1" == "pseudoxml" (
    %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
    goto end
)

:end
celery-4.1.0/docs/configuration.html0000644000175000017500000000012313130607475017414 0ustar omeromer00000000000000
Moved
=====
This document has now moved into the userguide: :ref:`configuration`
celery-4.1.0/docs/includes/0000755000175000017500000000000013135426347015473 5ustar omeromer00000000000000
celery-4.1.0/docs/includes/introduction.txt0000644000175000017500000001432613135426314020755 0ustar omeromer00000000000000
:Version: 4.1.0 (latentcall)
:Web: http://celeryproject.org/
:Download: https://pypi.python.org/pypi/celery/
:Source: https://github.com/celery/celery/
:Keywords: task, queue, job, async, rabbitmq, amqp, redis, python, distributed, actors

--

What's a Task Queue?
====================

Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work called a task; dedicated worker processes then constantly monitor the queue for new work to perform.

Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task, a client puts a message on the queue; the broker then delivers the message to a worker.

A Celery system can consist of multiple workers and brokers, giving way to high availability and horizontal scaling.

Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ for Node.js, and a `PHP client`_. Language interoperability can also be achieved by using webhooks in such a way that the client enqueues a URL to be requested by a worker.

.. _node-celery: https://github.com/mher/node-celery
.. _`PHP client`: https://github.com/gjedeer/celery-php

What do I need?
===============

Celery version 4.0 runs on:

- Python (2.7, 3.4, 3.5)
- PyPy (5.4, 5.5)

This is the last version to support Python 2.7, and from the next version (Celery 5.x) Python 3.5 or newer is required. If you're running an older version of Python, you need to be running an older version of Celery:

- Python 2.6: Celery series 3.1 or earlier.
- Python 2.5: Celery series 3.0 or earlier.
- Python 2.4: Celery series 2.2 or earlier.

Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform.

*Celery* is usually used with a message broker to send and receive messages. The RabbitMQ and Redis transports are feature complete, but there's also experimental support for a myriad of other solutions, including using SQLite for local development.

*Celery* can run on a single machine, on multiple machines, or even across datacenters.

Get Started
===========

If this is the first time you're trying to use Celery, or you're new to Celery 4.0 coming from previous versions, then you should read our getting started tutorials:

- `First steps with Celery`_

    Tutorial teaching you the bare minimum needed to get started with Celery.

- `Next steps`_

    A more complete overview, showing more features.

.. _`First steps with Celery`: http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html
.. _`Next steps`: http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
Celery is…
=============

- **Simple**

    Celery is easy to use and maintain, and does *not need configuration files*. It has an active, friendly community you can talk to for support, like at our `mailing-list`_, or the IRC channel.

    Here's one of the simplest applications you can make::

        from celery import Celery

        app = Celery('hello', broker='amqp://guest@localhost//')

        @app.task
        def hello():
            return 'hello world'

- **Highly Available**

    Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA by way of *Primary/Primary* or *Primary/Replica* replication.

- **Fast**

    A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, py-librabbitmq, and optimized settings).

- **Flexible**

    Almost every part of *Celery* can be extended or used on its own: custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more.

It supports…
================

- **Message Transports**

    - RabbitMQ_, Redis_, Amazon SQS

- **Concurrency**

    - Prefork, Eventlet_, gevent_, single threaded (``solo``)

- **Result Stores**

    - AMQP, Redis
    - memcached
    - SQLAlchemy, Django ORM
    - Apache Cassandra, IronCache, Elasticsearch

- **Serialization**

    - *pickle*, *json*, *yaml*, *msgpack*.
    - *zlib*, *bzip2* compression.
    - Cryptographic message signing.

.. _`Eventlet`: http://eventlet.net/
.. _`gevent`: http://gevent.org/
.. _RabbitMQ: https://rabbitmq.com
.. _Redis: https://redis.io
.. _SQLAlchemy: http://sqlalchemy.org

Framework Integration
=====================

Celery is easy to integrate with web frameworks, some of which even have integration packages:

+--------------------+------------------------+
| `Django`_          | not needed             |
+--------------------+------------------------+
| `Pyramid`_         | `pyramid_celery`_      |
+--------------------+------------------------+
| `Pylons`_          | `celery-pylons`_       |
+--------------------+------------------------+
| `Flask`_           | not needed             |
+--------------------+------------------------+
| `web2py`_          | `web2py-celery`_       |
+--------------------+------------------------+
| `Tornado`_         | `tornado-celery`_      |
+--------------------+------------------------+

The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at ``fork``.

.. _`Django`: https://djangoproject.com/
.. _`Pylons`: http://pylonsproject.org/
.. _`Flask`: http://flask.pocoo.org/
.. _`web2py`: http://web2py.com/
.. _`Bottle`: https://bottlepy.org/
.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
.. _`pyramid_celery`: https://pypi.python.org/pypi/pyramid_celery/
.. _`celery-pylons`: https://pypi.python.org/pypi/celery-pylons
.. _`web2py-celery`: https://code.google.com/p/web2py-celery/
.. _`Tornado`: http://www.tornadoweb.org/
.. _`tornado-celery`: https://github.com/mher/tornado-celery/

.. _celery-documentation:

Documentation
=============

The `latest documentation`_ is hosted at Read The Docs, containing user guides, tutorials, and an API reference.

.. _`latest documentation`: http://docs.celeryproject.org/en/latest/
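As a minimal sketch of how the pieces above fit together — assuming the ``hello`` module from the *Simple* example is importable and a result backend has been configured (both assumptions, not part of this introduction) — a client enqueues a task and fetches its result like this:

.. code-block:: python

    from hello import hello

    # .delay() sends a task message to the broker;
    # a running worker picks it up and executes hello().
    result = hello.delay()

    # .get() blocks until the worker has stored the return value
    # (it raises an error if no result backend is configured).
    print(result.get(timeout=10))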
_mailing-list: Mailing list ------------ For discussions about the usage, development, and future of Celery, please join the `celery-users`_ mailing list. .. _`celery-users`: https://groups.google.com/group/celery-users/ .. _irc-channel: IRC --- Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ network. .. _`Freenode`: https://freenode.net .. _bug-tracker: Bug tracker =========== If you have any suggestions, bug reports, or annoyances please report them to our issue tracker at https://github.com/celery/celery/issues/ .. _wiki: Wiki ==== https://wiki.github.com/celery/celery/ .. _contributing-short: Contributing ============ Development of `celery` happens at GitHub: https://github.com/celery/celery You're highly encouraged to participate in the development of `celery`. If you don't like GitHub (for some reason) you're welcome to send regular patches. Be sure to also read the `Contributing to Celery`_ section in the documentation. .. _`Contributing to Celery`: http://docs.celeryproject.org/en/master/contributing.html .. _license: License ======= This software is licensed under the `New BSD License`. See the :file:`LICENSE` file in the top distribution directory for the full license text. .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround celery-4.1.0/docs/includes/installation.txt0000644000175000017500000000753613130607475020746 0ustar omeromer00000000000000.. _celery-installation: Installation ============ You can install Celery either via the Python Package Index (PyPI) or from source. To install using :command:`pip`: .. code-block:: console $ pip install -U Celery .. _bundles: Bundles ------- Celery also defines a group of bundles that can be used to install Celery and the dependencies for a given feature. You can specify these in your requirements or on the :command:`pip` command-line by using brackets. Multiple bundles can be specified by separating them by commas. .. code-block:: console $ pip install "celery[librabbitmq]" $ pip install "celery[librabbitmq,redis,auth,msgpack]" The following bundles are available: Serializers ~~~~~~~~~~~ :``celery[auth]``: for using the ``auth`` security serializer. :``celery[msgpack]``: for using the msgpack serializer. :``celery[yaml]``: for using the yaml serializer. Concurrency ~~~~~~~~~~~ :``celery[eventlet]``: for using the :pypi:`eventlet` pool. :``celery[gevent]``: for using the :pypi:`gevent` pool. Transports and Backends ~~~~~~~~~~~~~~~~~~~~~~~ :``celery[librabbitmq]``: for using the librabbitmq C library. :``celery[redis]``: for using Redis as a message transport or as a result backend. :``celery[sqs]``: for using Amazon SQS as a message transport (*experimental*). :``celery[tblib]``: for using the :setting:`task_remote_tracebacks` feature. :``celery[memcache]``: for using Memcached as a result backend (using :pypi:`pylibmc`) :``celery[pymemcache]``: for using Memcached as a result backend (pure-Python implementation). :``celery[cassandra]``: for using Apache Cassandra as a result backend with DataStax driver. :``celery[couchbase]``: for using Couchbase as a result backend. :``celery[elasticsearch]``: for using Elasticsearch as a result backend. :``celery[riak]``: for using Riak as a result backend. :``celery[dynamodb]``: for using AWS DynamoDB as a result backend. :``celery[zookeeper]``: for using Zookeeper as a message transport. :``celery[sqlalchemy]``: for using SQLAlchemy as a result backend (*supported*). :``celery[pyro]``: for using the Pyro4 message transport (*experimental*). 
:``celery[slmq]``: for using the SoftLayer Message Queue transport (*experimental*).

:``celery[consul]``: for using the Consul.io Key/Value store as a message transport or result backend (*experimental*).

:``celery[django]``: specifies the lowest version possible for Django support. You should probably not use this in your requirements, it's here for informational purposes only.

.. _celery-installing-from-source:

Downloading and installing from source
--------------------------------------

Download the latest version of Celery from PyPI: https://pypi.python.org/pypi/celery/

You can install it by doing the following:

.. code-block:: console

    $ tar xvfz celery-0.0.0.tar.gz
    $ cd celery-0.0.0
    $ python setup.py build
    # python setup.py install

The last command must be executed as a privileged user if you aren't currently using a virtualenv.

.. _celery-installing-from-git:

Using the development version
-----------------------------

With pip
~~~~~~~~

The Celery development version also requires the development versions of :pypi:`kombu`, :pypi:`amqp`, :pypi:`billiard`, and :pypi:`vine`. You can install the latest snapshot of these using the following pip commands:

.. code-block:: console

    $ pip install https://github.com/celery/celery/zipball/master#egg=celery
    $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
    $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
    $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu
    $ pip install https://github.com/celery/vine/zipball/master#egg=vine

With git
~~~~~~~~

Please see the Contributing section.
celery-4.1.0/docs/whatsnew-4.0.rst0000644000175000017500000022510313130607475016557 0ustar omeromer00000000000000
.. _whatsnew-4.0:

===========================================
 What's new in Celery 4.0 (latentcall)
===========================================
:Author: Ask Solem (``ask at celeryproject.org``)

.. sidebar:: Change history

    What's new documents describe the changes in major versions, we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section.

Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling.

Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `.

To read more about Celery you should go read the :ref:`introduction `.

While this version is backward compatible with previous versions it's important that you read the following section.

This version is officially supported on CPython 2.7, 3.4, and 3.5, and is also supported on PyPy.

.. _`website`: http://celeryproject.org/

.. topic:: Table of Contents

    Make sure you read the important notes before upgrading to this version.

.. contents::
    :local:
    :depth: 3

Preface
=======

Welcome to Celery 4!

This is a massive release with over two years of changes. Not only does it come with many new features, but it also fixes a massive list of bugs, so in many ways you could call it our "Snow Leopard" release.

The next major version of Celery will support Python 3.5 only, where we are planning to take advantage of the new asyncio library.

This release would not have been possible without the support of my employer, `Robinhood`_ (we're hiring!).
- Ask Solem

Dedicated to Sebastian "Zeb" Bjørnerud (RIP), with special thanks to `Ty Wilkins`_, for designing our new logo, all the contributors who help make this happen, and my colleagues at `Robinhood`_.

.. _`Ty Wilkins`: http://tywilkins.com
.. _`Robinhood`: https://robinhood.com

Wall of Contributors
--------------------

Aaron McMillin, Adam Chainz, Adam Renberg, Adriano Martins de Jesus, Adrien Guinet, Ahmet Demir, Aitor Gómez-Goiri, Alan Justino, Albert Wang, Alex Koshelev, Alex Rattray, Alex Williams, Alexander Koshelev, Alexander Lebedev, Alexander Oblovatniy, Alexey Kotlyarov, Ali Bozorgkhan, Alice Zoë Bevan–McGregor, Allard Hoeve, Alman One, Amir Rustamzadeh, Andrea Rabbaglietti, Andrea Rosa, Andrei Fokau, Andrew Rodionoff, Andrew Stewart, Andriy Yurchuk, Aneil Mallavarapu, Areski Belaid, Armenak Baburyan, Arthur Vuillard, Artyom Koval, Asif Saifuddin Auvi, Ask Solem, Balthazar Rouberol, Batiste Bieler, Berker Peksag, Bert Vanderbauwhede, Brendan Smithyman, Brian Bouterse, Bryce Groff, Cameron Will, ChangBo Guo, Chris Clark, Chris Duryee, Chris Erway, Chris Harris, Chris Martin, Chillar Anand, Colin McIntosh, Conrad Kramer, Corey Farwell, Craig Jellick, Cullen Rhodes, Dallas Marlow, Daniel Devine, Daniel Wallace, Danilo Bargen, Davanum Srinivas, Dave Smith, David Baumgold, David Harrigan, David Pravec, Dennis Brakhane, Derek Anderson, Dmitry Dygalo, Dmitry Malinovsky, Dongweiming, Dudás Ádám, Dustin J. Mitchell, Ed Morley, Edward Betts, Éloi Rivard, Emmanuel Cazenave, Fahad Siddiqui, Fatih Sucu, Feanil Patel, Federico Ficarelli, Felix Schwarz, Felix Yan, Fernando Rocha, Flavio Grossi, Frantisek Holop, Gao Jiangmiao, George Whewell, Gerald Manipon, Gilles Dartiguelongue, Gino Ledesma, Greg Wilbur, Guillaume Seguin, Hank John, Hogni Gylfason, Ilya Georgievsky, Ionel Cristian Mărieș, Ivan Larin, James Pulec, Jared Lewis, Jason Veatch, Jasper Bryant-Greene, Jeff Widman, Jeremy Tillman, Jeremy Zafran, Jocelyn Delalande, Joe Jevnik, Joe Sanford, John Anderson, John Barham, John Kirkham, John Whitlock, Jonathan Vanasco, Joshua Harlow, João Ricardo, Juan Carlos Ferrer, Juan Rossi, Justin Patrin, Kai Groner, Kevin Harvey, Kevin Richardson, Komu Wairagu, Konstantinos Koukopoulos, Kouhei Maeda, Kracekumar Ramaraju, Krzysztof Bujniewicz, Latitia M. Haskins, Len Buckens, Lev Berman, lidongming, Lorenzo Mancini, Lucas Wiman, Luke Pomfrey, Luyun Xie, Maciej Obuchowski, Manuel Kaufmann, Marat Sharafutdinov, Marc Sibson, Marcio Ribeiro, Marin Atanasov Nikolov, Mathieu Fenniak, Mark Parncutt, Mauro Rocco, Maxime Beauchemin, Maxime Vdb, Mher Movsisyan, Michael Aquilina, Michael Duane Mooring, Michael Permana, Mickaël Penhard, Mike Attwood, Mitchel Humpherys, Mohamed Abouelsaoud, Morris Tweed, Morton Fox, Môshe van der Sterre, Nat Williams, Nathan Van Gheem, Nicolas Unravel, Nik Nyby, Omer Katz, Omer Korner, Ori Hoch, Paul Pearce, Paulo Bu, Pavlo Kapyshin, Philip Garnero, Pierre Fersing, Piotr Kilczuk, Piotr Maślanka, Quentin Pradet, Radek Czajka, Raghuram Srinivasan, Randy Barlow, Raphael Michel, Rémy Léone, Robert Coup, Robert Kolba, Rockallite Wulf, Rodolfo Carvalho, Roger Hu, Romuald Brunet, Rongze Zhu, Ross Deane, Ryan Luckie, Rémy Greinhofer, Samuel Giffard, Samuel Jaillet, Sergey Azovskov, Sergey Tikhonov, Seungha Kim, Simon Peeters, Spencer E.
Olson, Srinivas Garlapati, Stephen Milner, Steve Peak, Steven Sklar, Stuart Axon, Sukrit Khera, Tadej Janež, Taha Jahangir, Takeshi Kanemoto, Tayfun Sen, Tewfik Sadaoui, Thomas French, Thomas Grainger, Tomas Machalek, Tobias Schottdorf, Tocho Tochev, Valentyn Klindukh, Vic Kumar, Vladimir Bolshakov, Vladimir Gorbunov, Wayne Chang, Wieland Hoffmann, Wido den Hollander, Wil Langford, Will Thompson, William King, Yury Selivanov, Vytis Banaitis, Zoran Pavlovic, Xin Li, 許邱翔, :github_user:`allenling`, :github_user:`alzeih`, :github_user:`bastb`, :github_user:`bee-keeper`, :github_user:`ffeast`, :github_user:`firefly4268`, :github_user:`flyingfoxlee`, :github_user:`gdw2`, :github_user:`gitaarik`, :github_user:`hankjin`, :github_user:`lvh`, :github_user:`m-vdb`, :github_user:`kindule`, :github_user:`mdk`, :github_user:`michael-k`, :github_user:`mozillazg`, :github_user:`nokrik`, :github_user:`ocean1`, :github_user:`orlo666`, :github_user:`raducc`, :github_user:`wanglei`, :github_user:`worldexception`, :github_user:`xBeAsTx`.

.. note::

    This wall was automatically generated from git history, so sadly it doesn't include the people who help with more important things like answering mailing-list questions.

Upgrading from Celery 3.1
=========================

Step 1: Upgrade to Celery 3.1.25
--------------------------------

If you haven't already, the first step is to upgrade to Celery 3.1.25.

This version adds forward compatibility to the new message protocol, so that you can incrementally upgrade from 3.1 to 4.0.

Deploy the workers first by upgrading to 3.1.25; this means these workers can process messages sent by clients using both 3.1 and 4.0.

After the workers are upgraded you can upgrade the clients (e.g. web servers).

Step 2: Update your configuration with the new setting names
------------------------------------------------------------

This version radically changes the configuration setting names, to be more consistent.

The changes are fully backwards compatible, so you have the option to wait until the old setting names are deprecated, but to ease the transition we have included a command-line utility that rewrites your settings automatically.

See :ref:`v400-upgrade-settings` for more information.

Step 3: Read the important notes in this document
-------------------------------------------------

Make sure you are not affected by any of the important upgrade notes mentioned in the following section.

An especially important note is that Celery now checks the arguments you send to a task by matching them to the signature (:ref:`v400-typing`).

Step 4: Upgrade to Celery 4.0
-----------------------------

At this point you can upgrade your workers and clients with the new version.

.. _v400-important:

Important Notes
===============

Dropped support for Python 2.6
------------------------------

Celery now requires Python 2.7 or later, and also drops support for Python 3.3 so supported versions are:

- CPython 2.7
- CPython 3.4
- CPython 3.5
- PyPy 5.4 (``pypy2``)
- PyPy 5.5-alpha (``pypy3``)

Last major version to support Python 2
--------------------------------------

Starting from Celery 5.0 only Python 3.5+ will be supported.

To make sure you're not affected by this change you should pin the Celery version in your requirements file, either to a specific version: ``celery==4.0.0``, or a range: ``celery>=4.0,<5.0``.
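For instance, a hypothetical :file:`requirements.txt` (the file name is an assumption; any pip requirements file works the same way) that stays on the 4.x series while still receiving bugfix releases could contain:

.. code-block:: text

    celery>=4.0,<5.0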
Dropping support for Python 2 will enable us to remove massive amounts of compatibility code, and going with Python 3.5 allows us to take advantage of typing, async/await, asyncio, and similar concepts there's no alternative for in older versions.

Celery 4.x will continue to work on Python 2.7, 3.4, 3.5; just as Celery 3.x still works on Python 2.6.

Django support
--------------

Celery 4.x requires Django 1.8 or later, but we really recommend using at least Django 1.9 for the new ``transaction.on_commit`` feature.

A common problem when calling tasks from Django is when the task is related to a model change, and you wish to cancel the task if the transaction is rolled back, or ensure the task is only executed after the changes have been written to the database.

``transaction.on_commit`` enables you to solve this problem by adding the task as a callback to be called only when the transaction is committed.

Example usage:

.. code-block:: python

    from functools import partial
    from django.db import transaction

    from .models import Article, Log
    from .tasks import send_article_created_notification

    def create_article(request):
        with transaction.atomic():
            article = Article.objects.create(**request.POST)
            # send this task only if the rest of the transaction succeeds.
            transaction.on_commit(partial(
                send_article_created_notification.delay, article_id=article.pk))
            Log.objects.create(type=Log.ARTICLE_CREATED, object_pk=article.pk)

Removed features
----------------

- Microsoft Windows is no longer supported.

    The test suite is passing, and Celery seems to be working with Windows, but we make no guarantees as we are unable to diagnose issues on this platform. If you are a company requiring support on this platform, please get in touch.

- Jython is no longer supported.

Features removed for simplicity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- Webhook task machinery (``celery.task.http``) has been removed.

    Nowadays it's easy to use the :pypi:`requests` module to write webhook tasks manually. We would love to use requests but we are simply unable to as there's a very vocal 'anti-dependency' mob in the Python community. If you need backwards compatibility you can simply copy + paste the 3.1 version of the module and make sure it's imported by the worker: https://github.com/celery/celery/blob/3.1/celery/task/http.py

- Tasks no longer send error emails.

    This also removes support for ``app.mail_admins``, and any functionality related to sending emails.

- ``celery.contrib.batches`` has been removed.

    This was an experimental feature, so not covered by our deprecation timeline guarantee. You can copy and paste the existing batches code for use within your projects: https://github.com/celery/celery/blob/3.1/celery/contrib/batches.py

Features removed for lack of funding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

We announced with the 3.1 release that some transports were moved to experimental status, and that there'd be no official support for the transports.

As this subtle hint for the need of funding failed we've removed them completely, breaking backwards compatibility.

- Using the Django ORM as a broker is no longer supported.

    You can still use the Django ORM as a result backend: see :ref:`django-celery-results` section for more information.

- Using SQLAlchemy as a broker is no longer supported.

    You can still use SQLAlchemy as a result backend.

- Using CouchDB as a broker is no longer supported.

    You can still use CouchDB as a result backend.

- Using IronMQ as a broker is no longer supported.
- Using Beanstalk as a broker is no longer supported. In addition some features have been removed completely so that attempting to use them will raise an exception: - The ``--autoreload`` feature has been removed. This was an experimental feature, and not covered by our deprecation timeline guarantee. The flag is removed completely so the worker will crash at startup when present. Luckily this flag isn't used in production systems. - The experimental ``threads`` pool is no longer supported and has been removed. - The ``force_execv`` feature is no longer supported. The ``celery worker`` command now ignores the ``--no-execv``, ``--force-execv``, and the ``CELERYD_FORCE_EXECV`` setting. This flag will be removed completely in 5.0 and the worker will raise an error. - The old legacy "amqp" result backend has been deprecated, and will be removed in Celery 5.0. Please use the ``rpc`` result backend for RPC-style calls, and a persistent result backend for multi-consumer results. We think most of these can be fixed without considerable effort, so if you're interested in getting any of these features back, please get in touch. **Now to the good news**... New Task Message Protocol ------------------------- .. :sha:`e71652d384b1b5df2a4e6145df9f0efb456bc71c` This version introduces a brand new task message protocol, the first major change to the protocol since the beginning of the project. The new protocol is enabled by default in this version and since the new version isn't backwards compatible you have to be careful when upgrading. The 3.1.25 version was released to add compatibility with the new protocol so the easiest way to upgrade is to upgrade to that version first, then upgrade to 4.0 in a second deployment. If you wish to keep using the old protocol you may also configure the protocol version number used: .. code-block:: python app = Celery() app.conf.task_protocol = 1 Read more about the features available in the new protocol in the news section found later in this document. .. _v400-upgrade-settings: Lowercase setting names ----------------------- In the pursuit of beauty all settings are now renamed to be in all lowercase and some setting names have been renamed for consistency. This change is fully backwards compatible so you can still use the uppercase setting names, but we would like you to upgrade as soon as possible and you can do this automatically using the :program:`celery upgrade settings` command: .. code-block:: console $ celery upgrade settings proj/settings.py This command will modify your module in-place to use the new lower-case names (if you want uppercase with a "``CELERY``" prefix see block below), and save a backup in :file:`proj/settings.py.orig`. .. admonition:: For Django users and others who want to keep uppercase names If you're loading Celery configuration from the Django settings module then you'll want to keep using the uppercase names. You also want to use a ``CELERY_`` prefix so that no Celery settings collide with Django settings used by other apps. To do this, you'll first need to convert your settings file to use the new consistent naming scheme, and add the prefix to all Celery related settings: .. code-block:: console $ celery upgrade settings proj/settings.py --django After upgrading the settings file, you need to set the prefix explicitly in your ``proj/celery.py`` module: .. 
code-block:: python

    app.config_from_object('django.conf:settings', namespace='CELERY')

You can find the most up to date Django Celery integration example here: :ref:`django-first-steps`.

.. note::

    This will also add a prefix to settings that didn't previously have one, for example ``BROKER_URL`` should be written ``CELERY_BROKER_URL`` with a namespace of ``CELERY``.

    Luckily you don't have to manually change the files, as the :program:`celery upgrade settings --django` program should do the right thing.

The loader will try to detect if your configuration is using the new format, and act accordingly, but this also means you're not allowed to mix and match new and old setting names, that's unless you provide a value for both alternatives.

The major difference between previous versions, apart from the lower case names, is the renaming of some prefixes, like ``celerybeat_`` to ``beat_``, ``celeryd_`` to ``worker_``.

The ``celery_`` prefix has also been removed, and task related settings from this name-space are now prefixed by ``task_``, worker related settings with ``worker_``.

Apart from this most of the settings will be the same in lowercase, apart from a few special ones:

=====================================  ==========================================================
**Setting name**                       **Replace with**
=====================================  ==========================================================
``CELERY_MAX_CACHED_RESULTS``          :setting:`result_cache_max`
``CELERY_MESSAGE_COMPRESSION``         :setting:`result_compression`/:setting:`task_compression`.
``CELERY_TASK_RESULT_EXPIRES``         :setting:`result_expires`
``CELERY_RESULT_DBURI``                :setting:`result_backend`
``CELERY_RESULT_ENGINE_OPTIONS``       :setting:`database_engine_options`
``-*-_DB_SHORT_LIVED_SESSIONS``        :setting:`database_short_lived_sessions`
``CELERY_RESULT_DB_TABLE_NAMES``       :setting:`database_db_names`
``CELERY_ACKS_LATE``                   :setting:`task_acks_late`
``CELERY_ALWAYS_EAGER``                :setting:`task_always_eager`
``CELERY_ANNOTATIONS``                 :setting:`task_annotations`
``CELERY_MESSAGE_COMPRESSION``         :setting:`task_compression`
``CELERY_CREATE_MISSING_QUEUES``       :setting:`task_create_missing_queues`
``CELERY_DEFAULT_DELIVERY_MODE``       :setting:`task_default_delivery_mode`
``CELERY_DEFAULT_EXCHANGE``            :setting:`task_default_exchange`
``CELERY_DEFAULT_EXCHANGE_TYPE``       :setting:`task_default_exchange_type`
``CELERY_DEFAULT_QUEUE``               :setting:`task_default_queue`
``CELERY_DEFAULT_RATE_LIMIT``          :setting:`task_default_rate_limit`
``CELERY_DEFAULT_ROUTING_KEY``         :setting:`task_default_routing_key`
``-"-_EAGER_PROPAGATES_EXCEPTIONS``    :setting:`task_eager_propagates`
``CELERY_IGNORE_RESULT``               :setting:`task_ignore_result`
``CELERY_TASK_PUBLISH_RETRY``          :setting:`task_publish_retry`
``CELERY_TASK_PUBLISH_RETRY_POLICY``   :setting:`task_publish_retry_policy`
``CELERY_QUEUES``                      :setting:`task_queues`
``CELERY_ROUTES``                      :setting:`task_routes`
``CELERY_SEND_TASK_SENT_EVENT``        :setting:`task_send_sent_event`
``CELERY_TASK_SERIALIZER``             :setting:`task_serializer`
``CELERYD_TASK_SOFT_TIME_LIMIT``       :setting:`task_soft_time_limit`
``CELERYD_TASK_TIME_LIMIT``            :setting:`task_time_limit`
``CELERY_TRACK_STARTED``               :setting:`task_track_started`
``CELERY_DISABLE_RATE_LIMITS``         :setting:`worker_disable_rate_limits`
``CELERY_ENABLE_REMOTE_CONTROL``       :setting:`worker_enable_remote_control`
``CELERYD_SEND_EVENTS``                :setting:`worker_send_task_events`
=====================================  ==========================================================

You can see a full table of the changes in :ref:`conf-old-settings-map`.
Json is now the default serializer
----------------------------------

The time has finally come to end the reign of :mod:`pickle` as the default serialization mechanism, and json is the default serializer starting from this version.

This change was :ref:`announced with the release of Celery 3.1 `.

If you're still depending on :mod:`pickle` being the default serializer, then you have to configure your app before upgrading to 4.0:

.. code-block:: python

    task_serializer = 'pickle'
    result_serializer = 'pickle'
    accept_content = {'pickle'}

The Json serializer now also supports some additional types:

- :class:`~datetime.datetime`, :class:`~datetime.time`, :class:`~datetime.date`

    Converted to json text, in ISO-8601 format.

- :class:`~decimal.Decimal`

    Converted to json text.

- :class:`django.utils.functional.Promise`

    Django only: Lazy strings used for translation etc., are evaluated and conversion to a json type is attempted.

- :class:`uuid.UUID`

    Converted to json text.

You can also define a ``__json__`` method on your custom classes to support JSON serialization (must return a json compatible type):

.. code-block:: python

    class Person:
        first_name = None
        last_name = None
        address = None

        def __json__(self):
            return {
                'first_name': self.first_name,
                'last_name': self.last_name,
                'address': self.address,
            }

The Task base class no longer automatically registers tasks
------------------------------------------------------------

The :class:`~@Task` class is no longer using a special meta-class that automatically registers the task in the task registry.

Instead this is now handled by the :class:`@task` decorators.

If you're still using class based tasks, then you need to register these manually:

.. code-block:: python

    class CustomTask(Task):
        def run(self):
            print('running')
    CustomTask = app.register_task(CustomTask())

The best practice is to use custom task classes only for overriding general behavior, and then using the task decorator to realize the task:

.. code-block:: python

    @app.task(bind=True, base=CustomTask)
    def custom(self):
        print('running')

This change also means that the ``abstract`` attribute of the task no longer has any effect.

.. _v400-typing:

Task argument checking
----------------------

The arguments of the task are now verified when calling the task, even asynchronously:

.. code-block:: pycon

    >>> @app.task
    ... def add(x, y):
    ...     return x + y

    >>> add.delay(8, 8)

    >>> add.delay(8)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "celery/app/task.py", line 376, in delay
        return self.apply_async(args, kwargs)
      File "celery/app/task.py", line 485, in apply_async
        check_arguments(*(args or ()), **(kwargs or {}))
    TypeError: add() takes exactly 2 arguments (1 given)

You can disable the argument checking for any task by setting its :attr:`~@Task.typing` attribute to :const:`False`:

.. code-block:: pycon

    >>> @app.task(typing=False)
    ... def add(x, y):
    ...     return x + y

Or if you would like to disable this completely for all tasks you can pass ``strict_typing=False`` when creating the app:

.. code-block:: python

    app = Celery(..., strict_typing=False)

Redis Events not backward compatible
------------------------------------

The Redis ``fanout_patterns`` and ``fanout_prefix`` transport options are now enabled by default.

Workers/monitors without these flags enabled won't be able to see workers with this flag disabled. They can still execute tasks, but they cannot receive each other's monitoring messages.
You can upgrade in a backward compatible manner by first configuring your 3.1 workers and monitors to enable the settings, before the final upgrade to 4.0:

.. code-block:: python

    BROKER_TRANSPORT_OPTIONS = {
        'fanout_patterns': True,
        'fanout_prefix': True,
    }

Redis Priorities Reversed
-------------------------

Priority 0 is now lowest, 9 is highest.

This change was made to make priority support consistent with how it works in AMQP.

Contributed by **Alex Koshelev**.

Django: Auto-discover now supports Django app configurations
------------------------------------------------------------

The ``autodiscover_tasks()`` function can now be called without arguments, and the Django handler will automatically find your installed apps:

.. code-block:: python

    app.autodiscover_tasks()

The Django integration :ref:`example in the documentation ` has been updated to use the argument-less call.

This also ensures compatibility with the new, ehm, ``AppConfig`` stuff introduced in recent Django versions.

Worker direct queues no longer use auto-delete
----------------------------------------------

Workers/clients running 4.0 will no longer be able to send worker direct messages to workers running older versions, and vice versa.

If you're relying on worker direct messages you should upgrade your 3.x workers and clients to use the new routing settings first, by replacing :func:`celery.utils.worker_direct` with this implementation:

.. code-block:: python

    from kombu import Exchange, Queue

    worker_direct_exchange = Exchange('C.dq2')

    def worker_direct(hostname):
        return Queue(
            '{hostname}.dq2'.format(hostname=hostname),
            exchange=worker_direct_exchange,
            routing_key=hostname,
        )

This feature closed Issue #2492.

Old command-line programs removed
---------------------------------

Installing Celery will no longer install the ``celeryd``, ``celerybeat`` and ``celeryd-multi`` programs.

This was announced with the release of Celery 3.1, but you may still have scripts pointing to the old names, so make sure you update these to use the new umbrella command:

+-------------------+--------------+-------------------------------------+
| Program           | New Status   | Replacement                         |
+===================+==============+=====================================+
| ``celeryd``       | **REMOVED**  | :program:`celery worker`            |
+-------------------+--------------+-------------------------------------+
| ``celerybeat``    | **REMOVED**  | :program:`celery beat`              |
+-------------------+--------------+-------------------------------------+
| ``celeryd-multi`` | **REMOVED**  | :program:`celery multi`             |
+-------------------+--------------+-------------------------------------+

.. _v400-news:

News
====

New protocol highlights
-----------------------

The new protocol fixes many problems with the old one, and enables some long-requested features:

- Most of the data are now sent as message headers, instead of being serialized with the message body.

    In version 1 of the protocol the worker always had to deserialize the message to be able to read task meta-data like the task id, name, etc.

    This also meant that the worker was forced to double-decode the data, first deserializing the message on receipt, serializing the message again to send to the child process, then finally the child process deserializes the message again.
    Keeping the meta-data fields in the message headers means the worker doesn't actually have to decode the payload before delivering the task to the child process, and also that it's now possible for the worker to reroute a task written in a language different from Python to a different worker.

- A new ``lang`` message header can be used to specify the programming language the task is written in.

- Worker stores results for internal errors like ``ContentDisallowed``, and other deserialization errors.

- Worker stores results and sends monitoring events for unregistered task errors.

- Worker calls callbacks/errbacks even when the result is sent by the parent process (e.g., :exc:`WorkerLostError` when a child process terminates, deserialization errors, unregistered tasks).

- A new ``origin`` header contains information about the process sending the task (worker node-name, or PID and host-name information).

- A new ``shadow`` header allows you to modify the task name used in logs.

    This is useful for dispatch-like patterns, like a task that calls any function using pickle (don't do this at home):

    .. code-block:: python

        from celery import Task
        from celery.utils.imports import qualname

        class call_as_task(Task):

            def shadow_name(self, args, kwargs, options):
                return 'call_as_task:{0}'.format(qualname(args[0]))

            def run(self, fun, *args, **kwargs):
                return fun(*args, **kwargs)
        call_as_task = app.register_task(call_as_task())

- New ``argsrepr`` and ``kwargsrepr`` fields contain textual representations of the task arguments (possibly truncated) for use in logs, monitors, etc.

    This means the worker doesn't have to deserialize the message payload to display the task arguments for informational purposes.

- Chains now use a dedicated ``chain`` field enabling support for chains of thousands and more tasks.

- New ``parent_id`` and ``root_id`` headers add information about a task's relationship with other tasks.

    - ``parent_id`` is the task id of the task that called this task
    - ``root_id`` is the first task in the work-flow.

    These fields can be used to improve monitors like flower to group related messages together (like chains, groups, chords, complete work-flows, etc).

- ``app.TaskProducer`` replaced by :meth:`@amqp.create_task_message` and :meth:`@amqp.send_task_message`.

    Dividing the responsibilities into creating and sending means that people who want to send messages using a Python AMQP client directly, don't have to implement the protocol.

    The :meth:`@amqp.create_task_message` method calls either :meth:`@amqp.as_task_v2`, or :meth:`@amqp.as_task_v1` depending on the configured task protocol, and returns a special :class:`~celery.app.amqp.task_message` tuple containing the headers, properties and body of the task message.

.. seealso::

    The new task protocol is documented in full here: :ref:`message-protocol-task-v2`.

Prefork Pool Improvements
-------------------------

Tasks now log from the child process
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Logging of task success/failure now happens from the child process executing the task. As a result logging utilities like Sentry can get full information about tasks, including variables in the traceback stack.

``-Ofair`` is now the default scheduling strategy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To re-enable the default behavior in 3.1 use the ``-Ofast`` command-line option.
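For example — ``proj`` here is a stand-in for your own application module, not part of this release note — the scheduling strategy is selected on the worker command-line:

.. code-block:: console

    $ # the 4.0 default, same as passing -Ofair explicitly:
    $ celery -A proj worker -l info -Ofair

    $ # opt back into the 3.1 behavior:
    $ celery -A proj worker -l info -Ofast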
There's been lots of confusion about what the ``-Ofair`` command-line option does, and using the term "prefetch" in explanations has probably not helped, given how confusing this terminology is in AMQP.

When a Celery worker using the prefork pool receives a task, it needs to delegate that task to a child process for execution.

The prefork pool has a configurable number of child processes (``--concurrency``) that can be used to execute tasks, and each child process uses pipes/sockets to communicate with the parent process:

- inqueue (pipe/socket): parent sends task to the child process
- outqueue (pipe/socket): child sends result/return value to the parent.

In Celery 3.1 the default scheduling mechanism was simply to send the task to the first ``inqueue`` that was writable, with some heuristics to make sure we round-robin between them to ensure each child process would receive the same number of tasks.

This means that in the default scheduling strategy, a worker may send tasks to the same child process that is already executing a task. If that task is long running, it may block the waiting task for a long time. Even worse, hundreds of short-running tasks may be stuck behind a long running task even when there are child processes free to do work.

The ``-Ofair`` scheduling strategy was added to avoid this situation, and when enabled it adds the rule that no task should be sent to a child process that is already executing a task.

The fair scheduling strategy may perform slightly worse if you have only short running tasks.

Limit child process resident memory size
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. :sha:`5cae0e754128750a893524dcba4ae030c414de33`

You can now limit the maximum amount of memory allocated per prefork pool child process by setting the worker :option:`--max-memory-per-child ` option, or the :setting:`worker_max_memory_per_child` setting.

The limit is for RSS/resident memory size and is specified in kilobytes.

A child process having exceeded the limit will be terminated and replaced with a new process after the currently executing task returns.

See :ref:`worker-max-memory-per-child` for more information.

Contributed by **Dave Smith**.

One log-file per child process
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Init-scripts and :program:`celery multi` now use the `%I` log file format option (e.g., :file:`/var/log/celery/%n%I.log`).

This change was necessary to ensure each child process has a separate log file after moving task logging to the child process, as multiple processes writing to the same log file can cause corruption.

You're encouraged to upgrade your init-scripts and :program:`celery multi` arguments to use this new option.

Transports
----------

RabbitMQ priority queue support
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See :ref:`routing-options-rabbitmq-priorities` for more information.

Contributed by **Gerald Manipon**.

Configure broker URL for read/write separately
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

New :setting:`broker_read_url` and :setting:`broker_write_url` settings have been added so that separate broker URLs can be provided for connections used for consuming/publishing.

In addition to the configuration options, two new methods have been added to the app API:

- ``app.connection_for_read()``
- ``app.connection_for_write()``

These should now be used in place of ``app.connection()`` to specify the intent of the required connection.

.. note::

    Two connection pools are available: ``app.pool`` (read), and ``app.producer_pool`` (write).
    The latter doesn't actually give connections but full :class:`kombu.Producer` instances.

    .. code-block:: python

        def publish_some_message(app, producer=None):
            with app.producer_or_acquire(producer) as producer:
                ...

        def consume_messages(app, connection=None):
            with app.connection_or_acquire(connection) as connection:
                ...

RabbitMQ queue extensions support
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Queue declarations can now set a message TTL and queue expiry time directly, by using the ``message_ttl`` and ``expires`` arguments.

New arguments have been added to :class:`~kombu.Queue` that let you directly and conveniently configure RabbitMQ queue extensions in queue declarations:

- ``Queue(expires=20.0)``

    Set queue expiry time in float seconds.

    See :attr:`kombu.Queue.expires`.

- ``Queue(message_ttl=30.0)``

    Set queue message time-to-live in float seconds.

    See :attr:`kombu.Queue.message_ttl`.

- ``Queue(max_length=1000)``

    Set queue max length (number of messages) as int.

    See :attr:`kombu.Queue.max_length`.

- ``Queue(max_length_bytes=1000)``

    Set queue max length (message size total in bytes) as int.

    See :attr:`kombu.Queue.max_length_bytes`.

- ``Queue(max_priority=10)``

    Declare queue to be a priority queue that routes messages based on the ``priority`` field of the message.

    See :attr:`kombu.Queue.max_priority`.

Amazon SQS transport now officially supported
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The SQS broker transport has been rewritten to use async I/O and as such joins RabbitMQ, Redis and QPid as officially supported transports.

The new implementation also takes advantage of long polling, and closes several issues related to using SQS as a broker.

This work was sponsored by Nextdoor.

Apache QPid transport now officially supported
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Contributed by **Brian Bouterse**.

Redis: Support for Sentinel
---------------------------

You can point the connection to a list of sentinel URLs like:

.. code-block:: text

    sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/...

where each sentinel is separated by a ``;``. Multiple sentinels are handled by the :class:`kombu.Connection` constructor, and placed in the alternative list of servers to connect to in case of connection failure.

Contributed by **Sergey Azovskov**, and **Lorenzo Mancini**.

Tasks
-----

Task Auto-retry Decorator
~~~~~~~~~~~~~~~~~~~~~~~~~

Writing custom retry handling for exception events is so common that we now have built-in support for it.

For this purpose, a new ``autoretry_for`` argument is now supported by the task decorators, where you can specify a tuple of exceptions to automatically retry for:

.. code-block:: python

    from twitter.exceptions import FailWhaleError

    @app.task(autoretry_for=(FailWhaleError,))
    def refresh_timeline(user):
        return twitter.refresh_timeline(user)

See :ref:`task-autoretry` for more information.

Contributed by **Dmitry Malinovsky**.

.. :sha:`75246714dd11e6c463b9dc67f4311690643bff24`

``Task.replace`` Improvements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- ``self.replace(signature)`` can now replace any task, chord or group, and the signature to replace with can be a chord, group or any other type of signature.

- No longer inherits the callbacks and errbacks of the existing task.

  If you replace a node in a tree, then you wouldn't expect the new node to inherit the children of the old node.

- ``Task.replace_in_chord`` has been removed, use ``.replace`` instead.
- If the replacement is a group, that group will be automatically converted to a chord, where the callback "accumulates" the results of the group tasks.

  A new built-in task (``celery.accumulate``) was added for this purpose.

Contributed by **Steeve Morin**, and **Ask Solem**.

Remote Task Tracebacks
~~~~~~~~~~~~~~~~~~~~~~

The new :setting:`task_remote_tracebacks` will make task tracebacks more useful by injecting the stack of the remote worker.

This feature requires the additional :pypi:`tblib` library.

Contributed by **Ionel Cristian Mărieș**.

Handling task connection errors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Connection-related errors occurring while sending a task are now re-raised as a :exc:`kombu.exceptions.OperationalError` error:

.. code-block:: pycon

    >>> try:
    ...     add.delay(2, 2)
    ... except add.OperationalError as exc:
    ...     print('Could not send task %r: %r' % (add, exc))

See :ref:`calling-connection-errors` for more information.

Gevent/Eventlet: Dedicated thread for consuming results
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

When using :pypi:`gevent`, or :pypi:`eventlet` there is now a single thread responsible for consuming results.

This means that if you have many calls retrieving results, there will be a dedicated thread for consuming them:

.. code-block:: python

    result = add.delay(2, 2)

    # this call will delegate to the result consumer thread:
    # once the consumer thread has received the result this greenlet can
    # continue.
    value = result.get(timeout=3)

This makes performing RPC calls when using gevent/eventlet perform much better.

``AsyncResult.then(on_success, on_error)``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The AsyncResult API has been extended to support the :class:`~vine.promise` protocol.

This currently only works with the RPC (amqp) and Redis result backends, but lets you attach callbacks to when tasks finish:

.. code-block:: python

    from gevent import monkey
    monkey.patch_all()

    import time
    from celery import Celery

    app = Celery(broker='amqp://', backend='rpc')

    @app.task
    def add(x, y):
        return x + y

    def on_result_ready(result):
        print('Received result for id %r: %r' % (result.id, result.result,))

    add.delay(2, 2).then(on_result_ready)

    time.sleep(3)  # run gevent event loop for a while.

Demonstrated using :pypi:`gevent` here, but really this is an API that's more useful in callback-based event loops like :pypi:`twisted` or :pypi:`tornado`.

New Task Router API
~~~~~~~~~~~~~~~~~~~

The :setting:`task_routes` setting can now hold functions, and map routes now support glob patterns and regexes.

Instead of using router classes you can now simply define a function:

.. code-block:: python

    def route_for_task(name, args, kwargs, options, task=None, **kw):
        from proj import tasks

        if name == tasks.add.name:
            return {'queue': 'hipri'}

If you don't need the arguments you can use star arguments, just make sure you always also accept star arguments so that we have the ability to add more features in the future:

.. code-block:: python

    def route_for_task(name, *args, **kwargs):
        from proj import tasks
        if name == tasks.add.name:
            return {'queue': 'hipri', 'priority': 9}

Both the ``options`` argument and the new ``task`` keyword argument are new to the function-style routers, and will make it easier to write routers based on execution options, or properties of the task.

The optional ``task`` keyword argument won't be set if a task is called by name using :meth:`@send_task`.
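As an illustrative sketch only (the queue names and the ``proj.tasks``/``video.tasks``/``image.tasks`` name patterns are placeholders), the three router styles can be combined in a single :setting:`task_routes` value, which Celery tries in order:

.. code-block:: python

    import re

    task_routes = (
        route_for_task,                          # the function router from above
        {'proj.tasks.*': {'queue': 'default'}},  # glob pattern map
        {re.compile(r'(video|image)\.tasks\..*'): {'queue': 'media'}},  # regex map
    )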
For more examples, including using glob/regexes in routers, please see :setting:`task_routes` and :ref:`routing-automatic`.

Canvas Refactor
~~~~~~~~~~~~~~~

The canvas/work-flow implementation has been heavily refactored to fix some long-outstanding issues.

.. :sha:`d79dcd8e82c5e41f39abd07ffed81ca58052bcd2`

.. :sha:`1e9dd26592eb2b93f1cb16deb771cfc65ab79612`

.. :sha:`e442df61b2ff1fe855881c1e2ff9acc970090f54`

.. :sha:`0673da5c09ac22bdd49ba811c470b73a036ee776`

- Error callbacks can now take real exception and traceback instances (Issue #2538).

  .. code-block:: pycon

      >>> add.s(2, 2).on_error(log_error.s()).delay()

  Where ``log_error`` could be defined as:

  .. code-block:: python

      @app.task
      def log_error(request, exc, traceback):
          with open(os.path.join('/var/errors', request.id), 'a') as fh:
              print('--\n\n{0} {1} {2}'.format(
                  request.id, exc, traceback), file=fh)

  See :ref:`guide-canvas` for more examples.

- ``chain(a, b, c)`` now works the same as ``a | b | c``.

  This means chain may no longer return an instance of ``chain``; instead it may optimize the workflow so that, e.g., two groups chained together become one group.

- Now unrolls groups within groups into a single group (Issue #1509).

- Chords and chains can now be immutable.

- Fixed bug where serialized signatures weren't converted back into signatures (Issue #2078).

  Fix contributed by **Ross Deane**.

- Fixed problem where chains and groups didn't work when using JSON serialization (Issue #2076).

  Fix contributed by **Ross Deane**.

- Creating a chord no longer results in multiple values for keyword argument 'task_id' (Issue #2225).

  Fix contributed by **Aneil Mallavarapu**.

- Fixed issue where the wrong result is returned when a chain contains a chord as the penultimate task.

  Fix contributed by **Aneil Mallavarapu**.

- Special case of ``group(A.s() | group(B.s() | C.s()))`` now works.

- Chain: Fixed bug with incorrect id set when a subtask is also a chain.

- ``group | group`` is now flattened into a single group (Issue #2573).

- Fixed issue where ``group | task`` wasn't upgrading correctly to chord (Issue #2922).

- Chords now properly set ``result.parent`` links.

- ``chunks``/``map``/``starmap`` tasks are now routed based on the target task.

- ``Signature.link`` now works when the argument is scalar (not a list) (Issue #2019).

- ``group()`` now properly forwards keyword arguments (Issue #3426).

  Fix contributed by **Samuel Giffard**.

- A ``chord`` where the header group only consists of a single task is now turned into a simple chain.

- Passing a ``link`` argument to ``group.apply_async()`` now raises an error (Issue #3508).

- ``chord | sig`` now attaches to the chord callback (Issue #3356).

Periodic Tasks
--------------

New API for configuring periodic tasks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This new API enables you to use signatures when defining periodic tasks, removing the chance of mistyping task names.

An example of the new API is :ref:`here `.

.. :sha:`bc18d0859c1570f5eb59f5a969d1d32c63af764b`

.. :sha:`132d8d94d38f4050db876f56a841d5a5e487b25b`

Optimized Beat implementation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :program:`celery beat` implementation has been optimized for millions of periodic tasks by using a heap to schedule entries.

Contributed by **Ask Solem** and **Alexander Koshelev**.

Schedule tasks based on sunrise, sunset, dawn and dusk
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See :ref:`beat-solar` for more information.

Contributed by **Mark Parncutt**.
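A minimal sketch of a solar schedule entry, using the ``solar(event, latitude, longitude)`` signature (the task name and the coordinates here are placeholders):

.. code-block:: python

    from celery.schedules import solar

    app.conf.beat_schedule = {
        # run this task at civil dusk every day at the given location
        'send-report-at-dusk': {
            'task': 'proj.tasks.send_report',
            'schedule': solar('dusk_civil', -37.81753, 144.96715),
        },
    }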
Result Backends
---------------

RPC Result Backend matured
~~~~~~~~~~~~~~~~~~~~~~~~~~

Lots of bugs in the previously experimental RPC result backend have been fixed, and it can now be considered ready for production use.

Contributed by **Ask Solem**, **Morris Tweed**.

Redis: Result backend optimizations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

``result.get()`` is now using pub/sub for streaming task results
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Calling ``result.get()`` when using the Redis result backend used to be extremely expensive as it was using polling to wait for the result to become available. A default polling interval of 0.5 seconds didn't help performance, but was necessary to avoid a spin loop.

The new implementation is using Redis Pub/Sub mechanisms to publish and retrieve results immediately, greatly improving task round-trip times.

Contributed by **Yaroslav Zhavoronkov** and **Ask Solem**.

New optimized chord join implementation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This was an experimental feature introduced in Celery 3.1 that could only be enabled by adding ``?new_join=1`` to the result backend URL configuration.

We feel that the implementation has been tested thoroughly enough to be considered stable and enabled by default.

The new implementation greatly reduces the overhead of chords, and especially with larger chords the performance benefit can be massive.

New Riak result backend introduced
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See :ref:`conf-riak-result-backend` for more information.

Contributed by **Gilles Dartiguelongue**, **Alman One** and **NoKriK**.

New CouchDB result backend introduced
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See :ref:`conf-couchdb-result-backend` for more information.

Contributed by **Nathan Van Gheem**.

New Consul result backend introduced
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Add support for Consul as a backend using the Key/Value store of Consul.

Consul has an HTTP API through which you can store keys with their values.

The backend extends ``KeyValueStoreBackend`` and implements most of the methods, mainly those to set, get and remove objects.

This allows Celery to store task results in the K/V store of Consul.

Consul also allows you to set a TTL on keys using Consul's sessions. This way the backend supports automatic expiry of task results.

For more information on Consul visit https://consul.io/

The backend uses :pypi:`python-consul` for talking to the HTTP API. This package is fully Python 3 compliant, just as this backend is:

.. code-block:: console

    $ pip install python-consul

That installs the required package to talk to Consul's HTTP API from Python.

You can also specify consul as an extension in your dependency on Celery:

.. code-block:: console

    $ pip install celery[consul]

See :ref:`bundles` for more information.

Contributed by **Wido den Hollander**.

Brand new Cassandra result backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A brand new Cassandra backend utilizing the new :pypi:`cassandra-driver` library is replacing the old result backend, which used the older :pypi:`pycassa` library.

See :ref:`conf-cassandra-result-backend` for more information.

To depend on Celery with Cassandra as the result backend use:

.. code-block:: console

    $ pip install celery[cassandra]

You can also combine multiple extension requirements, please see :ref:`bundles` for more information.

New Elasticsearch result backend introduced
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See :ref:`conf-elasticsearch-result-backend` for more information.
To depend on Celery with Elasticsearch as the result backend use:

.. code-block:: console

    $ pip install celery[elasticsearch]

You can also combine multiple extension requirements, please see :ref:`bundles` for more information.

Contributed by **Ahmet Demir**.

New File-system result backend introduced
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See :ref:`conf-filesystem-result-backend` for more information.

Contributed by **Môshe van der Sterre**.

Event Batching
--------------

Events are now buffered in the worker and sent as a list, reducing the overhead required to send monitoring events.

For authors of custom event monitors there will be no action required as long as you're using the Python Celery helpers (:class:`~@events.Receiver`) to implement your monitor.

However, if you're parsing raw event messages you must now account for batched event messages, as they differ from normal event messages in the following way:

- The routing key for a batch of event messages will be set to ``<group>.multi``, where the only batched event group is currently ``task`` (giving a routing key of ``task.multi``).

- The message body will be a serialized list-of-dictionaries instead of a dictionary. Each item in the list can be regarded as a normal event message body.

.. :sha:`03399b4d7c26fb593e61acf34f111b66b340ba4e`

In Other News...
----------------

Requirements
~~~~~~~~~~~~

- Now depends on :ref:`Kombu 4.0 `.

- Now depends on :pypi:`billiard` version 3.5.

- No longer depends on :pypi:`anyjson`. Good-bye old friend :(

Tasks
~~~~~

- The "anon-exchange" is now used for simple name-name direct routing.

  This increases performance as it completely bypasses the routing table; in addition it also improves reliability for the Redis broker transport.

- An empty ResultSet now evaluates to True.

  Fix contributed by **Colin McIntosh**.

- The default routing key and exchange name are now taken from the :setting:`task_default_queue` setting.

  This means that to change the name of the default queue, you now only have to set a single setting.

- The new :setting:`task_reject_on_worker_lost` setting, and the :attr:`~@Task.reject_on_worker_lost` task attribute decide what happens when the child worker process executing a late-ack task is terminated.

  Contributed by **Michael Permana**.

- ``Task.subtask`` renamed to ``Task.signature`` with alias.

- ``Task.subtask_from_request`` renamed to ``Task.signature_from_request`` with alias.

- The ``delivery_mode`` attribute for :class:`kombu.Queue` is now respected (Issue #1953).

- Routes in :setting:`task_routes` can now specify a :class:`~kombu.Queue` instance directly.

  Example:

  .. code-block:: python

      task_routes = {'proj.tasks.add': {'queue': Queue('add')}}

- ``AsyncResult`` now raises :exc:`ValueError` if ``task_id`` is :const:`None` (Issue #1996).

- Retried tasks didn't forward the expires setting (Issue #3297).

- ``result.get()`` now supports an ``on_message`` argument to set a callback to be called for every message received (see the example after this list).

- New abstract classes added:

  - :class:`~celery.utils.abstract.CallableTask`

    Looks like a task.

  - :class:`~celery.utils.abstract.CallableSignature`

    Looks like a task signature.

- ``Task.replace`` now properly forwards callbacks (Issue #2722).

  Fix contributed by **Nicolas Unravel**.

- ``Task.replace``: Append to chain/chord (Closes #3232).

  Fixed issue #3232, adding the signature to the chain (if there is one), and fixed the chord suppress if the given signature contains one.

  Fix contributed by :github_user:`honux`.

- Task retry now also throws in eager mode.

  Fix contributed by **Feanil Patel**.
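For example, the ``on_message`` callback mentioned in the list above can be used to observe every state message received while waiting for a result. This is only a sketch; ``add`` is a stand-in task:

.. code-block:: python

    def on_message(body):
        # called for every message received while waiting for the result,
        # including intermediate states.
        print('received message: %r' % (body,))

    result = add.delay(2, 2)
    print(result.get(on_message=on_message, timeout=5))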
Beat
~~~~

- Fixed crontab infinite loop with invalid date.

  When an occurrence can never be reached (for example, April 31st), trying to reach the next occurrence would trigger an infinite loop.

  This is now mitigated by raising a :exc:`RuntimeError` after 2,000 iterations. (A test for crontab leap years was also added in the process.)

  Fix contributed by **Romuald Brunet**.

- Now ensures the program exits with a non-zero exit code when an exception terminates the service.

  Fix contributed by **Simon Peeters**.

App
~~~

- Dates are now always timezone-aware even if :setting:`enable_utc` is disabled (Issue #943).

  Fix contributed by **Omer Katz**.

- **Config**: App preconfiguration is now also pickled with the configuration.

  Fix contributed by **Jeremy Zafran**.

- The application can now change how task names are generated using the :meth:`~@gen_task_name` method.

  Contributed by **Dmitry Malinovsky**.

- App has a new ``app.current_worker_task`` property that returns the task that's currently being worked on (or :const:`None`) (Issue #2100).

Logging
~~~~~~~

- :func:`~celery.utils.log.get_task_logger` now raises an exception if trying to use the name "celery" or "celery.task" (Issue #3475).

Execution Pools
~~~~~~~~~~~~~~~

- **Eventlet/Gevent**: now enables AMQP heartbeat (Issue #3338).

- **Eventlet/Gevent**: Fixed race condition leading to "simultaneous read" errors (Issue #2755).

- **Prefork**: Prefork pool now uses ``poll`` instead of ``select`` where available (Issue #2373).

- **Prefork**: Fixed bug where the pool would refuse to shut down the worker (Issue #2606).

- **Eventlet**: Now returns pool size in :program:`celery inspect stats` command.

  Contributed by **Alexander Oblovatniy**.

Testing
~~~~~~~

- Celery is now a :pypi:`pytest` plugin, including fixtures useful for unit and integration testing.

  See the :ref:`testing user guide ` for more information.

Transports
~~~~~~~~~~

- ``amqps://`` can now be specified to require SSL.

- **Redis Transport**: The Redis transport now supports the :setting:`broker_use_ssl` option.

  Contributed by **Robert Kolba**.

- JSON serializer now calls ``obj.__json__`` for unsupported types.

  This means you can now define a ``__json__`` method for custom types that can be reduced down to a built-in json type.

  Example:

  .. code-block:: python

      class Person:
          first_name = None
          last_name = None
          address = None

          def __json__(self):
              return {
                  'first_name': self.first_name,
                  'last_name': self.last_name,
                  'address': self.address,
              }

- JSON serializer now handles datetimes, Django promises, UUIDs and Decimals.

- The new ``Queue.consumer_arguments`` argument can be used to set consumer priority via ``x-priority``.

  See https://www.rabbitmq.com/consumer-priority.html

  Example:

  .. code-block:: python

      consumer = Consumer(channel, consumer_arguments={'x-priority': 3})

- Queue/Exchange: ``no_declare`` option added (also enabled for internal amq. exchanges).

Programs
~~~~~~~~

- Celery is now using :mod:`argparse`, instead of :mod:`optparse`.

- All programs now disable colors if the controlling terminal is not a TTY.

- :program:`celery worker`: The ``-q`` argument now disables the startup banner.

- :program:`celery worker`: The "worker ready" message is now logged using severity info, instead of warn.

- :program:`celery multi`: The ``%n`` format is now a synonym for ``%N``, to be consistent with :program:`celery worker`.

- :program:`celery inspect`/:program:`celery control`: now supports a new :option:`--json ` option to give output in JSON format.
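  For example (``proj`` here is a placeholder app name):

  .. code-block:: console

      $ celery -A proj inspect active --json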
- :program:`celery inspect registered`: now ignores built-in tasks.

- :program:`celery purge` now takes ``-Q`` and ``-X`` options, used to specify what queues to include and exclude from the purge.

- New :program:`celery logtool`: Utility for filtering and parsing celery worker log-files.

- :program:`celery multi`: now passes through `%i` and `%I` log file formats.

- General: ``%p`` can now be used to expand to the full worker node-name in log-file/pid-file arguments.

- A new command line option :option:`--executable ` is now available for daemonizing programs (:program:`celery worker` and :program:`celery beat`).

  Contributed by **Bert Vanderbauwhede**.

- :program:`celery worker`: supports the new :option:`--prefetch-multiplier ` option.

  Contributed by **Mickaël Penhard**.

- The ``--loader`` argument is now always effective even if an app argument is set (Issue #3405).

- inspect/control now take commands from the registry.

  This means user remote-control commands can also be used from the command-line.

  Note that you need to specify the arguments (and the type of each argument) for them to be correctly passed on the command-line.

  There are now two decorators; which one to use depends on the type of command: ``@inspect_command`` and ``@control_command``:

  .. code-block:: python

      from celery.worker.control import control_command

      @control_command(
          args=[('n', int)],
          signature='[N=1]',
      )
      def something(state, n=1, **kwargs):
          ...

  Here ``args`` is a list of args supported by the command. The list must contain tuples of ``(argument_name, type)``.

  ``signature`` is just the command-line help used in e.g. ``celery -A proj control --help``.

  Commands also support `variadic` arguments, which means that any arguments left over will be added to a single variable. This is demonstrated here by the ``terminate`` command, which takes a signal argument and a variable number of task ids:

  .. code-block:: python

      from celery.worker.control import control_command

      @control_command(
          args=[('signal', str)],
          signature='<signal> [id1, [id2, [..., [idN]]]]',
          variadic='ids',
      )
      def terminate(state, signal, ids, **kwargs):
          ...

  This command can now be called using:

  .. code-block:: console

      $ celery -A proj control terminate SIGKILL id1 id2 id3

  See :ref:`worker-custom-control-commands` for more information.

Worker
~~~~~~

- Improvements and fixes for :class:`~celery.utils.collections.LimitedSet`.

  Getting rid of leaking memory, and adding a ``minlen`` size for the set: the minimal residual size of the set after operating for some time. ``minlen`` items are kept, even if they should've been expired.

  Problems with older (and even more ancient) code:

  #. The heap would tend to grow in some scenarios (like adding an item multiple times).

  #. Adding many items fast wouldn't clean them soon enough (if ever).

  #. When talking to other workers, ``revoked._data`` was sent, but it was processed on the other side as an iterable. That means giving those keys a new (current) time-stamp. By doing this workers could recycle items forever. Combined with 1) and 2), this means that in a large cluster of workers, you could run out of memory quickly.

  All those problems should be fixed now.

  This should fix issues #3095, #3086.

  Contributed by **David Pravec**.

- New settings to control remote control command queues (see the sketch below).

  - :setting:`control_queue_expires`

    Set queue expiry time for both remote control command queues, and remote control reply queues.

  - :setting:`control_queue_ttl`

    Set message time-to-live for both remote control command queues, and remote control reply queues.

  Contributed by **Alan Justino**.
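  A minimal sketch of setting both from your configuration module (the values are arbitrary examples):

  .. code-block:: python

      control_queue_ttl = 30.0       # control messages expire after 30 seconds
      control_queue_expires = 300.0  # idle control/reply queues are deleted after 5 minutes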
- The :signal:`worker_shutdown` signal is now always called during shutdown.

  Previously it would not be called if the worker instance was collected by the garbage collector first.

- Worker now only starts the remote control command consumer if the broker transport used actually supports them.

- Gossip now sets ``x-message-ttl`` for the event queue to the heartbeat interval in seconds (Issue #2005).

- Now preserves exit code (Issue #2024).

- Now rejects messages with an invalid ETA value (instead of acking them, which means they will be sent to the dead-letter exchange if one is configured).

- Fixed crash when the ``--purge`` argument was used.

- Log-level for unrecoverable errors changed from ``error`` to ``critical``.

- Improved rate limiting accuracy.

- Account for missing timezone information in the task ``expires`` field.

  Fix contributed by **Albert Wang**.

- The worker no longer has a ``Queues`` bootstep, as it is now superfluous.

- Now emits the "Received task" line even for revoked tasks (Issue #3155).

- Now respects the :setting:`broker_connection_retry` setting.

  Fix contributed by **Nat Williams**.

- New :data:`celery.worker.state.requests` enables O(1) lookup of active/reserved tasks by id.

- Auto-scale didn't always update keep-alive when scaling down.

  Fix contributed by **Philip Garnero**.

- Fixed typo ``options_list`` -> ``option_list``.

  Fix contributed by **Greg Wilbur**.

- Some worker command-line arguments and ``Worker()`` class arguments have been renamed for consistency.

  All of these have aliases for backward compatibility.

  - ``--send-events`` -> ``--task-events``

  - ``--schedule`` -> ``--schedule-filename``

  - ``--maxtasksperchild`` -> ``--max-tasks-per-child``

  - ``Beat(scheduler_cls=)`` -> ``Beat(scheduler=)``

  - ``Worker(send_events=True)`` -> ``Worker(task_events=True)``

  - ``Worker(task_time_limit=)`` -> ``Worker(time_limit=)``

  - ``Worker(task_soft_time_limit=)`` -> ``Worker(soft_time_limit=)``

  - ``Worker(state_db=)`` -> ``Worker(statedb=)``

  - ``Worker(working_directory=)`` -> ``Worker(workdir=)``

Debugging Utilities
~~~~~~~~~~~~~~~~~~~

- :mod:`celery.contrib.rdb`: Changed remote debugger banner so that you can copy and paste the address easily (no longer has a period in the address).

  Contributed by **Jonathan Vanasco**.

- Fixed compatibility with recent :pypi:`psutil` versions (Issue #3262).

Signals
~~~~~~~

- **App**: New signals for app configuration/finalization:

  - :data:`app.on_configure <@on_configure>`
  - :data:`app.on_after_configure <@on_after_configure>`
  - :data:`app.on_after_finalize <@on_after_finalize>`

- **Task**: New task signals for rejected task messages:

  - :data:`celery.signals.task_rejected`.
  - :data:`celery.signals.task_unknown`.

- **Worker**: New signal for when a heartbeat event is sent.

  - :data:`celery.signals.heartbeat_sent`

    Contributed by **Kevin Richardson**.

Events
~~~~~~

- Event messages now use the RabbitMQ ``x-message-ttl`` option to ensure older event messages are discarded.

  The default is 5 seconds, but can be changed using the :setting:`event_queue_ttl` setting.

- ``Task.send_event`` now automatically retries sending the event on connection failure, according to the task publish retry settings.

- Event monitors now set the :setting:`event_queue_expires` setting by default.

  The queues will now expire 60 seconds after the monitor stops consuming from them.
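  A sketch of adjusting both event-queue settings from your configuration module (values are arbitrary examples):

  .. code-block:: python

      event_queue_ttl = 10.0       # discard event messages older than 10 seconds
      event_queue_expires = 120.0  # delete idle monitor queues after 2 minutes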
- Fixed a bug where a ``None`` value wasn't handled properly.

  Fix contributed by **Dongweiming**.

- The new :setting:`event_queue_prefix` setting can now be used to change the default ``celeryev`` queue prefix for event receiver queues.

  Contributed by **Takeshi Kanemoto**.

- ``State.tasks_by_type`` and ``State.tasks_by_worker`` can now be used as a mapping for fast access to this information.

Deployment
~~~~~~~~~~

- Generic init-scripts now support :envvar:`CELERY_SU` and :envvar:`CELERYD_SU_ARGS` environment variables to set the path and arguments for :command:`su` (:manpage:`su(1)`).

- Generic init-scripts now better support FreeBSD and other BSD systems by searching :file:`/usr/local/etc/` for the configuration file.

  Contributed by **Taha Jahangir**.

- Generic init-script: Fixed strange bug for ``celerybeat`` where restart didn't always work (Issue #3018).

- The systemd init script now uses a shell when executing services.

  Contributed by **Tomas Machalek**.

Result Backends
~~~~~~~~~~~~~~~

- Redis: Now has a default socket timeout of 120 seconds.

  The default can be changed using the new :setting:`redis_socket_timeout` setting.

  Contributed by **Raghuram Srinivasan**.

- RPC Backend result queues are now auto-delete by default (Issue #2001).

- RPC Backend: Fixed problem where exceptions weren't deserialized properly with the json serializer (Issue #2518).

  Fix contributed by **Allard Hoeve**.

- CouchDB: The backend used to double-json-encode results.

  Fix contributed by **Andrew Stewart**.

- CouchDB: Fixed typo causing the backend to not be found (Issue #3287).

  Fix contributed by **Andrew Stewart**.

- MongoDB: Now supports setting the :setting:`result_serializer` setting to ``bson`` to use the MongoDB libraries' own serializer.

  Contributed by **Davide Quarta**.

- MongoDB: URI handling has been improved to use the database name, user and password from the URI, if provided.

  Contributed by **Samuel Jaillet**.

- SQLAlchemy result backend: Now ignores all result engine options when using NullPool (Issue #1930).

- SQLAlchemy result backend: Now sets max char size to 155 to deal with brain-damaged MySQL Unicode implementations (Issue #1748).

- **General**: All Celery exceptions/warnings now inherit from the common :class:`~celery.exceptions.CeleryError`/:class:`~celery.exceptions.CeleryWarning` classes (Issue #2643).

Documentation Improvements
~~~~~~~~~~~~~~~~~~~~~~~~~~

Contributed by:

- Adam Chainz
- Amir Rustamzadeh
- Arthur Vuillard
- Batiste Bieler
- Berker Peksag
- Bryce Groff
- Daniel Devine
- Edward Betts
- Jason Veatch
- Jeff Widman
- Maciej Obuchowski
- Manuel Kaufmann
- Maxime Beauchemin
- Mitchel Humpherys
- Pavlo Kapyshin
- Pierre Fersing
- Rik
- Steven Sklar
- Tayfun Sen
- Wieland Hoffmann

Reorganization, Deprecations, and Removals
==========================================

Incompatible changes
--------------------

- Prefork: Calling ``result.get()`` or joining any result from within a task now raises :exc:`RuntimeError`.

  In previous versions this would emit a warning.

- :mod:`celery.worker.consumer` is now a package, not a module.

- Module ``celery.worker.job`` renamed to :mod:`celery.worker.request`.

- Beat: ``Scheduler.Publisher``/``.publisher`` renamed to ``.Producer``/``.producer``.

- Result: The ``task_name`` argument/attribute of :class:`@AsyncResult` was removed.

  This was historically a field used for :mod:`pickle` compatibility, but is no longer needed.

- Backends: Arguments named ``status`` renamed to ``state``.

- Backends: ``backend.get_status()`` renamed to ``backend.get_state()``.
- Backends: ``backend.maybe_reraise()`` renamed to ``.maybe_throw()``.

  The promise API uses ``.throw()``, so this change was made to make it more consistent.

  There's an alias available, so you can still use ``maybe_reraise()`` until Celery 5.0.

.. _v400-unscheduled-removals:

Unscheduled Removals
--------------------

- The experimental :mod:`celery.contrib.methods` feature has been removed, as there were far too many bugs in the implementation for it to be useful.

- The CentOS init-scripts have been removed.

  These didn't really add any features over the generic init-scripts, so you're encouraged to use the generic init-scripts instead, or something like :pypi:`supervisor`.

.. _v400-deprecations-reorg:

Reorganization Deprecations
---------------------------

These symbols have been renamed, and while there's an alias available in this version for backward compatibility, they will be removed in Celery 5.0, so make sure you rename them ASAP to ensure your code won't break in that release.

Chances are that you'll only use the first in this list, but you never know:

- ``celery.utils.worker_direct`` -> :meth:`celery.utils.nodenames.worker_direct`.
- ``celery.utils.nodename`` -> :meth:`celery.utils.nodenames.nodename`.
- ``celery.utils.anon_nodename`` -> :meth:`celery.utils.nodenames.anon_nodename`.
- ``celery.utils.nodesplit`` -> :meth:`celery.utils.nodenames.nodesplit`.
- ``celery.utils.default_nodename`` -> :meth:`celery.utils.nodenames.default_nodename`.
- ``celery.utils.node_format`` -> :meth:`celery.utils.nodenames.node_format`.
- ``celery.utils.host_format`` -> :meth:`celery.utils.nodenames.host_format`.

.. _v400-removals:

Scheduled Removals
------------------

Modules
~~~~~~~

- Module ``celery.worker.job`` has been renamed to :mod:`celery.worker.request`.

  This was an internal module so shouldn't have any effect. It's now part of the public API so must not change again.

- Module ``celery.task.trace`` has been renamed to ``celery.app.trace`` as the ``celery.task`` package is being phased out. The module will be removed in version 5.0, so please change any import from::

    from celery.task.trace import X

  to::

    from celery.app.trace import X

- Old compatibility aliases in the :mod:`celery.loaders` module have been removed.

  - Removed ``celery.loaders.current_loader()``, use: ``current_app.loader``

  - Removed ``celery.loaders.load_settings()``, use: ``current_app.conf``

Result
~~~~~~

- ``AsyncResult.serializable()`` and ``celery.result.from_serializable`` have been removed.

  Use instead:

  .. code-block:: pycon

      >>> tup = result.as_tuple()
      >>> from celery.result import result_from_tuple
      >>> result = result_from_tuple(tup)

- Removed ``BaseAsyncResult``, use ``AsyncResult`` for instance checks instead.

- Removed ``TaskSetResult``, use ``GroupResult`` instead.

  - ``TaskSetResult.total`` -> ``len(GroupResult)``

  - ``TaskSetResult.taskset_id`` -> ``GroupResult.id``

- Removed ``ResultSet.subtasks``, use ``ResultSet.results`` instead.

TaskSet
~~~~~~~

TaskSet has been removed, as it was replaced by the ``group`` construct in Celery 3.0.

If you have code like this:

.. code-block:: pycon

    >>> from celery.task import TaskSet

    >>> TaskSet(add.subtask((i, i)) for i in xrange(10)).apply_async()

You need to replace that with:

.. code-block:: pycon

    >>> from celery import group
    >>> group(add.s(i, i) for i in xrange(10))()

Events
~~~~~~

- Removals for class :class:`celery.events.state.Worker`:

  - ``Worker._defaults`` attribute.

    Use ``{k: getattr(worker, k) for k in worker._fields}``.
  - ``Worker.update_heartbeat``

    Use ``Worker.event(None, timestamp, received)``.

  - ``Worker.on_online``

    Use ``Worker.event('online', timestamp, received, fields)``.

  - ``Worker.on_offline``

    Use ``Worker.event('offline', timestamp, received, fields)``.

  - ``Worker.on_heartbeat``

    Use ``Worker.event('heartbeat', timestamp, received, fields)``.

- Removals for class :class:`celery.events.state.Task`:

  - ``Task._defaults`` attribute.

    Use ``{k: getattr(task, k) for k in task._fields}``.

  - ``Task.on_sent``

    Use ``Task.event('sent', timestamp, received, fields)``.

  - ``Task.on_received``

    Use ``Task.event('received', timestamp, received, fields)``.

  - ``Task.on_started``

    Use ``Task.event('started', timestamp, received, fields)``.

  - ``Task.on_failed``

    Use ``Task.event('failed', timestamp, received, fields)``.

  - ``Task.on_retried``

    Use ``Task.event('retried', timestamp, received, fields)``.

  - ``Task.on_succeeded``

    Use ``Task.event('succeeded', timestamp, received, fields)``.

  - ``Task.on_revoked``

    Use ``Task.event('revoked', timestamp, received, fields)``.

  - ``Task.on_unknown_event``

    Use ``Task.event(short_type, timestamp, received, fields)``.

  - ``Task.update``

    Use ``Task.event(short_type, timestamp, received, fields)``.

  - ``Task.merge``

    Contact us if you need this.

Magic keyword arguments
~~~~~~~~~~~~~~~~~~~~~~~

Support for the very old magic keyword arguments accepted by tasks is finally removed in this version.

If you're still using these you have to rewrite any task still using the old ``celery.decorators`` module and depending on keyword arguments being passed to the task, for example::

    from celery.decorators import task

    @task()
    def add(x, y, task_id=None):
        print('My task id is %r' % (task_id,))

should be rewritten into::

    from celery import task

    @task(bind=True)
    def add(self, x, y):
        print('My task id is {0.request.id}'.format(self))

Removed Settings
----------------

The following settings have been removed, and are no longer supported:

Logging Settings
~~~~~~~~~~~~~~~~

===================================== =====================================
**Setting name**                      **Replace with**
===================================== =====================================
``CELERYD_LOG_LEVEL``                 :option:`celery worker --loglevel`
``CELERYD_LOG_FILE``                  :option:`celery worker --logfile`
``CELERYBEAT_LOG_LEVEL``              :option:`celery beat --loglevel`
``CELERYBEAT_LOG_FILE``               :option:`celery beat --logfile`
``CELERYMON_LOG_LEVEL``               celerymon is deprecated, use flower
``CELERYMON_LOG_FILE``                celerymon is deprecated, use flower
``CELERYMON_LOG_FORMAT``              celerymon is deprecated, use flower
===================================== =====================================

Task Settings
~~~~~~~~~~~~~

===================================== =====================================
**Setting name**                      **Replace with**
===================================== =====================================
``CELERY_CHORD_PROPAGATES``           N/A
===================================== =====================================

Changes to internal API
-----------------------

- Module ``celery.datastructures`` renamed to :mod:`celery.utils.collections`.

- Module ``celery.utils.timeutils`` renamed to :mod:`celery.utils.time`.

- ``celery.utils.datastructures.DependencyGraph`` moved to :mod:`celery.utils.graph`.

- ``celery.utils.jsonify`` is now :func:`celery.utils.serialization.jsonify`.

- ``celery.utils.strtobool`` is now :func:`celery.utils.serialization.strtobool`.

- ``celery.utils.is_iterable`` has been removed.

  Instead use:

  .. code-block:: python

      isinstance(x, collections.Iterable)

- ``celery.utils.lpmerge`` is now :func:`celery.utils.collections.lpmerge`.

- ``celery.utils.cry`` is now :func:`celery.utils.debug.cry`.

- ``celery.utils.isatty`` is now :func:`celery.platforms.isatty`.

- ``celery.utils.gen_task_name`` is now :func:`celery.utils.imports.gen_task_name`.

- ``celery.utils.deprecated`` is now :func:`celery.utils.deprecated.Callable`.

- ``celery.utils.deprecated_property`` is now :func:`celery.utils.deprecated.Property`.

- ``celery.utils.warn_deprecated`` is now :func:`celery.utils.deprecated.warn`.

.. _v400-deprecations:

Deprecation Time-line Changes
=============================

See the :ref:`deprecation-timeline`.

celery-4.1.0/docs/django/0000755000175000017500000000000013135426347015127 5ustar omeromer00000000000000celery-4.1.0/docs/django/index.rst0000644000175000017500000000021113130607475016760 0ustar omeromer00000000000000.. _django:

=========
 Django
=========

:Release: |version|
:Date: |today|

.. toctree::
    :maxdepth: 2

    first-steps-with-django
celery-4.1.0/docs/django/first-steps-with-django.rst0000644000175000017500000001627713130607475022360 0ustar omeromer00000000000000.. _django-first-steps:

=========================
 First steps with Django
=========================

Using Celery with Django
========================

.. note::

    Previous versions of Celery required a separate library to work with Django, but since 3.1 this is no longer the case. Django is supported out of the box now so this document only contains a basic way to integrate Celery and Django. You'll use the same API as non-Django users so you're recommended to read the :ref:`first-steps` tutorial first and come back to this tutorial. When you have a working example you can continue to the :ref:`next-steps` guide.

.. note::

    Celery 4.0 supports Django 1.8 and newer versions. Please use Celery 3.1 for versions older than Django 1.8.

To use Celery with your Django project you must first define an instance of the Celery library (called an "app").

If you have a modern Django project layout like::

    - proj/
      - manage.py
      - proj/
        - __init__.py
        - settings.py
        - urls.py

then the recommended way is to create a new `proj/proj/celery.py` module that defines the Celery instance:

:file:`proj/proj/celery.py`

.. literalinclude:: ../../examples/django/proj/celery.py

Then you need to import this app in your :file:`proj/proj/__init__.py` module. This ensures that the app is loaded when Django starts so that the ``@shared_task`` decorator (mentioned later) will use it:

:file:`proj/proj/__init__.py`:

.. literalinclude:: ../../examples/django/proj/__init__.py

Note that this example project layout is suitable for larger projects; for simple projects you may use a single contained module that defines both the app and tasks, like in the :ref:`tut-celery` tutorial.

Let's break down what happens in the first module. First, we import absolute imports from the future, so that our ``celery.py`` module won't clash with the library:

.. code-block:: python

    from __future__ import absolute_import

Then we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment variable for the :program:`celery` command-line program:

.. code-block:: python

    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')

You don't need this line, but it saves you from always passing in the settings module to the ``celery`` program. It must always come before creating the app instances, which is what we do next:
.. code-block:: python

    app = Celery('proj')

This is our instance of the library. You can have many instances, but there's probably no reason for that when using Django.

We also add the Django settings module as a configuration source for Celery. This means that you don't have to use multiple configuration files, and instead configure Celery directly from the Django settings; but you can also separate them if wanted.

The uppercase name-space means that all Celery configuration options must be specified in uppercase instead of lowercase, and start with ``CELERY_``, so for example the :setting:`task_always_eager` setting becomes ``CELERY_TASK_ALWAYS_EAGER``, and the :setting:`broker_url` setting becomes ``CELERY_BROKER_URL``.

You can pass the object directly here, but using a string is better since then the worker doesn't have to serialize the object.

.. code-block:: python

    app.config_from_object('django.conf:settings', namespace='CELERY')

Next, a common practice for reusable apps is to define all tasks in a separate ``tasks.py`` module, and Celery does have a way to auto-discover these modules:

.. code-block:: python

    app.autodiscover_tasks()

With the line above Celery will automatically discover tasks from all of your installed apps, following the ``tasks.py`` convention::

    - app1/
        - tasks.py
        - models.py
    - app2/
        - tasks.py
        - models.py

This way you don't have to manually add the individual modules to the :setting:`CELERY_IMPORTS ` setting.

Finally, the ``debug_task`` example is a task that dumps its own request information. This is using the new ``bind=True`` task option introduced in Celery 3.1 to easily refer to the current task instance.

Using the ``@shared_task`` decorator
------------------------------------

The tasks you write will probably live in reusable apps, and reusable apps cannot depend on the project itself, so you also cannot import your app instance directly.

The ``@shared_task`` decorator lets you create tasks without having any concrete app instance:

:file:`demoapp/tasks.py`:

.. literalinclude:: ../../examples/django/demoapp/tasks.py

.. seealso::

    You can find the full source code for the Django example project at: https://github.com/celery/celery/tree/master/examples/django/

.. admonition:: Relative Imports

    You have to be consistent in how you import the task module. For example, if you have ``project.app`` in ``INSTALLED_APPS``, then you must also import the tasks ``from project.app``, or else the names of the tasks will end up being different.

    See :ref:`task-naming-relative-imports`

Extensions
==========

.. _django-celery-results:

``django-celery-results`` - Using the Django ORM/Cache as a result backend
--------------------------------------------------------------------------

The :pypi:`django-celery-results` extension provides result backends using either the Django ORM, or the Django Cache framework.

To use this with your project you need to follow these steps:

#. Install the :pypi:`django-celery-results` library:

   .. code-block:: console

      $ pip install django-celery-results

#. Add ``django_celery_results`` to ``INSTALLED_APPS`` in your Django project's :file:`settings.py`::

       INSTALLED_APPS = (
           ...,
           'django_celery_results',
       )

   Note that there is no dash in the module name, only underscores.

#. Create the Celery database tables by performing the database migrations:

   .. code-block:: console

      $ python manage.py migrate django_celery_results

#. Configure Celery to use the :pypi:`django-celery-results` backend.
Assuming you are using Django's :file:`settings.py` to also configure Celery, add the following settings: .. code-block:: python CELERY_RESULT_BACKEND = 'django-db' For the cache backend you can use: .. code-block:: python CELERY_RESULT_BACKEND = 'django-cache' ``django-celery-beat`` - Database-backed Periodic Tasks with Admin interface. ----------------------------------------------------------------------------- See :ref:`beat-custom-schedulers` for more information. Starting the worker process =========================== In a production environment you'll want to run the worker in the background as a daemon - see :ref:`daemonizing` - but for testing and development it is useful to be able to start a worker instance by using the :program:`celery worker` manage command, much as you'd use Django's :command:`manage.py runserver`: .. code-block:: console $ celery -A proj worker -l info For a complete listing of the command-line options available, use the help command: .. code-block:: console $ celery help Where to go from here ===================== If you want to learn more you should continue to the :ref:`Next Steps ` tutorial, and after that you can study the :ref:`User Guide `. celery-4.1.0/docs/reference/0000755000175000017500000000000013135426347015623 5ustar omeromer00000000000000celery-4.1.0/docs/reference/celery.states.rst0000644000175000017500000000015213130607475021136 0ustar omeromer00000000000000.. currentmodule:: celery.states .. contents:: :local: .. automodule:: celery.states :members: celery-4.1.0/docs/reference/celery.loaders.base.rst0000644000175000017500000000037213130607475022201 0ustar omeromer00000000000000=========================================== ``celery.loaders.base`` =========================================== .. contents:: :local: .. currentmodule:: celery.loaders.base .. automodule:: celery.loaders.base :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.events.rst0000644000175000017500000000041013130607475021703 0ustar omeromer00000000000000===================================================== ``celery.bin.events`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.events .. automodule:: celery.bin.events :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.worker.rst0000644000175000017500000000035713130607475022463 0ustar omeromer00000000000000==================================== ``celery.worker.worker`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.worker .. automodule:: celery.worker.worker :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.rst0000644000175000017500000000034213130607475021145 0ustar omeromer00000000000000======================================== ``celery.worker`` ======================================== .. contents:: :local: .. currentmodule:: celery.worker .. automodule:: celery.worker :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.gossip.rst0000644000175000017500000000044613130607475024307 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.gossip`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.gossip .. 
automodule:: celery.worker.consumer.gossip :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.testing.mocks.rst0000644000175000017500000000044613130607475024070 0ustar omeromer00000000000000==================================== ``celery.contrib.testing.mocks`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.mocks .. automodule:: celery.contrib.testing.mocks :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.connection.rst0000644000175000017500000000046213130607475025140 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.connection`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.connection .. automodule:: celery.worker.consumer.connection :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.logtool.rst0000644000175000017500000000041313130607475022061 0ustar omeromer00000000000000===================================================== ``celery.bin.logtool`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.logtool .. automodule:: celery.bin.logtool :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.log.rst0000644000175000017500000000032513130607475021175 0ustar omeromer00000000000000================================ ``celery.app.log`` ================================ .. contents:: :local: .. currentmodule:: celery.app.log .. automodule:: celery.app.log :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.registry.rst0000644000175000017500000000034413130607475022265 0ustar omeromer00000000000000================================ ``celery.app.registry`` ================================ .. contents:: :local: .. currentmodule:: celery.app.registry .. automodule:: celery.app.registry :members: :undoc-members: celery-4.1.0/docs/reference/celery.apps.beat.rst0000644000175000017500000000037513130607475021517 0ustar omeromer00000000000000================================================= ``celery.apps.beat`` ================================================= .. contents:: :local: .. currentmodule:: celery.apps.beat .. automodule:: celery.apps.beat :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.testing.manager.rst0000644000175000017500000000045413130607475024365 0ustar omeromer00000000000000==================================== ``celery.contrib.testing.manager`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.manager .. automodule:: celery.contrib.testing.manager :members: :undoc-members: celery-4.1.0/docs/reference/celery.signals.rst0000644000175000017500000000040113130607475021270 0ustar omeromer00000000000000====================================================== ``celery.signals`` ====================================================== .. contents:: :local: .. currentmodule:: celery.signals .. automodule:: celery.signals :members: :undoc-members: celery-4.1.0/docs/reference/celery.events.rst0000644000175000017500000000030213130607475021134 0ustar omeromer00000000000000======================== ``celery.events`` ======================== .. contents:: :local: .. currentmodule:: celery.events .. automodule:: celery.events :members: :undoc-members: celery-4.1.0/docs/reference/index.rst0000644000175000017500000000357713130607475017476 0ustar omeromer00000000000000.. 
_apiref: =============== API Reference =============== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 celery celery.app celery.app.task celery.app.amqp celery.app.defaults celery.app.control celery.app.registry celery.app.backends celery.app.builtins celery.app.events celery.app.log celery.app.utils celery.bootsteps celery.result celery.schedules celery.signals celery.security celery.utils.debug celery.exceptions celery.loaders celery.loaders.app celery.loaders.default celery.loaders.base celery.states celery.contrib.abortable celery.contrib.migrate celery.contrib.pytest celery.contrib.sphinx celery.contrib.testing.worker celery.contrib.testing.app celery.contrib.testing.manager celery.contrib.testing.mocks celery.contrib.rdb celery.events celery.events.receiver celery.events.dispatcher celery.events.event celery.events.state celery.beat celery.apps.worker celery.apps.beat celery.apps.multi celery.worker celery.worker.request celery.worker.state celery.worker.strategy celery.worker.consumer celery.worker.consumer.agent celery.worker.consumer.connection celery.worker.consumer.consumer celery.worker.consumer.control celery.worker.consumer.events celery.worker.consumer.gossip celery.worker.consumer.heart celery.worker.consumer.mingle celery.worker.consumer.tasks celery.worker.worker celery.bin.base celery.bin.celery celery.bin.worker celery.bin.beat celery.bin.events celery.bin.logtool celery.bin.amqp celery.bin.graph celery.bin.multi celery.bin.call celery.bin.control celery.bin.list celery.bin.migrate celery.bin.purge celery.bin.result celery.bin.shell celery.bin.upgrade celery-4.1.0/docs/reference/celery.loaders.default.rst0000644000175000017500000000037713130607475022720 0ustar omeromer00000000000000========================================= ``celery.loaders.default`` ========================================= .. contents:: :local: .. currentmodule:: celery.loaders.default .. automodule:: celery.loaders.default :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.base.rst0000644000175000017500000000033013130607475021312 0ustar omeromer00000000000000================================ ``celery.bin.base`` ================================ .. contents:: :local: .. currentmodule:: celery.bin.base .. automodule:: celery.bin.base :members: :undoc-members: celery-4.1.0/docs/reference/celery.beat.rst0000644000175000017500000000033413130607475020550 0ustar omeromer00000000000000======================================== ``celery.beat`` ======================================== .. contents:: :local: .. currentmodule:: celery.beat .. automodule:: celery.beat :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.celery.rst0000644000175000017500000000036213130607475021670 0ustar omeromer00000000000000========================================== ``celery.bin.celery`` ========================================== .. contents:: :local: .. currentmodule:: celery.bin.celery .. automodule:: celery.bin.celery :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.graph.rst0000644000175000017500000000040513130607475021504 0ustar omeromer00000000000000===================================================== ``celery.bin.graph`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.graph .. 
automodule:: celery.bin.graph :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.tasks.rst0000644000175000017500000000044313130607475024125 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.tasks`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.tasks .. automodule:: celery.worker.consumer.tasks :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.control.rst0000644000175000017500000000045113130607475024457 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.control`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.control .. automodule:: celery.worker.consumer.control :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.defaults.rst0000644000175000017500000000044213130607475022223 0ustar omeromer00000000000000=============================================================== ``celery.app.defaults`` =============================================================== .. contents:: :local: .. currentmodule:: celery.app.defaults .. automodule:: celery.app.defaults :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.list.rst0000644000175000017500000000040213130607475021353 0ustar omeromer00000000000000===================================================== ``celery.bin.list`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.list .. automodule:: celery.bin.list :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.strategy.rst0000644000175000017500000000036513130607475023013 0ustar omeromer00000000000000==================================== ``celery.worker.strategy`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.strategy .. automodule:: celery.worker.strategy :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.task.rst0000644000175000017500000000034213130607475021355 0ustar omeromer00000000000000=================================== ``celery.app.task`` =================================== .. contents:: :local: .. currentmodule:: celery.app.task .. automodule:: celery.app.task :members: Task, Context, TaskType celery-4.1.0/docs/reference/celery.app.utils.rst0000644000175000017500000000033313130607475021553 0ustar omeromer00000000000000================================ ``celery.app.utils`` ================================ .. contents:: :local: .. currentmodule:: celery.app.utils .. automodule:: celery.app.utils :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.builtins.rst0000644000175000017500000000041413130607475022244 0ustar omeromer00000000000000==================================================== ``celery.app.builtins`` ==================================================== .. contents:: :local: .. currentmodule:: celery.app.builtins .. automodule:: celery.app.builtins :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.sphinx.rst0000644000175000017500000000027013130607475022604 0ustar omeromer00000000000000================================ celery.contrib.sphinx ================================ .. currentmodule:: celery.contrib.sphinx .. 
automodule:: celery.contrib.sphinx :members: celery-4.1.0/docs/reference/celery.rst0000644000175000017500000000674513130607475017652 0ustar omeromer00000000000000=========================================== :mod:`celery` --- Distributed processing =========================================== .. currentmodule:: celery .. module:: celery :synopsis: Distributed processing .. moduleauthor:: Ask Solem .. sectionauthor:: Ask Solem -------------- This module is the main entry-point for the Celery API. It includes commonly needed things for calling tasks, and creating Celery applications. ===================== =================================================== :class:`Celery` Celery application instance :class:`group` group tasks together :class:`chain` chain tasks together :class:`chord` chords enable callbacks for groups :func:`signature` create a new task signature :class:`Signature` object describing a task invocation :data:`current_app` proxy to the current application instance :data:`current_task` proxy to the currently executing task ===================== =================================================== :class:`Celery` application objects ----------------------------------- .. versionadded:: 2.5 .. autoclass:: Celery .. autoattribute:: user_options .. autoattribute:: steps .. autoattribute:: current_task .. autoattribute:: current_worker_task .. autoattribute:: amqp .. autoattribute:: backend .. autoattribute:: loader .. autoattribute:: control .. autoattribute:: events .. autoattribute:: log .. autoattribute:: tasks .. autoattribute:: pool .. autoattribute:: producer_pool .. autoattribute:: Task .. autoattribute:: timezone .. autoattribute:: builtin_fixups .. autoattribute:: oid .. automethod:: close .. automethod:: signature .. automethod:: bugreport .. automethod:: config_from_object .. automethod:: config_from_envvar .. automethod:: autodiscover_tasks .. automethod:: add_defaults .. automethod:: add_periodic_task .. automethod:: setup_security .. automethod:: start .. automethod:: task .. automethod:: send_task .. automethod:: gen_task_name .. autoattribute:: AsyncResult .. autoattribute:: GroupResult .. automethod:: worker_main .. autoattribute:: Worker .. autoattribute:: WorkController .. autoattribute:: Beat .. automethod:: connection_for_read .. automethod:: connection_for_write .. automethod:: connection .. automethod:: connection_or_acquire .. automethod:: producer_or_acquire .. automethod:: select_queues .. automethod:: now .. automethod:: set_current .. automethod:: set_default .. automethod:: finalize .. automethod:: on_init .. automethod:: prepare_config .. data:: on_configure Signal sent when app is loading configuration. .. data:: on_after_configure Signal sent after app has prepared the configuration. .. data:: on_after_finalize Signal sent after app has been finalized. .. data:: on_after_fork Signal sent in child process after fork. Canvas primitives ----------------- See :ref:`guide-canvas` for more about creating task work-flows. .. autoclass:: group .. autoclass:: chain .. autoclass:: chord .. autofunction:: signature .. autoclass:: Signature Proxies ------- .. data:: current_app The currently set app for this thread. .. data:: current_task The task currently being executed (only set in the worker, or when eager/apply is used). celery-4.1.0/docs/reference/celery.worker.state.rst0000644000175000017500000000035413130607475022267 0ustar omeromer00000000000000==================================== ``celery.worker.state`` ==================================== .. 
contents:: :local: .. currentmodule:: celery.worker.state .. automodule:: celery.worker.state :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.shell.rst0000644000175000017500000000040513130607475021512 0ustar omeromer00000000000000===================================================== ``celery.bin.shell`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.shell .. automodule:: celery.bin.shell :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.events.rst0000644000175000017500000000033613130607475021722 0ustar omeromer00000000000000================================ ``celery.app.events`` ================================ .. contents:: :local: .. currentmodule:: celery.app.events .. automodule:: celery.app.events :members: :undoc-members: celery-4.1.0/docs/reference/celery.events.dispatcher.rst0000644000175000017500000000046013130607475023266 0ustar omeromer00000000000000================================================================= ``celery.events.dispatcher`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.dispatcher .. automodule:: celery.events.dispatcher :members: :undoc-members: celery-4.1.0/docs/reference/celery.exceptions.rst0000644000175000017500000000033613130607475022020 0ustar omeromer00000000000000================================ ``celery.exceptions`` ================================ .. contents:: :local: .. currentmodule:: celery.exceptions .. automodule:: celery.exceptions :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.consumer.rst0000644000175000017500000000045413130607475024635 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.consumer`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.consumer .. automodule:: celery.worker.consumer.consumer :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.amqp.rst0000644000175000017500000000041613130607475021343 0ustar omeromer00000000000000=========================================================== ``celery.bin.amqp`` =========================================================== .. contents:: :local: .. currentmodule:: celery.bin.amqp .. automodule:: celery.bin.amqp :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.call.rst0000644000175000017500000000040213130607475021313 0ustar omeromer00000000000000===================================================== ``celery.bin.call`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.call .. automodule:: celery.bin.call :members: :undoc-members: celery-4.1.0/docs/reference/celery.utils.debug.rst0000644000175000017500000000154713130607475022067 0ustar omeromer00000000000000==================================== ``celery.utils.debug`` ==================================== .. contents:: :local: Sampling Memory Usage ===================== This module can be used to diagnose and sample the memory usage of parts of your application. For example, to sample the memory usage of calling tasks you can do this: .. code-block:: python from celery.utils.debug import sample_mem, memdump from tasks import add try: for i in range(100): for j in range(100): add.delay(i, j) sample_mem() finally: memdump() API Reference ============= .. currentmodule:: celery.utils.debug .. automodule:: celery.utils.debug .. autofunction:: sample_mem .. 
autofunction:: memdump .. autofunction:: sample .. autofunction:: mem_rss .. autofunction:: ps celery-4.1.0/docs/reference/celery.worker.consumer.heart.rst0000644000175000017500000000044313130607475024103 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.heart`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.heart .. automodule:: celery.worker.consumer.heart :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.testing.app.rst0000644000175000017500000000044013130607475023526 0ustar omeromer00000000000000==================================== ``celery.contrib.testing.app`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.app .. automodule:: celery.contrib.testing.app :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.migrate.rst0000644000175000017500000000041313130607475022032 0ustar omeromer00000000000000===================================================== ``celery.bin.migrate`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.migrate .. automodule:: celery.bin.migrate :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.request.rst0000644000175000017500000000036413130607475022640 0ustar omeromer00000000000000===================================== ``celery.worker.request`` ===================================== .. contents:: :local: .. currentmodule:: celery.worker.request .. automodule:: celery.worker.request :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.backends.rst0000644000175000017500000000035213130607475022166 0ustar omeromer00000000000000=================================== ``celery.app.backends`` =================================== .. contents:: :local: .. currentmodule:: celery.app.backends .. automodule:: celery.app.backends :members: :undoc-members: celery-4.1.0/docs/reference/celery.bootsteps.rst0000644000175000017500000000035713130607475021664 0ustar omeromer00000000000000========================================== ``celery.bootsteps`` ========================================== .. contents:: :local: .. currentmodule:: celery.bootsteps .. automodule:: celery.bootsteps :members: :undoc-members: celery-4.1.0/docs/reference/celery.events.receiver.rst0000644000175000017500000000045713130607475022752 0ustar omeromer00000000000000================================================================= ``celery.events.receiver`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.receiver .. automodule:: celery.events.receiver :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.rst0000644000175000017500000000042113130607475022775 0ustar omeromer00000000000000================================================== ``celery.worker.consumer`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer .. automodule:: celery.worker.consumer :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.abortable.rst0000644000175000017500000000044213130607475023227 0ustar omeromer00000000000000======================================================= ``celery.contrib.abortable`` ======================================================= .. contents:: :local: .. currentmodule:: celery.contrib.abortable .. 
automodule:: celery.contrib.abortable :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.purge.rst0000644000175000017500000000040513130607475021525 0ustar omeromer00000000000000===================================================== ``celery.bin.purge`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.purge .. automodule:: celery.bin.purge :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.amqp.rst0000644000175000017500000000205213130607475021351 0ustar omeromer00000000000000.. currentmodule:: celery.app.amqp .. automodule:: celery.app.amqp .. contents:: :local: AMQP ---- .. autoclass:: AMQP .. attribute:: Connection Broker connection class used. Default is :class:`kombu.Connection`. .. attribute:: Consumer Base Consumer class used. Default is :class:`kombu.Consumer`. .. attribute:: Producer Base Producer class used. Default is :class:`kombu.Producer`. .. attribute:: queues All currently defined task queues (a :class:`Queues` instance). .. automethod:: Queues .. automethod:: Router .. automethod:: flush_routes .. autoattribute:: create_task_message .. autoattribute:: send_task_message .. autoattribute:: default_queue .. autoattribute:: default_exchange .. autoattribute:: producer_pool .. autoattribute:: router .. autoattribute:: routes Queues ------ .. autoclass:: Queues :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.agent.rst0000644000175000017500000000044313130607475024076 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.agent`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.agent .. automodule:: celery.worker.consumer.agent :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.testing.worker.rst0000644000175000017500000000045113130607475024261 0ustar omeromer00000000000000==================================== ``celery.contrib.testing.worker`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.testing.worker .. automodule:: celery.contrib.testing.worker :members: :undoc-members: celery-4.1.0/docs/reference/celery.events.state.rst0000644000175000017500000000044613130607475022264 0ustar omeromer00000000000000================================================================= ``celery.events.state`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.state .. automodule:: celery.events.state :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.mingle.rst0000644000175000017500000000044613130607475024256 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.mingle`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.mingle .. automodule:: celery.worker.consumer.mingle :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.rst0000644000175000017500000000044013130607475020413 0ustar omeromer00000000000000.. currentmodule:: celery.app .. automodule:: celery.app .. contents:: :local: Proxies ------- .. autodata:: default_app Functions --------- .. autofunction:: app_or_default .. autofunction:: enable_trace .. 
autofunction:: disable_trace celery-4.1.0/docs/reference/celery.contrib.pytest.rst0000644000175000017500000000042113130607475022621 0ustar omeromer00000000000000==================================== ``celery.contrib.pytest`` ==================================== .. contents:: :local: API Reference ============= .. currentmodule:: celery.contrib.pytest .. automodule:: celery.contrib.pytest :members: :undoc-members: celery-4.1.0/docs/reference/celery.result.rst0000644000175000017500000000031413130607475021151 0ustar omeromer00000000000000============================= ``celery.result`` ============================= .. contents:: :local: .. currentmodule:: celery.result .. automodule:: celery.result :members: :undoc-members: celery-4.1.0/docs/reference/celery.apps.multi.rst0000644000175000017500000000035413130607475021733 0ustar omeromer00000000000000======================================= ``celery.apps.multi`` ======================================= .. contents:: :local: .. currentmodule:: celery.apps.multi .. automodule:: celery.apps.multi :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.migrate.rst0000644000175000017500000000034613130607475022727 0ustar omeromer00000000000000============================ ``celery.contrib.migrate`` ============================ .. contents:: :local: .. currentmodule:: celery.contrib.migrate .. automodule:: celery.contrib.migrate :members: :undoc-members: celery-4.1.0/docs/reference/celery.loaders.rst0000644000175000017500000000035513130607475021271 0ustar omeromer00000000000000============================================ ``celery.loaders`` ============================================ .. contents:: :local: .. currentmodule:: celery.loaders .. automodule:: celery.loaders :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.worker.rst0000644000175000017500000000036213130607475021716 0ustar omeromer00000000000000========================================== ``celery.bin.worker`` ========================================== .. contents:: :local: .. currentmodule:: celery.bin.worker .. automodule:: celery.bin.worker :members: :undoc-members: celery-4.1.0/docs/reference/celery.contrib.rdb.rst0000644000175000017500000000040013130607475022035 0ustar omeromer00000000000000================================== ``celery.contrib.rdb`` ================================== .. currentmodule:: celery.contrib.rdb .. automodule:: celery.contrib.rdb .. autofunction:: set_trace .. autofunction:: debugger .. autoclass:: Rdb celery-4.1.0/docs/reference/celery.schedules.rst0000644000175000017500000000040513130607475021613 0ustar omeromer00000000000000===================================================== ``celery.schedules`` ===================================================== .. contents:: :local: .. currentmodule:: celery.schedules .. automodule:: celery.schedules :members: :undoc-members: celery-4.1.0/docs/reference/celery.apps.worker.rst0000644000175000017500000000035713130607475022115 0ustar omeromer00000000000000======================================= ``celery.apps.worker`` ======================================= .. contents:: :local: .. currentmodule:: celery.apps.worker .. automodule:: celery.apps.worker :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.control.rst0000644000175000017500000000041313130607475022062 0ustar omeromer00000000000000===================================================== ``celery.bin.control`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.control .. 
automodule:: celery.bin.control :members: :undoc-members: celery-4.1.0/docs/reference/celery.loaders.app.rst0000644000175000017500000000034313130607475022045 0ustar omeromer00000000000000================================= ``celery.loaders.app`` ================================= .. contents:: :local: .. currentmodule:: celery.loaders.app .. automodule:: celery.loaders.app :members: :undoc-members: celery-4.1.0/docs/reference/celery.events.event.rst0000644000175000017500000000044613130607475022265 0ustar omeromer00000000000000================================================================= ``celery.events.event`` ================================================================= .. contents:: :local: .. currentmodule:: celery.events.event .. automodule:: celery.events.event :members: :undoc-members: celery-4.1.0/docs/reference/celery.security.rst0000644000175000017500000000031013130607475021476 0ustar omeromer00000000000000======================== ``celery.security`` ======================== .. contents:: :local: .. currentmodule:: celery.security .. automodule:: celery.security :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.beat.rst0000644000175000017500000000037613130607475021325 0ustar omeromer00000000000000=================================================== ``celery.bin.beat`` =================================================== .. contents:: :local: .. currentmodule:: celery.bin.beat .. automodule:: celery.bin.beat :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.result.rst0000644000175000017500000000041013130607475021715 0ustar omeromer00000000000000===================================================== ``celery.bin.result`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.result .. automodule:: celery.bin.result :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.upgrade.rst0000644000175000017500000000041313130607475022031 0ustar omeromer00000000000000===================================================== ``celery.bin.upgrade`` ===================================================== .. contents:: :local: .. currentmodule:: celery.bin.upgrade .. automodule:: celery.bin.upgrade :members: :undoc-members: celery-4.1.0/docs/reference/celery.worker.consumer.events.rst0000644000175000017500000000044613130607475024307 0ustar omeromer00000000000000================================================== ``celery.worker.consumer.events`` ================================================== .. contents:: :local: .. currentmodule:: celery.worker.consumer.events .. automodule:: celery.worker.consumer.events :members: :undoc-members: celery-4.1.0/docs/reference/celery.bin.multi.rst0000644000175000017500000000037113130607475021537 0ustar omeromer00000000000000=============================================== ``celery.bin.multi`` =============================================== .. contents:: :local: .. currentmodule:: celery.bin.multi .. automodule:: celery.bin.multi :members: :undoc-members: celery-4.1.0/docs/reference/celery.app.control.rst0000644000175000017500000000041113130607475022070 0ustar omeromer00000000000000==================================================== ``celery.app.control`` ==================================================== .. contents:: :local: .. currentmodule:: celery.app.control .. automodule:: celery.app.control :members: :undoc-members: celery-4.1.0/docs/whatsnew-3.1.rst0000644000175000017500000012614113130607475016561 0ustar omeromer00000000000000.. 
_whatsnew-3.1: =========================================== What's new in Celery 3.1 (Cipater) =========================================== :Author: Ask Solem (``ask at celeryproject.org``) .. sidebar:: Change history What's new documents describe the changes in major versions; we also have a :ref:`changelog` that lists the changes in bugfix releases (0.0.x), while older series are archived under the :ref:`history` section. Celery is a simple, flexible, and reliable distributed system to process vast amounts of messages, while providing operations with the tools required to maintain such a system. It's a task queue with focus on real-time processing, while also supporting task scheduling. Celery has a large and diverse community of users and contributors; you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should go read the :ref:`introduction `. While this version is backward compatible with previous versions it's important that you read the following section. This version is officially supported on CPython 2.6, 2.7, and 3.3, and also supported on PyPy. .. _`website`: http://celeryproject.org/ .. topic:: Table of Contents Make sure you read the important notes before upgrading to this version. .. contents:: :local: :depth: 2 Preface ======= Deadlocks have long plagued our workers, and while uncommon they're not acceptable. They're also infamous for being extremely hard to diagnose and reproduce, so to make this job easier I wrote a stress test suite that bombards the worker with different tasks in an attempt to break it. What happens if thousands of worker child processes are killed every second? What if we also kill the broker connection every 10 seconds? These are examples of what the stress test suite will do to the worker, and it reruns these tests using different configuration combinations to find edge case bugs. The end result was that I had to rewrite the prefork pool to avoid the use of the POSIX semaphore. This was extremely challenging, but after months of hard work the worker now finally passes the stress test suite. There are probably more bugs to find, but the good news is that we now have a tool to reproduce them, so should you be so unlucky as to experience a bug, we'll write a test for it and squash it! Note that I've also moved many broker transports into experimental status: the only transports recommended for production use today are RabbitMQ and Redis. I don't have the resources to maintain all of them, so bugs are left unresolved. I hope that someone will step up and take responsibility for these transports or donate resources to improve them, but as the situation is now I don't think the quality is up to par with the rest of the code-base, so I cannot recommend them for production use. The next version, Celery 4.0, will focus on performance and removing rarely used parts of the library. Work has also started on a new message protocol, supporting multiple languages and more. The initial draft can be found :ref:`here `. This has probably been the hardest release I've worked on, so no introduction to this changelog would be complete without a massive thank you to everyone who contributed and helped me test it! Thank you for your support! *— Ask Solem* .. _v310-important: Important Notes =============== Dropped support for Python 2.5 ------------------------------ Celery now requires Python 2.6 or later. The new dual code base runs on both Python 2 and 3, without requiring the ``2to3`` porting tool. .. 
note:: This is also the last version to support Python 2.6! From Celery 4.0 and on-wards Python 2.7 or later will be required. .. _last-version-to-enable-pickle: Last version to enable Pickle by default ---------------------------------------- Starting from Celery 4.0 the default serializer will be json. If you depend on pickle being accepted you should be prepared for this change by explicitly allowing your worker to consume pickled messages using the :setting:`CELERY_ACCEPT_CONTENT` setting: .. code-block:: python CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] Make sure you only select the serialization formats you'll actually be using, and make sure you've properly secured your broker from unwanted access (see the :ref:`Security Guide `). The worker will emit a deprecation warning if you don't define this setting. .. topic:: for Kombu users Kombu 3.0 no longer accepts pickled messages by default, so if you use Kombu directly then you have to configure your consumers: see the :ref:`Kombu 3.0 Changelog ` for more information. Old command-line programs removed and deprecated ------------------------------------------------ Everyone should move to the new :program:`celery` umbrella command, so we're incrementally deprecating the old command names. In this version we've removed all commands that aren't used in init-scripts. The rest will be removed in 4.0. +-------------------+--------------+-------------------------------------+ | Program | New Status | Replacement | +===================+==============+=====================================+ | ``celeryd`` | *DEPRECATED* | :program:`celery worker` | +-------------------+--------------+-------------------------------------+ | ``celerybeat`` | *DEPRECATED* | :program:`celery beat` | +-------------------+--------------+-------------------------------------+ | ``celeryd-multi`` | *DEPRECATED* | :program:`celery multi` | +-------------------+--------------+-------------------------------------+ | ``celeryctl`` | **REMOVED** | :program:`celery inspect|control` | +-------------------+--------------+-------------------------------------+ | ``celeryev`` | **REMOVED** | :program:`celery events` | +-------------------+--------------+-------------------------------------+ | ``camqadm`` | **REMOVED** | :program:`celery amqp` | +-------------------+--------------+-------------------------------------+ If this isn't a new installation then you may want to remove the old commands: .. code-block:: console $ pip uninstall celery $ # repeat until it fails # ... $ pip uninstall celery $ pip install celery Please run :program:`celery --help` for help using the umbrella command. .. _v310-news: News ==== Prefork Pool Improvements ------------------------- These improvements are only active if you use an async capable transport. This means only RabbitMQ (AMQP) and Redis are supported at this point and other transports will still use the thread-based fallback implementation. - Pool is now using one IPC queue per child process. Previously the pool shared one queue between all child processes, using a POSIX semaphore as a mutex to achieve exclusive read and write access. The POSIX semaphore has now been removed and each child process gets a dedicated queue. This means that the worker will require more file descriptors (two descriptors per process), but it also means that performance is improved and we can send work to individual child processes. 
POSIX semaphores aren't released when a process is killed, so killing processes could lead to a deadlock if it happened while the semaphore was acquired. There's no good solution to fix this, so the best option was to remove the semaphore. - Asynchronous write operations The pool now uses async I/O to send work to the child processes. - Lost process detection is now immediate. If a child process is killed or exits mysteriously the pool previously had to wait for 30 seconds before marking the task with a :exc:`~celery.exceptions.WorkerLostError`. It had to do this because the out-queue was shared between all processes, and the pool couldn't be certain whether the process completed the task or not. So an arbitrary timeout of 30 seconds was chosen, as it was believed that the out-queue would've been drained by this point. This timeout is no longer necessary, and so the task can be marked as failed as soon as the pool gets the notification that the process exited. - Rare race conditions fixed Most of these bugs were never reported to us, but were discovered while running the new stress test suite. Caveats ~~~~~~~ .. topic:: Long running tasks The new pool will send tasks to a child process as long as the process in-queue is writable, and since the socket is buffered this means that the processes are, in effect, prefetching tasks. This benefits performance, but it also means that other tasks may be stuck waiting for a long running task to complete:: -> send T1 to Process A # A executes T1 -> send T2 to Process B # B executes T2 <- T2 complete -> send T3 to Process A # A still executing T1, T3 stuck in local buffer and # won't start until T1 returns The buffer size varies based on the operating system: some may have a buffer as small as 64KB but on recent Linux versions the buffer size is 1MB (can only be changed system wide). You can disable this prefetching behavior by enabling the :option:`-Ofair ` worker option: .. code-block:: console $ celery -A proj worker -l info -Ofair With this option enabled the worker will only write to child processes that are available for work, disabling the prefetch behavior. .. topic:: Max tasks per child If a process exits and pool prefetch is enabled the worker may have already written many tasks to the process in-queue, and these tasks must then be moved back and rewritten to a new process. This is very expensive if you have the :option:`--max-tasks-per-child ` option set to a low value (e.g., less than 10); in that case you should not be using the :option:`-Ofast ` scheduler option. Django supported out of the box ------------------------------- Celery 3.0 introduced a shiny new API, but unfortunately didn't have a solution for Django users. The situation changes with this version as Django is now supported in core and new Django users coming to Celery are now expected to use the new API directly. The Django community has a convention where there's a separate ``django-x`` package for every library, acting like a bridge between Django and the library. Having a separate project for Django users has been a pain for Celery, with multiple issue trackers and multiple documentation sources, and, lastly, since 3.0 we even had different APIs. With this version we challenge that convention and Django users will use the same library, the same API and the same documentation as everyone else. There's no rush to port your existing code to use the new API, but if you'd like to experiment with it you should know that: - You need to use a Celery application instance. 
The new Celery API introduced in 3.0 requires users to instantiate the library by creating an application: .. code-block:: python from celery import Celery app = Celery() - You need to explicitly integrate Celery with Django Celery won't automatically use the Django settings, so you can either configure Celery separately or you can tell it to use the Django settings with: .. code-block:: python app.config_from_object('django.conf:settings') Neither will it automatically traverse your installed apps to find task modules. If you want this behavior, you must explicitly pass the list of installed Django apps (or a callable returning it) to the Celery app: .. code-block:: python from django.conf import settings app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) - You no longer use ``manage.py`` Instead you use the :program:`celery` command directly: .. code-block:: console $ celery -A proj worker -l info For this to work your app module must set the :envvar:`DJANGO_SETTINGS_MODULE` environment variable; see the example in the :ref:`Django guide `. To get started with the new API you should first read the :ref:`first-steps` tutorial, and then you should read the Django-specific instructions in :ref:`django-first-steps`. The fixes and improvements applied by the :pypi:`django-celery` library are now automatically applied by core Celery when it detects that the :envvar:`DJANGO_SETTINGS_MODULE` environment variable is set. The distribution ships with a new example project using Django in :file:`examples/django`: https://github.com/celery/celery/tree/3.1/examples/django Some features still require the :pypi:`django-celery` library: - Celery doesn't implement the Django database or cache result backends. - Celery doesn't ship with the database-based periodic task scheduler. .. note:: If you're still using the old API when you upgrade to Celery 3.1 then you must make sure that your settings module contains the ``djcelery.setup_loader()`` line, since this will no longer happen as a side-effect of importing the :pypi:`django-celery` module. New users (or if you've ported to the new API) don't need the ``setup_loader`` line anymore, and must make sure to remove it. Events are now ordered using logical time ----------------------------------------- Keeping physical clocks in perfect sync is impossible, so using time-stamps to order events in a distributed system isn't reliable. Celery event messages have included a logical clock value for some time, but starting with this version that field is also used to order them. Also, events now record timezone information by including a new ``utcoffset`` field in the event message. This is a signed integer telling the difference from UTC time in hours, so, for example, an event sent from the Europe/London timezone in daylight saving time will have an offset of 1. :class:`@events.Receiver` will automatically convert the time-stamps to the local timezone. .. note:: The logical clock is synchronized with other nodes in the same cluster (neighbors), so this means that the logical epoch will start at the point when the first worker in the cluster starts. If all of the workers are shut down the clock value will be lost and reset to 0. To protect against this, you should specify the :option:`celery worker --statedb` option such that the worker can persist the clock value at shutdown. You may notice that the logical clock is an integer value and increases very rapidly. 
Don't worry about the value overflowing though, as even in the most busy clusters it may take several millennia before the clock exceeds a 64-bit value. New worker node name format (``name@host``) ------------------------------------------- Node names are now constructed from two elements: name and host-name separated by '@'. This change was made to more easily identify multiple instances running on the same machine. If a custom name isn't specified then the worker will use the name 'celery' by default, resulting in a fully qualified node name of 'celery@hostname': .. code-block:: console $ celery worker -n example.com celery@example.com To also set the name you must include the @: .. code-block:: console $ celery worker -n worker1@example.com worker1@example.com The worker will identify itself using the fully qualified node name in events and broadcast messages, so where before a worker would identify itself as 'worker1.example.com', it'll now use 'celery@worker1.example.com'. Remember that the :option:`-n ` argument also supports simple variable substitutions, so if the current host-name is *george.example.com* then the ``%h`` macro will expand into that: .. code-block:: console $ celery worker -n worker1@%h worker1@george.example.com The available substitutions are as follows: +---------------+----------------------------------------+ | Variable | Substitution | +===============+========================================+ | ``%h`` | Full host-name (including domain name) | +---------------+----------------------------------------+ | ``%d`` | Domain name only | +---------------+----------------------------------------+ | ``%n`` | Host-name only (without domain name) | +---------------+----------------------------------------+ | ``%%`` | The character ``%`` | +---------------+----------------------------------------+ Bound tasks ----------- The task decorator can now create "bound tasks", which means that the task will receive the ``self`` argument. .. code-block:: python @app.task(bind=True) def send_twitter_status(self, oauth, tweet): try: twitter = Twitter(oauth) twitter.update_status(tweet) except (Twitter.FailWhaleError, Twitter.LoginError) as exc: raise self.retry(exc=exc) Using *bound tasks* is now the recommended approach whenever you need access to the task instance or request context. Previously one would have had to refer to the name of the task instead (``send_twitter_status.retry``), but this could lead to problems in some configurations. Mingle: Worker synchronization ------------------------------ The worker will now attempt to synchronize with other workers in the same cluster. Synchronized data currently includes revoked tasks and logical clock. This only happens at start-up and causes a one second start-up delay to collect broadcast responses from other workers. You can disable this bootstep using the :option:`celery worker --without-mingle` option. Gossip: Worker <-> Worker communication --------------------------------------- Workers are now passively subscribing to worker-related events like heartbeats. This means that a worker knows what other workers are doing and can detect if they go offline. Currently this is only used for clock synchronization, but there are many possibilities for future additions and you can write extensions that take advantage of this already. Some ideas include consensus protocols, rerouting tasks to the best worker (based on resource usage or data locality), or restarting workers when they crash. A minimal sketch of what such an extension could look like is shown below. 
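As an illustration only (this isn't an official example): a custom consumer bootstep could react to workers joining and leaving the cluster by registering callbacks with the ``Gossip`` bootstep. The ``ClusterLogger`` name below is made up, and the sketch assumes the ``node_join``/``node_leave`` callback sets exposed by ``Gossip``, whose callbacks receive the event-state ``Worker`` object:

.. code-block:: python

    from celery import Celery, bootsteps
    from celery.worker.consumer import Gossip


    class ClusterLogger(bootsteps.StartStopStep):
        # Hypothetical step: depend on Gossip so ``c.gossip`` is set up first.
        requires = (Gossip,)

        def start(self, c):
            # ``c`` is the Consumer instance; register our callbacks
            # (they simply never fire if gossip is disabled).
            c.gossip.on.node_join.add(self.on_node_join)
            c.gossip.on.node_leave.add(self.on_node_leave)

        def on_node_join(self, worker):
            print('{0} joined the cluster'.format(worker.hostname))

        def on_node_leave(self, worker):
            print('{0} left the cluster'.format(worker.hostname))


    app = Celery(broker='amqp://')
    app.steps['consumer'].add(ClusterLogger)

The step is added to ``app.steps['consumer']`` so it becomes part of the consumer blueprint of every worker started from this app.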
We believe that although this is a small addition, it opens amazing possibilities. You can disable this bootstep using the :option:`celery worker --without-gossip` option. Bootsteps: Extending the worker ------------------------------- By writing bootsteps you can now easily extend the consumer part of the worker to add additional features, like custom message consumers. The worker has been using bootsteps for some time, but these were never documented. In this version the consumer part of the worker has also been rewritten to use bootsteps, and the new :ref:`guide-extending` guide documents examples of extending the worker, including adding custom message consumers. See the :ref:`guide-extending` guide for more information. .. note:: Bootsteps written for older versions won't be compatible with this version, as the API has changed significantly. The old API was experimental and internal, but should you be unlucky enough to have used it, please contact the mailing-list and we'll help you port the bootstep to the new API. New RPC result backend ---------------------- This new experimental version of the ``amqp`` result backend is a good alternative to use in classical RPC scenarios, where the process that initiates the task is always the process to retrieve the result. It uses Kombu to send and retrieve results, and each client uses a unique queue for replies to be sent to. This avoids the significant overhead of the original amqp result backend, which creates one queue per task. By default results sent using this backend won't persist, so they won't survive a broker restart. You can enable the :setting:`CELERY_RESULT_PERSISTENT` setting to change that. .. code-block:: python CELERY_RESULT_BACKEND = 'rpc' CELERY_RESULT_PERSISTENT = True Note that chords are currently not supported by the RPC backend. Time limits can now be set by the client ---------------------------------------- Two new options have been added to the Calling API: ``time_limit`` and ``soft_time_limit``: .. code-block:: pycon >>> res = add.apply_async((2, 2), time_limit=10, soft_time_limit=8) >>> res = add.subtask((2, 2), time_limit=10, soft_time_limit=8).delay() >>> res = add.s(2, 2).set(time_limit=10, soft_time_limit=8).delay() Contributed by Mher Movsisyan. Redis: Broadcast messages and virtual hosts ------------------------------------------- Broadcast messages are currently seen by all virtual hosts when using the Redis transport. You can now fix this by enabling a prefix for all channels so that the messages are separated: .. code-block:: python BROKER_TRANSPORT_OPTIONS = {'fanout_prefix': True} Note that you won't be able to communicate with workers running older versions, or workers that don't have this setting enabled. This setting will be the default in a future version. Related to Issue #1490. :pypi:`pytz` replaces :pypi:`python-dateutil` dependency -------------------------------------------------------- Celery no longer depends on the :pypi:`python-dateutil` library; instead a new dependency on the :pypi:`pytz` library has been added. The :pypi:`pytz` library was already recommended for accurate timezone support. This also means that dependencies are the same for both Python 2 and Python 3, and that the :file:`requirements/default-py3k.txt` file has been removed. Support for :pypi:`setuptools` extra requirements ------------------------------------------------- Pip now supports the :pypi:`setuptools` extra requirements format, so we've removed the old bundles concept, and instead specify setuptools extras. 
You install extras by specifying them inside brackets: .. code-block:: console $ pip install celery[redis,mongodb] The above will install the dependencies for Redis and MongoDB. You can list as many extras as you want. .. warning:: You can't use the ``celery-with-*`` packages anymore, as these won't be updated to use Celery 3.1. +-------------+-------------------------+---------------------------+ | Extension | Requirement entry | Type | +=============+=========================+===========================+ | Redis | ``celery[redis]`` | transport, result backend | +-------------+-------------------------+---------------------------+ | MongoDB | ``celery[mongodb]`` | transport, result backend | +-------------+-------------------------+---------------------------+ | CouchDB | ``celery[couchdb]`` | transport | +-------------+-------------------------+---------------------------+ | Beanstalk | ``celery[beanstalk]`` | transport | +-------------+-------------------------+---------------------------+ | ZeroMQ | ``celery[zeromq]`` | transport | +-------------+-------------------------+---------------------------+ | Zookeeper | ``celery[zookeeper]`` | transport | +-------------+-------------------------+---------------------------+ | SQLAlchemy | ``celery[sqlalchemy]`` | transport, result backend | +-------------+-------------------------+---------------------------+ | librabbitmq | ``celery[librabbitmq]`` | transport (C amqp client) | +-------------+-------------------------+---------------------------+ The complete list with examples is found in the :ref:`bundles` section. ``subtask.__call__()`` now executes the task directly ----------------------------------------------------- A misunderstanding led to ``Signature.__call__`` being an alias of ``.delay``, but this doesn't conform to the calling API of ``Task``, which calls the underlying task method. This means that: .. code-block:: python @app.task def add(x, y): return x + y add.s(2, 2)() now does the same as calling the task directly: .. code-block:: pycon >>> add(2, 2) In Other News ------------- - Now depends on :ref:`Kombu 3.0 `. - Now depends on :pypi:`billiard` version 3.3. - Worker will now crash if running as the root user with pickle enabled. - Canvas: ``group.apply_async`` and ``chain.apply_async`` no longer start a separate task. That the group and chord primitives supported the "calling API" like other subtasks was a nice idea, but it was useless in practice and often confused users. If you still want this behavior you can define a task to do it for you. - New method ``Signature.freeze()`` can be used to "finalize" signatures/subtasks. Regular signature: .. code-block:: pycon >>> s = add.s(2, 2) >>> result = s.freeze() >>> result >>> s.delay() Group: .. code-block:: pycon >>> g = group(add.s(2, 2), add.s(4, 4)) >>> result = g.freeze() >>> g() - Chord exception behavior defined (Issue #1172). From this version the chord callback will change state to FAILURE when a task that's part of a chord raises an exception. See more at :ref:`chord-errors`. - New ability to specify additional command line options to the worker and beat programs. The :attr:`@user_options` attribute can be used to add additional command-line arguments, and expects :mod:`optparse`-style options: .. code-block:: python from celery import Celery from celery.bin import Option app = Celery() app.user_options['worker'].add( Option('--my-argument'), ) See the :ref:`guide-extending` guide for more information. 
- All events now include a ``pid`` field, which is the process id of the process that sent the event. - Event heartbeats are now calculated based on the time when the event was received by the monitor, and not the time reported by the worker. This means that a worker with an out-of-sync clock will no longer show as 'Offline' in monitors. A warning is now emitted if the difference between the sender's time and the internal time is greater than 15 seconds, suggesting that the clocks are out of sync. - Monotonic clock support. A monotonic clock is now used for timeouts and scheduling. The monotonic clock function is built-in starting from Python 3.4, but we also have fallback implementations for Linux and macOS. - :program:`celery worker` now supports a new :option:`--detach ` argument to start the worker as a daemon in the background. - :class:`@events.Receiver` now sets a ``local_received`` field for incoming events, which is set to the time when the event was received. - :class:`@events.Dispatcher` now accepts a ``groups`` argument that specifies a white-list of event groups that'll be sent. The type of an event is a string separated by '-', where the part before the first '-' is the group. Currently there are only two groups: ``worker`` and ``task``. A dispatcher instantiated as follows: .. code-block:: pycon >>> app.events.Dispatcher(connection, groups=['worker']) will only send worker-related events and silently drop any attempts to send events related to any other group. - New :setting:`BROKER_FAILOVER_STRATEGY` setting. This setting can be used to change the transport fail-over strategy; it can either be a callable returning an iterable or the name of a Kombu built-in failover strategy. Default is "round-robin". Contributed by Matt Wise. - ``Result.revoke`` will no longer wait for replies. You can add the ``reply=True`` argument if you really want to wait for responses from the workers. - Better support for link and link_error tasks for chords. Contributed by Steeve Morin. - Worker: Now emits a warning if the :setting:`CELERYD_POOL` setting is set to enable the eventlet/gevent pools. The `-P` option should always be used to select the eventlet/gevent pool to ensure that the patches are applied as early as possible. If you start the worker in a wrapper (like Django's :file:`manage.py`) then you must apply the patches manually, for example by creating an alternative wrapper that monkey patches at the start of the program before importing any other modules. - There's now an 'inspect clock' command which will collect the current logical clock value from workers. - `celery inspect stats` now contains the process id of the worker's main process. Contributed by Mher Movsisyan. - New remote control command to dump a worker's configuration. Example: .. code-block:: console $ celery inspect conf Configuration values will be converted to values supported by JSON where possible. Contributed by Mher Movsisyan. - New settings :setting:`CELERY_EVENT_QUEUE_TTL` and :setting:`CELERY_EVENT_QUEUE_EXPIRES`. These control when a monitor's event queue is deleted, and for how long events published to that queue will be visible. Only supported on RabbitMQ. - New Couchbase result backend. This result backend enables you to store and retrieve task results using `Couchbase`_. See :ref:`conf-couchbase-result-backend` for more information about configuring this result backend. Contributed by Alain Masiero. .. _`Couchbase`: https://www.couchbase.com - CentOS init-script now supports starting multiple worker instances. 
See the script header for details. Contributed by Jonathan Jordan. - ``AsyncResult.iter_native`` now sets the default interval parameter to 0.5. Fix contributed by Idan Kamara. - New setting :setting:`BROKER_LOGIN_METHOD`. This setting can be used to specify an alternate login method for the AMQP transports. Contributed by Adrien Guinet. - The ``dump_conf`` remote control command will now give the string representation for types that aren't JSON compatible. - Function `celery.security.setup_security` is now :func:`@setup_security`. - Task retry now propagates the message expiry value (Issue #980). The value is forwarded as is, so the expiry time won't change. To update the expiry time you'd have to pass a new expires argument to ``retry()``. - Worker now crashes if a channel error occurs. Channel errors are transport-specific and are the exceptions returned by ``Connection.channel_errors``. For RabbitMQ this means that Celery will crash if the equivalence checks for one of the queues in :setting:`CELERY_QUEUES` fail, which makes sense since this is a scenario where manual intervention is required. - Calling ``AsyncResult.get()`` on a chain now propagates errors for previous tasks (Issue #1014). - The parent attribute of ``AsyncResult`` is now reconstructed when using JSON serialization (Issue #1014). - Worker disconnection logs are now logged with severity warning instead of error. Contributed by Chris Adams. - ``events.State`` no longer crashes when it receives unknown event types. - SQLAlchemy Result Backend: New :setting:`CELERY_RESULT_DB_TABLENAMES` setting can be used to change the name of the database tables used. Contributed by Ryan Petrello. - SQLAlchemy Result Backend: Now calls ``engine.dispose`` after fork (Issue #1564). If you create your own SQLAlchemy engines then you must also make sure that these are closed after fork in the worker: .. code-block:: python from multiprocessing.util import register_after_fork from sqlalchemy import create_engine engine = create_engine(*engine_args) register_after_fork(engine, engine.dispose) - A stress test suite for the Celery worker has been written. This is located in the ``funtests/stress`` directory in the git repository. There's a README file there to get you started. - The logger named ``celery.concurrency`` has been renamed to ``celery.pool``. - New command line utility ``celery graph``. This utility creates graphs in GraphViz dot format. You can create graphs from the currently installed bootsteps: .. code-block:: console # Create graph of currently installed bootsteps in both the worker # and consumer name-spaces. $ celery graph bootsteps | dot -T png -o steps.png # Graph of the consumer name-space only. $ celery graph bootsteps consumer | dot -T png -o consumer_only.png # Graph of the worker name-space only. $ celery graph bootsteps worker | dot -T png -o worker_only.png Or graphs of workers in a cluster: .. code-block:: console # Create graph from the current cluster $ celery graph workers | dot -T png -o workers.png # Create graph from a specified list of workers $ celery graph workers nodes:w1,w2,w3 | dot -T png workers.png # …also specify the number of threads in each worker $ celery graph workers nodes:w1,w2,w3 threads:2,4,6 # …also specify the broker and backend URLs shown in the graph $ celery graph workers broker:amqp:// backend:redis:// # …also specify the max number of workers/threads shown (wmax/tmax), # enumerating anything that exceeds that number. $ celery graph workers wmax:10 tmax:3 - Changed the way that app instances are pickled. 
Apps can now define a ``__reduce_keys__`` method that's used instead of the old ``AppPickler`` attribute. For example, if your app defines a custom 'foo' attribute that needs to be preserved when pickling you can define a ``__reduce_keys__`` as such: .. code-block:: python import celery class Celery(celery.Celery): def __init__(self, *args, **kwargs): super(Celery, self).__init__(*args, **kwargs) self.foo = kwargs.get('foo') def __reduce_keys__(self): keys = super(Celery, self).__reduce_keys__() keys.update(foo=self.foo) return keys This is a much more convenient way to add support for pickling custom attributes. The old ``AppPickler`` is still supported but its use is discouraged and we would like to remove it in a future version. - Ability to trace imports for debugging purposes. The :envvar:`C_IMPDEBUG` environment variable can be set to trace imports as they occur: .. code-block:: console $ C_IMPDEBUG=1 celery worker -l info .. code-block:: console $ C_IMPDEBUG=1 celery shell - Message headers now available as part of the task request. Example adding and retrieving a header value: .. code-block:: python @app.task(bind=True) def t(self): return self.request.headers.get('sender') >>> t.apply_async(headers={'sender': 'George Costanza'}) - New :signal:`before_task_publish` signal dispatched before a task message is sent; it can be used to modify the final message fields (Issue #1281). - New :signal:`after_task_publish` signal replaces the old :signal:`task_sent` signal. The :signal:`task_sent` signal is now deprecated and shouldn't be used. - New :signal:`worker_process_shutdown` signal is dispatched in the prefork pool child processes as they exit. Contributed by Daniel M Taub. - ``celery.platforms.PIDFile`` renamed to :class:`celery.platforms.Pidfile`. - MongoDB Backend: Can now be configured using a URL: - MongoDB Backend: No longer using deprecated ``pymongo.Connection``. - MongoDB Backend: Now disables ``auto_start_request``. - MongoDB Backend: Now enables ``use_greenlets`` when eventlet/gevent is used. - ``subtask()`` / ``maybe_subtask()`` renamed to ``signature()``/``maybe_signature()``. Aliases still available for backwards compatibility. - The ``correlation_id`` message property is now automatically set to the id of the task. - The task message ``eta`` and ``expires`` fields now include timezone information. - All result backends ``store_result``/``mark_as_*`` methods must now accept a ``request`` keyword argument. - Events now emit a warning if the broken ``yajl`` library is used. - The :signal:`celeryd_init` signal now takes an extra keyword argument: ``option``. This is the mapping of parsed command line arguments, and can be used to prepare new preload arguments (``app.user_options['preload']``). - New callback: :meth:`@on_configure`. This callback is called when an app is about to be configured (a configuration key is required). - Worker: No longer forks on :sig:`HUP`. This means that the worker will reuse the same pid for better support with external process supervisors. Contributed by Jameel Al-Aziz. - Worker: The log message ``Got task from broker …`` was changed to ``Received task …``. - Worker: The log message ``Skipping revoked task …`` was changed to ``Discarding revoked task …``. - Optimization: Improved performance of ``ResultSet.join_native()``. Contributed by Stas Rudakou. - The :signal:`task_revoked` signal now accepts a new ``request`` argument (Issue #1555). 
- Worker: New :option:`-X ` command line argument to exclude queues (Issue #1399). The :option:`-X ` argument is the inverse of the :option:`-Q ` argument and accepts a list of queues to exclude (not consume from): .. code-block:: console # Consume from all queues in CELERY_QUEUES, but not the 'foo' queue. $ celery worker -A proj -l info -X foo - Adds :envvar:`C_FAKEFORK` environment variable for simple init-script/:program:`celery multi` debugging. This means that you can now do: .. code-block:: console $ C_FAKEFORK=1 celery multi start 10 or: .. code-block:: console $ C_FAKEFORK=1 /etc/init.d/celeryd start to skip the daemonization step, so that errors that would otherwise be hidden due to the missing stdout/stderr become visible. A ``dryrun`` command has been added to the generic init-script that enables this option. - New public API to push and pop from the current task stack: :func:`celery.app.push_current_task` and :func:`celery.app.pop_current_task`. - ``RetryTaskError`` has been renamed to :exc:`~celery.exceptions.Retry`. The old name is still available for backwards compatibility. - New semi-predicate exception :exc:`~celery.exceptions.Reject`. This exception can be raised to ``reject``/``requeue`` the task message, see :ref:`task-semipred-reject` for examples.
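A sketch of what this can look like (``process_payload`` is a hypothetical helper; late acknowledgment is required for the reject to have any effect):

.. code-block:: python

    from celery.exceptions import Reject

    @app.task(bind=True, acks_late=True)
    def process(self, payload):
        try:
            process_payload(payload)
        except MemoryError as exc:
            # Requeue the message so a worker with more memory can try it.
            raise Reject(exc, requeue=True)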
- :ref:`Semipredicates ` documented: (Retry/Ignore/Reject). .. _v310-removals: Scheduled Removals ================== - The ``BROKER_INSIST`` setting and the ``insist`` argument to ``~@connection`` are no longer supported. - The ``CELERY_AMQP_TASK_RESULT_CONNECTION_MAX`` setting is no longer supported. Use :setting:`BROKER_POOL_LIMIT` instead. - The ``CELERY_TASK_ERROR_WHITELIST`` setting is no longer supported. You should set the :class:`~celery.utils.mail.ErrorMail` attribute of the task class instead. You can also do this using :setting:`CELERY_ANNOTATIONS`: .. code-block:: python from celery import Celery from celery.utils.mail import ErrorMail class MyErrorMail(ErrorMail): whitelist = (KeyError, ImportError) def should_send(self, context, exc): return isinstance(exc, self.whitelist) app = Celery() app.conf.CELERY_ANNOTATIONS = { '*': { 'ErrorMail': MyErrorMail, } } - Functions that create broker connections no longer support the ``connect_timeout`` argument. This can now only be set using the :setting:`BROKER_CONNECTION_TIMEOUT` setting. This is because functions no longer create connections directly, but instead get them from the connection pool. - The ``CELERY_AMQP_TASK_RESULT_EXPIRES`` setting is no longer supported. Use :setting:`CELERY_TASK_RESULT_EXPIRES` instead. .. _v310-deprecations: Deprecation Time-line Changes ============================= See the :ref:`deprecation-timeline`. .. _v310-fixes: Fixes ===== - AMQP Backend: join didn't convert exceptions when using the json serializer. - Non-abstract task classes are now shared between apps (Issue #1150). Note that non-abstract task classes shouldn't be used in the new API. You should only create custom task classes when you use them as a base class in the ``@task`` decorator. This fix ensures backwards compatibility with older Celery versions, so that non-abstract task classes work even if a module is imported multiple times and the app is therefore also instantiated multiple times. - Worker: Workaround for Unicode errors in logs (Issue #427). - Task methods: ``.apply_async`` now works properly if the args list is None (Issue #1459). - Eventlet/gevent/solo/threads pools now properly handle :exc:`BaseException` errors raised by tasks. - :control:`autoscale` and :control:`pool_grow`/:control:`pool_shrink` remote control commands will now also automatically increase and decrease the consumer prefetch count. Fix contributed by Daniel M. Taub. - ``celery control pool_grow``/``pool_shrink`` commands didn't coerce string arguments to int. - Redis/Cache chords: Callback result is now set to failure if the group disappeared from the database (Issue #1094). - Worker: Now makes sure that the shutdown process isn't initiated more than once. - Programs: :program:`celery multi` now properly handles both ``-f`` and :option:`--logfile ` options (Issue #1541). .. _v310-internal: Internal changes ================ - Module ``celery.task.trace`` has been renamed to :mod:`celery.app.trace`. - Module ``celery.concurrency.processes`` has been renamed to :mod:`celery.concurrency.prefork`. - Classes that no longer fall back to using the default app: - Result backends (:class:`celery.backends.base.BaseBackend`) - :class:`celery.worker.WorkController` - :class:`celery.worker.Consumer` - :class:`celery.worker.request.Request` This means that you have to pass a specific app when instantiating these classes. - ``EventDispatcher.copy_buffer`` renamed to :meth:`@events.Dispatcher.extend_buffer`. - Removed unused and never documented global instance ``celery.events.state.state``. - :class:`@events.Receiver` is now a :class:`kombu.mixins.ConsumerMixin` subclass. - :class:`celery.apps.worker.Worker` has been refactored as a subclass of :class:`celery.worker.WorkController`. This removes a lot of duplicate functionality. - The ``Celery.with_default_connection`` method has been removed in favor of ``with app.connection_or_acquire`` (:meth:`@connection_or_acquire`). - The ``celery.results.BaseDictBackend`` class has been removed and is replaced by :class:`celery.results.BaseBackend`. celery-4.1.0/docs/getting-started/0000755000175000017500000000000013135426347016772 5ustar omeromer00000000000000celery-4.1.0/docs/getting-started/first-steps-with-celery.rst0000644000175000017500000003366013130607475024247 0ustar omeromer00000000000000.. _tut-celery: .. _first-steps: ========================= First Steps with Celery ========================= Celery is a task queue with batteries included. It's easy to use so that you can get started without learning the full complexities of the problem it solves. It's designed around best practices so that your product can scale and integrate with other languages, and it comes with the tools and support you need to run such a system in production. In this tutorial you'll learn the absolute basics of using Celery. Learn about: - Choosing and installing a message transport (broker). - Installing Celery and creating your first task. - Starting the worker and calling tasks. - Keeping track of tasks as they transition through different states, and inspecting return values. Celery may seem daunting at first - but don't worry - this tutorial will get you started in no time. It's deliberately kept simple, so as to not confuse you with advanced features. After you have finished this tutorial, it's a good idea to browse the rest of the documentation. For example the :ref:`next-steps` tutorial will showcase Celery's capabilities. .. contents:: :local: ..
_celerytut-broker: Choosing a Broker ================= Celery requires a solution to send and receive messages; usually this comes in the form of a separate service called a *message broker*. There are several choices available, including: RabbitMQ -------- `RabbitMQ`_ is feature-complete, stable, durable and easy to install. It's an excellent choice for a production environment. Detailed information about using RabbitMQ with Celery: :ref:`broker-rabbitmq` .. _`RabbitMQ`: http://www.rabbitmq.com/ If you're using Ubuntu or Debian install RabbitMQ by executing this command: .. code-block:: console $ sudo apt-get install rabbitmq-server When the command completes, the broker will already be running in the background, ready to move messages for you: ``Starting rabbitmq-server: SUCCESS``. Don't worry if you're not running Ubuntu or Debian, you can go to this website to find similarly simple installation instructions for other platforms, including Microsoft Windows: http://www.rabbitmq.com/download.html Redis ----- `Redis`_ is also feature-complete, but is more susceptible to data loss in the event of abrupt termination or power failures. Detailed information about using Redis: :ref:`broker-redis` .. _`Redis`: https://redis.io/ Other brokers ------------- In addition to the above, there are other experimental transport implementations to choose from, including :ref:`Amazon SQS `. See :ref:`broker-overview` for a full list. .. _celerytut-installation: Installing Celery ================= Celery is on the Python Package Index (PyPI), so it can be installed with standard Python tools like ``pip`` or ``easy_install``: .. code-block:: console $ pip install celery Application =========== The first thing you need is a Celery instance. We call this the *Celery application* or just *app* for short. As this instance is used as the entry-point for everything you want to do in Celery, like creating tasks and managing workers, it must be possible for other modules to import it. In this tutorial we keep everything contained in a single module, but for larger projects you want to create a :ref:`dedicated module `. Let's create the file :file:`tasks.py`: .. code-block:: python from celery import Celery app = Celery('tasks', broker='pyamqp://guest@localhost//') @app.task def add(x, y): return x + y The first argument to :class:`~celery.app.Celery` is the name of the current module. This is only needed so that names can be automatically generated when the tasks are defined in the `__main__` module. The second argument is the broker keyword argument, specifying the URL of the message broker you want to use. Here using RabbitMQ (also the default option). See :ref:`celerytut-broker` above for more choices -- for RabbitMQ you can use ``amqp://localhost``, or for Redis you can use ``redis://localhost``. You defined a single task, called ``add``, returning the sum of two numbers. .. _celerytut-running-the-worker: Running the Celery worker server ================================ You can now run the worker by executing our program with the ``worker`` argument: .. code-block:: console $ celery -A tasks worker --loglevel=info .. note:: See the :ref:`celerytut-troubleshooting` section if the worker doesn't start. In production you'll want to run the worker in the background as a daemon. To do this you need to use the tools provided by your platform, or something like `supervisord`_ (see :ref:`daemonizing` for more information). For a complete listing of the command-line options available, do: .. 
code-block:: console $ celery worker --help There are also several other commands available, and help is also available: .. code-block:: console $ celery help .. _`supervisord`: http://supervisord.org .. _celerytut-calling: Calling the task ================ To call our task you can use the :meth:`~@Task.delay` method. This is a handy shortcut to the :meth:`~@Task.apply_async` method that gives greater control of the task execution (see :ref:`guide-calling`):: >>> from tasks import add >>> add.delay(4, 4) The task has now been processed by the worker you started earlier. You can verify this by looking at the worker's console output. Calling a task returns an :class:`~@AsyncResult` instance. This can be used to check the state of the task, wait for the task to finish, or get its return value (or if the task failed, to get the exception and traceback). Results are not enabled by default. In order to do remote procedure calls or keep track of task results in a database, you will need to configure Celery to use a result backend. This is described in the next section. .. _celerytut-keeping-results: Keeping Results =============== If you want to keep track of the tasks' states, Celery needs to store or send the states somewhere. There are several built-in result backends to choose from: `SQLAlchemy`_/`Django`_ ORM, `Memcached`_, `Redis`_, :ref:`RPC ` (`RabbitMQ`_/AMQP), and -- or you can define your own. .. _`Memcached`: http://memcached.org .. _`MongoDB`: http://www.mongodb.org .. _`SQLAlchemy`: http://www.sqlalchemy.org/ .. _`Django`: http://djangoproject.com For this example we use the `rpc` result backend, that sends states back as transient messages. The backend is specified via the ``backend`` argument to :class:`@Celery`, (or via the :setting:`result_backend` setting if you choose to use a configuration module): .. code-block:: python app = Celery('tasks', backend='rpc://', broker='pyamqp://') Or if you want to use Redis as the result backend, but still use RabbitMQ as the message broker (a popular combination): .. code-block:: python app = Celery('tasks', backend='redis://localhost', broker='pyamqp://') To read more about result backends please see :ref:`task-result-backends`. Now with the result backend configured, let's call the task again. This time you'll hold on to the :class:`~@AsyncResult` instance returned when you call a task: .. code-block:: pycon >>> result = add.delay(4, 4) The :meth:`~@AsyncResult.ready` method returns whether the task has finished processing or not: .. code-block:: pycon >>> result.ready() False You can wait for the result to complete, but this is rarely used since it turns the asynchronous call into a synchronous one: .. code-block:: pycon >>> result.get(timeout=1) 8 In case the task raised an exception, :meth:`~@AsyncResult.get` will re-raise the exception, but you can override this by specifying the ``propagate`` argument: .. code-block:: pycon >>> result.get(propagate=False) If the task raised an exception, you can also gain access to the original traceback: .. code-block:: pycon >>> result.traceback … See :mod:`celery.result` for the complete result object reference. .. _celerytut-configuration: Configuration ============= Celery, like a consumer appliance, doesn't need much configuration to operate. It has an input and an output. The input must be connected to a broker, and the output can be optionally connected to a result backend. 
However, if you look closely at the back, there's a lid revealing loads of sliders, dials, and buttons: this is the configuration. The default configuration should be good enough for most use cases, but there are many options that can be configured to make Celery work exactly as needed. Reading about the options available is a good idea to familiarize yourself with what can be configured. You can read about the options in the :ref:`configuration` reference. The configuration can be set on the app directly or by using a dedicated configuration module. As an example you can configure the default serializer used for serializing task payloads by changing the :setting:`task_serializer` setting: .. code-block:: python app.conf.task_serializer = 'json' If you're configuring many settings at once you can use ``update``: .. code-block:: python app.conf.update( task_serializer='json', accept_content=['json'], # Ignore other content result_serializer='json', timezone='Europe/Oslo', enable_utc=True, ) For larger projects, a dedicated configuration module is recommended. Hard coding periodic task intervals and task routing options is discouraged. It is much better to keep these in a centralized location. This is especially true for libraries, as it enables users to control how their tasks behave. A centralized configuration will also allow your SysAdmin to make simple changes in the event of system trouble. You can tell your Celery instance to use a configuration module by calling the :meth:`@config_from_object` method: .. code-block:: python app.config_from_object('celeryconfig') This module is often called "``celeryconfig``", but you can use any module name. In the above case, a module named ``celeryconfig.py`` must be available to load from the current directory or on the Python path. It could look something like this: :file:`celeryconfig.py`: .. code-block:: python broker_url = 'pyamqp://' result_backend = 'rpc://' task_serializer = 'json' result_serializer = 'json' accept_content = ['json'] timezone = 'Europe/Oslo' enable_utc = True To verify that your configuration file works properly and doesn't contain any syntax errors, you can try to import it: .. code-block:: console $ python -m celeryconfig For a complete reference of configuration options, see :ref:`configuration`. To demonstrate the power of configuration files, this is how you'd route a misbehaving task to a dedicated queue: :file:`celeryconfig.py`: .. code-block:: python task_routes = { 'tasks.add': 'low-priority', } Or instead of routing it you could rate limit the task instead, so that only 10 tasks of this type can be processed in a minute (10/m): :file:`celeryconfig.py`: .. code-block:: python task_annotations = { 'tasks.add': {'rate_limit': '10/m'} } If you're using RabbitMQ or Redis as the broker then you can also direct the workers to set a new rate limit for the task at runtime: .. code-block:: console $ celery -A tasks control rate_limit tasks.add 10/m worker@example.com: OK new rate limit set successfully See :ref:`guide-routing` to read more about task routing, and the :setting:`task_annotations` setting for more about annotations, or :ref:`guide-monitoring` for more about remote control commands and how to monitor what your workers are doing. Where to go from here ===================== If you want to learn more you should continue to the :ref:`Next Steps ` tutorial, and after that you can read the :ref:`User Guide `. .. 
_celerytut-troubleshooting: Troubleshooting =============== There's also a troubleshooting section in the :ref:`faq`. Worker doesn't start: Permission Error -------------------------------------- - If you're using Debian, Ubuntu or other Debian-based distributions: Debian recently renamed the :file:`/dev/shm` special file to :file:`/run/shm`. A simple workaround is to create a symbolic link: .. code-block:: console # ln -s /run/shm /dev/shm - Others: If you provide any of the :option:`--pidfile `, :option:`--logfile ` or :option:`--statedb ` arguments, then you must make sure that they point to a file or directory that's writable and readable by the user starting the worker. Result backend doesn't work or tasks are always in ``PENDING`` state -------------------------------------------------------------------- All tasks are :state:`PENDING` by default, so the state would've been better named "unknown". Celery doesn't update the state when a task is sent, and any task with no history is assumed to be pending (you know the task id, after all). 1) Make sure that the task doesn't have ``ignore_result`` enabled. Enabling this option will force the worker to skip updating states. 2) Make sure the :setting:`task_ignore_result` setting isn't enabled. 3) Make sure that you don't have any old workers still running. It's easy to start multiple workers by accident, so make sure that the previous worker is properly shut down before you start a new one. An old worker that isn't configured with the expected result backend may be running and is hijacking the tasks. The :option:`--pidfile ` argument can be set to an absolute path to make sure this doesn't happen. 4) Make sure the client is configured with the right backend. If, for some reason, the client is configured to use a different backend than the worker, you won't be able to receive the result. Make sure the backend is configured correctly: .. code-block:: pycon >>> result = task.delay() >>> print(result.backend) celery-4.1.0/docs/getting-started/introduction.rst0000644000175000017500000002410413130607475022244 0ustar omeromer00000000000000.. _intro: ======================== Introduction to Celery ======================== .. contents:: :local: :depth: 1 What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work called a task. Dedicated worker processes constantly monitor task queues for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. To initiate a task, the client adds a message to the queue, and the broker then delivers that message to a worker. A Celery system can consist of multiple workers and brokers, giving way to high availability and horizontal scaling. Celery is written in Python, but the protocol can be implemented in any language. In addition to Python there's node-celery_ for Node.js, and a `PHP client`_. Language interoperability can also be achieved by exposing an HTTP endpoint and having a task that requests it (webhooks). .. _`PHP client`: https://github.com/gjedeer/celery-php .. _node-celery: https://github.com/mher/node-celery What do I need? =============== .. sidebar:: Version Requirements :subtitle: Celery version 4.0 runs on - Python ❨2.7, 3.4, 3.5❩ - PyPy ❨5.4, 5.5❩ This is the last version to support Python 2.7, and from the next version (Celery 5.x) Python 3.5 or newer is required.
If you're running an older version of Python, you need to be running an older version of Celery: - Python 2.6: Celery series 3.1 or earlier. - Python 2.5: Celery series 3.0 or earlier. - Python 2.4: Celery series 2.2 or earlier. Celery is a project with minimal funding, so we don't support Microsoft Windows. Please don't open any issues related to that platform. *Celery* requires a message transport to send and receive messages. The RabbitMQ and Redis broker transports are feature complete, but there's also support for a myriad of other experimental solutions, including using SQLite for local development. *Celery* can run on a single machine, on multiple machines, or even across data centers. Get Started =========== If this is the first time you're trying to use Celery, or if you haven't kept up with development in the 3.1 version and are coming from previous versions, then you should read our getting started tutorials: - :ref:`first-steps` - :ref:`next-steps` Celery is… ========== .. _`mailing-list`: https://groups.google.com/group/celery-users .. topic:: \ - **Simple** Celery is easy to use and maintain, and it *doesn't need configuration files*. It has an active, friendly community you can talk to for support, including a `mailing-list`_ and an :ref:`IRC channel `. Here's one of the simplest applications you can make: .. code-block:: python from celery import Celery app = Celery('hello', broker='amqp://guest@localhost//') @app.task def hello(): return 'hello world' - **Highly Available** Workers and clients will automatically retry in the event of connection loss or failure, and some brokers support HA in the way of *Primary/Primary* or *Primary/Replica* replication. - **Fast** A single Celery process can process millions of tasks a minute, with sub-millisecond round-trip latency (using RabbitMQ, librabbitmq, and optimized settings). - **Flexible** Almost every part of *Celery* can be extended or used on its own: custom pool implementations, serializers, compression schemes, logging, schedulers, consumers, producers, broker transports, and much more. .. topic:: It supports .. hlist:: :columns: 2 - **Brokers** - :ref:`RabbitMQ `, :ref:`Redis `, - :ref:`Amazon SQS `, and more… - **Concurrency** - prefork (multiprocessing), - Eventlet_, gevent_ - `solo` (single threaded) - **Result Stores** - AMQP, Redis - Memcached, - SQLAlchemy, Django ORM - Apache Cassandra, Elasticsearch - **Serialization** - *pickle*, *json*, *yaml*, *msgpack*. - *zlib*, *bzip2* compression. - Cryptographic message signing. Features ======== .. topic:: \ .. hlist:: :columns: 2 - **Monitoring** A stream of monitoring events is emitted by workers and is used by built-in and external tools to tell you what your cluster is doing -- in real-time. :ref:`Read more… `. - **Work-flows** Simple and complex work-flows can be composed using a set of powerful primitives we call the "canvas", including grouping, chaining, chunking, and more. :ref:`Read more… `. - **Time & Rate Limits** You can control how many tasks can be executed per second/minute/hour, or how long a task can be allowed to run, and this can be set as a default, for a specific worker or individually for each task type. :ref:`Read more… `. - **Scheduling** You can specify the time to run a task in seconds or a :class:`~datetime.datetime`, or you can use periodic tasks for recurring events based on a simple interval, or Crontab expressions supporting minute, hour, day of week, day of month, and month of year. :ref:`Read more… `.
- **Resource Leak Protection** The :option:`--max-tasks-per-child ` option is used for user tasks leaking resources, like memory or file descriptors, that are simply out of your control. :ref:`Read more… `. - **User Components** Each worker component can be customized, and additional components can be defined by the user. The worker is built up using "bootsteps" — a dependency graph enabling fine grained control of the worker's internals. .. _`Eventlet`: http://eventlet.net/ .. _`gevent`: http://gevent.org/ Framework Integration ===================== Celery is easy to integrate with web frameworks, some of them even have integration packages: +--------------------+------------------------+ | `Pyramid`_ | :pypi:`pyramid_celery` | +--------------------+------------------------+ | `Pylons`_ | :pypi:`celery-pylons` | +--------------------+------------------------+ | `Flask`_ | not needed | +--------------------+------------------------+ | `web2py`_ | :pypi:`web2py-celery` | +--------------------+------------------------+ | `Tornado`_ | :pypi:`tornado-celery` | +--------------------+------------------------+ For `Django`_ see :ref:`django-first-steps`. The integration packages aren't strictly necessary, but they can make development easier, and sometimes they add important hooks like closing database connections at :manpage:`fork(2)`. .. _`Django`: https://djangoproject.com/ .. _`Pylons`: http://pylonshq.com/ .. _`Flask`: http://flask.pocoo.org/ .. _`web2py`: http://web2py.com/ .. _`Bottle`: https://bottlepy.org/ .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html .. _`Tornado`: http://www.tornadoweb.org/ .. _`tornado-celery`: https://github.com/mher/tornado-celery/ Quick Jump ========== .. topic:: I want to ⟶ .. hlist:: :columns: 2 - :ref:`get the return value of a task ` - :ref:`use logging from my task ` - :ref:`learn about best practices ` - :ref:`create a custom task base class ` - :ref:`add a callback to a group of tasks ` - :ref:`split a task into several chunks ` - :ref:`optimize the worker ` - :ref:`see a list of built-in task states ` - :ref:`create custom task states ` - :ref:`set a custom task name ` - :ref:`track when a task starts ` - :ref:`retry a task when it fails ` - :ref:`get the id of the current task ` - :ref:`know what queue a task was delivered to ` - :ref:`see a list of running workers ` - :ref:`purge all messages ` - :ref:`inspect what the workers are doing ` - :ref:`see what tasks a worker has registered ` - :ref:`migrate tasks to a new broker ` - :ref:`see a list of event message types ` - :ref:`contribute to Celery ` - :ref:`learn about available configuration settings ` - :ref:`get a list of people and companies using Celery ` - :ref:`write my own remote control command ` - :ref:`change worker queues at runtime ` .. topic:: Jump to ⟶ .. hlist:: :columns: 4 - :ref:`Brokers ` - :ref:`Applications ` - :ref:`Tasks ` - :ref:`Calling ` - :ref:`Workers ` - :ref:`Daemonizing ` - :ref:`Monitoring ` - :ref:`Optimizing ` - :ref:`Security ` - :ref:`Routing ` - :ref:`Configuration ` - :ref:`Django ` - :ref:`Contributing ` - :ref:`Signals ` - :ref:`FAQ ` - :ref:`API Reference ` .. include:: ../includes/installation.txt celery-4.1.0/docs/getting-started/index.rst0000644000175000017500000000032513130607475020631 0ustar omeromer00000000000000================= Getting Started ================= :Release: |version| :Date: |today| .. 
toctree:: :maxdepth: 2 introduction brokers/index first-steps-with-celery next-steps resources celery-4.1.0/docs/getting-started/resources.rst0000644000175000017500000000020413130607475021530 0ustar omeromer00000000000000.. _resources: =========== Resources =========== .. contents:: :local: :depth: 2 .. include:: ../includes/resources.txt celery-4.1.0/docs/getting-started/next-steps.rst0000644000175000017500000005406313130607475021644 0ustar omeromer00000000000000.. _next-steps: ============ Next Steps ============ The :ref:`first-steps` guide is intentionally minimal. In this guide I'll demonstrate what Celery offers in more detail, including how to add Celery support for your application and library. This document doesn't cover all of Celery's features and best practices, so it's recommended that you also read the :ref:`User Guide `. .. contents:: :local: :depth: 1 Using Celery in your Application ================================ .. _project-layout: Our Project ----------- Project layout:: proj/__init__.py /celery.py /tasks.py :file:`proj/celery.py` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/next-steps/proj/celery.py :language: python In this module you created our :class:`@Celery` instance (sometimes referred to as the *app*). To use Celery within your project you simply import this instance. - The ``broker`` argument specifies the URL of the broker to use. See :ref:`celerytut-broker` for more information. - The ``backend`` argument specifies the result backend to use. It's used to keep track of task state and results. While results are disabled by default, I use the RPC result backend here because I demonstrate later how retrieving results works; you may want to use a different backend for your application. They all have different strengths and weaknesses. If you don't need results, it's better to disable them. Results can also be disabled for individual tasks by setting the ``@task(ignore_result=True)`` option. See :ref:`celerytut-keeping-results` for more information. - The ``include`` argument is a list of modules to import when the worker starts. You need to add our tasks module here so that the worker is able to find our tasks. :file:`proj/tasks.py` ~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/next-steps/proj/tasks.py :language: python
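If you don't have the example project checked out, the two files referenced by the ``literalinclude`` directives above look roughly like this (a sketch of the bundled ``examples/next-steps`` project, not a verbatim copy):

.. code-block:: python

    # proj/celery.py
    from __future__ import absolute_import, unicode_literals
    from celery import Celery

    app = Celery('proj',
                 broker='amqp://',
                 backend='rpc://',
                 include=['proj.tasks'])

    if __name__ == '__main__':
        app.start()

.. code-block:: python

    # proj/tasks.py
    from __future__ import absolute_import, unicode_literals
    from .celery import app

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def mul(x, y):
        return x * y

    @app.task
    def xsum(numbers):
        return sum(numbers)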
Starting the worker ------------------- The :program:`celery` program can be used to start the worker (you need to run the worker in the directory above proj): .. code-block:: console $ celery -A proj worker -l info When the worker starts you should see a banner and some messages:: -------------- celery@halcyon.local v4.0 (latentcall) ---- **** ----- --- * *** * -- [Configuration] -- * - **** --- . broker: amqp://guest@localhost:5672// - ** ---------- . app: __main__:0x1012d8590 - ** ---------- . concurrency: 8 (processes) - ** ---------- . events: OFF (enable -E to monitor this worker) - ** ---------- - *** --- * --- [Queues] -- ******* ---- . celery: exchange:celery(direct) binding:celery --- ***** ----- [2012-06-08 16:23:51,078: WARNING/MainProcess] celery@halcyon.local has started. -- The *broker* is the URL you specified in the broker argument in our ``celery`` module; you can also specify a different broker on the command-line by using the :option:`-b ` option. -- *Concurrency* is the number of prefork worker processes used to process your tasks concurrently; when all of these are busy doing work, new tasks will have to wait for one of the tasks to finish before they can be processed. The default concurrency number is the number of CPUs on that machine (including cores); you can specify a custom number using the :option:`celery worker -c` option. There's no recommended value, as the optimal number depends on a number of factors, but if your tasks are mostly I/O-bound then you can try to increase it; experimentation has shown that adding more than twice the number of CPUs is rarely effective, and likely to degrade performance instead. In addition to the default prefork pool, Celery also supports using Eventlet, gevent, and running in a single thread (see :ref:`concurrency`). -- *Events* is an option that when enabled causes Celery to send monitoring messages (events) for actions occurring in the worker. These can be used by monitor programs like ``celery events``, and Flower, the real-time Celery monitor, which you can read about in the :ref:`Monitoring and Management guide `. -- *Queues* is the list of queues that the worker will consume tasks from. The worker can be told to consume from several queues at once, and this is used to route messages to specific workers as a means for Quality of Service, separation of concerns, and prioritization, all described in the :ref:`Routing Guide `. You can get a complete list of command-line arguments by passing in the :option:`--help ` flag: .. code-block:: console $ celery worker --help These options are described in more detail in the :ref:`Workers Guide `. Stopping the worker ~~~~~~~~~~~~~~~~~~~ To stop the worker simply hit :kbd:`Control-c`. A list of signals supported by the worker is detailed in the :ref:`Workers Guide `. In the background ~~~~~~~~~~~~~~~~~ In production you'll want to run the worker in the background; this is described in detail in the :ref:`daemonization tutorial `. The daemonization scripts use the :program:`celery multi` command to start one or more workers in the background: .. code-block:: console $ celery multi start w1 -A proj -l info celery multi v4.0.0 (latentcall) > Starting nodes... > w1.halcyon.local: OK You can restart it too: .. code-block:: console $ celery multi restart w1 -A proj -l info celery multi v4.0.0 (latentcall) > Stopping nodes... > w1.halcyon.local: TERM -> 64024 > Waiting for 1 node..... > w1.halcyon.local: OK > Restarting node w1.halcyon.local: OK celery multi v4.0.0 (latentcall) > Stopping nodes... > w1.halcyon.local: TERM -> 64052 or stop it: .. code-block:: console $ celery multi stop w1 -A proj -l info The ``stop`` command is asynchronous so it won't wait for the worker to shut down. You'll probably want to use the ``stopwait`` command instead; this ensures that all currently executing tasks are completed before exiting: .. code-block:: console $ celery multi stopwait w1 -A proj -l info .. note:: :program:`celery multi` doesn't store information about workers, so you need to use the same command-line arguments when restarting. Only the same pidfile and logfile arguments must be used when stopping. By default it'll create pid and log files in the current directory; to protect against multiple workers launching on top of each other you're encouraged to put these in a dedicated directory: .. code-block:: console $ mkdir -p /var/run/celery $ mkdir -p /var/log/celery $ celery multi start w1 -A proj -l info --pidfile=/var/run/celery/%n.pid \ --logfile=/var/log/celery/%n%I.log With the multi command you can start multiple workers, and there's a powerful command-line syntax to specify arguments for different workers too, for example: ..
code-block:: console $ celery multi start 10 -A proj -l info -Q:1-3 images,video -Q:4,5 data \ -Q default -L:4,5 debug For more examples see the :mod:`~celery.bin.multi` module in the API reference. .. _app-argument: About the :option:`--app ` argument ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :option:`--app ` argument specifies the Celery app instance to use, in the form of ``module.path:attribute``. It also supports a shortcut form where only a package name is given, in which case it'll try to search for the app instance in the following order: With :option:`--app=proj `: 1) an attribute named ``proj.app``, or 2) an attribute named ``proj.celery``, or 3) any attribute in the module ``proj`` where the value is a Celery application, or If none of these are found it'll try a submodule named ``proj.celery``: 4) an attribute named ``proj.celery.app``, or 5) an attribute named ``proj.celery.celery``, or 6) Any attribute in the module ``proj.celery`` where the value is a Celery application. This scheme mimics the practices used in the documentation -- that is, ``proj:app`` for a single contained module, and ``proj.celery:app`` for larger projects. .. _calling-tasks: Calling Tasks ============= You can call a task using the :meth:`delay` method: .. code-block:: pycon >>> add.delay(2, 2) This method is actually a star-argument shortcut to another method called :meth:`apply_async`: .. code-block:: pycon >>> add.apply_async((2, 2)) The latter enables you to specify execution options like the time to run (countdown), the queue it should be sent to, and so on: .. code-block:: pycon >>> add.apply_async((2, 2), queue='lopri', countdown=10) In the above example the task will be sent to a queue named ``lopri`` and the task will execute, at the earliest, 10 seconds after the message was sent. Applying the task directly will execute the task in the current process, so that no message is sent: .. code-block:: pycon >>> add(2, 2) 4 These three methods, :meth:`delay`, :meth:`apply_async`, and applying (``__call__``), represent the Celery calling API, which is also used for signatures. A more detailed overview of the Calling API can be found in the :ref:`Calling User Guide `. Every task invocation will be given a unique identifier (a UUID); this is the task id. The ``delay`` and ``apply_async`` methods return an :class:`~@AsyncResult` instance that can be used to keep track of the task's execution state. But for this you need to enable a :ref:`result backend ` so that the state can be stored somewhere. Results are disabled by default because there's no result backend that suits every application, so to choose one you need to consider the drawbacks of each individual backend. For many tasks keeping the return value isn't even very useful, so it's a sensible default to have. Also note that result backends aren't used for monitoring tasks and workers; for that, Celery uses dedicated event messages (see :ref:`guide-monitoring`). If you have a result backend configured you can retrieve the return value of a task: .. code-block:: pycon >>> res = add.delay(2, 2) >>> res.get(timeout=1) 4 You can find the task's id by looking at the :attr:`id` attribute: .. code-block:: pycon >>> res.id d6b3aea2-fb9b-4ebc-8da4-848818db9114 You can also inspect the exception and traceback if the task raised an exception; in fact, ``result.get()`` will propagate any errors by default: .. code-block:: pycon >>> res = add.delay(2) >>> res.get(timeout=1) ..
code-block:: pytb Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/devel/celery/celery/result.py", line 113, in get interval=interval) File "/opt/devel/celery/celery/backends/rpc.py", line 138, in wait_for raise meta['result'] TypeError: add() takes exactly 2 arguments (1 given) If you don't wish for the errors to propagate then you can disable that by passing the ``propagate`` argument: .. code-block:: pycon >>> res.get(propagate=False) TypeError('add() takes exactly 2 arguments (1 given)',) In this case it'll return the exception instance raised instead, and so to check whether the task succeeded or failed you'll have to use the corresponding methods on the result instance: .. code-block:: pycon >>> res.failed() True >>> res.successful() False So how does it know if the task has failed or not? It can find out by looking at the task's *state*: .. code-block:: pycon >>> res.state 'FAILURE' A task can only be in a single state, but it can progress through several states. The stages of a typical task can be:: PENDING -> STARTED -> SUCCESS The started state is a special state that's only recorded if the :setting:`task_track_started` setting is enabled, or if the ``@task(track_started=True)`` option is set for the task. The pending state is actually not a recorded state, but rather the default state for any task id that's unknown, as you can see from this example: .. code-block:: pycon >>> from proj.celery import app >>> res = app.AsyncResult('this-id-does-not-exist') >>> res.state 'PENDING' If the task is retried the stages can become even more complex. To demonstrate, for a task that's retried two times the stages would be: .. code-block:: text PENDING -> STARTED -> RETRY -> STARTED -> RETRY -> STARTED -> SUCCESS To read more about task states you should see the :ref:`task-states` section in the tasks user guide. Calling tasks is described in detail in the :ref:`Calling Guide `. .. _designing-workflows: *Canvas*: Designing Work-flows ============================== You just learned how to call a task using the task's ``delay`` method, and this is often all you need, but sometimes you may want to pass the signature of a task invocation to another process or as an argument to another function; for this, Celery uses something called *signatures*. A signature wraps the arguments and execution options of a single task invocation in such a way that it can be passed to functions or even serialized and sent across the wire. You can create a signature for the ``add`` task using the arguments ``(2, 2)``, and a countdown of 10 seconds like this: .. code-block:: pycon >>> add.signature((2, 2), countdown=10) tasks.add(2, 2) There's also a shortcut using star arguments: .. code-block:: pycon >>> add.s(2, 2) tasks.add(2, 2) And there's that calling API again… ----------------------------------- Signature instances also support the calling API, meaning they have the ``delay`` and ``apply_async`` methods. But there's a difference in that the signature may already have an argument signature specified. The ``add`` task takes two arguments, so a signature specifying two arguments would make a complete signature: .. code-block:: pycon >>> s1 = add.s(2, 2) >>> res = s1.delay() >>> res.get() 4 But, you can also make incomplete signatures to create what we call *partials*: .. code-block:: pycon # incomplete partial: add(?, 2) >>> s2 = add.s(2) ``s2`` is now a partial signature that needs another argument to be complete, and this can be resolved when calling the signature: ..
code-block:: pycon # resolves the partial: add(8, 2) >>> res = s2.delay(8) >>> res.get() 10 Here you added the argument 8 that was prepended to the existing argument 2 forming a complete signature of ``add(8, 2)``. Keyword arguments can also be added later; these are then merged with any existing keyword arguments, but with new arguments taking precedence: .. code-block:: pycon >>> s3 = add.s(2, 2, debug=True) >>> s3.delay(debug=False) # debug is now False. As stated, signatures support the calling API, meaning that: - ``sig.apply_async(args=(), kwargs={}, **options)`` Calls the signature with optional partial arguments and partial keyword arguments. Also supports partial execution options. - ``sig.delay(*args, **kwargs)`` Star argument version of ``apply_async``. Any arguments will be prepended to the arguments in the signature, and keyword arguments are merged with any existing keys. So this all seems very useful, but what can you actually do with these? To get to that I must introduce the canvas primitives… The Primitives -------------- .. topic:: \ .. hlist:: :columns: 2 - :ref:`group ` - :ref:`chain ` - :ref:`chord ` - :ref:`map ` - :ref:`starmap ` - :ref:`chunks ` These primitives are signature objects themselves, so they can be combined in any number of ways to compose complex work-flows. .. note:: These examples retrieve results, so to try them out you need to configure a result backend. The example project above already does that (see the backend argument to :class:`~celery.Celery`). Let's look at some examples: Groups ~~~~~~ A :class:`~celery.group` calls a list of tasks in parallel, and it returns a special result instance that lets you inspect the results as a group, and retrieve the return values in order. .. code-block:: pycon >>> from celery import group >>> from proj.tasks import add >>> group(add.s(i, i) for i in xrange(10))().get() [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] - Partial group .. code-block:: pycon >>> g = group(add.s(i) for i in xrange(10)) >>> g(10).get() [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Chains ~~~~~~ Tasks can be linked together so that after one task returns the other is called: .. code-block:: pycon >>> from celery import chain >>> from proj.tasks import add, mul # (4 + 4) * 8 >>> chain(add.s(4, 4) | mul.s(8))().get() 64 or a partial chain: .. code-block:: pycon >>> # (? + 4) * 8 >>> g = chain(add.s(4) | mul.s(8)) >>> g(4).get() 64 Chains can also be written like this: .. code-block:: pycon >>> (add.s(4, 4) | mul.s(8))().get() 64 Chords ~~~~~~ A chord is a group with a callback: .. code-block:: pycon >>> from celery import chord >>> from proj.tasks import add, xsum >>> chord((add.s(i, i) for i in xrange(10)), xsum.s())().get() 90 A group chained to another task will be automatically converted to a chord: .. code-block:: pycon >>> (group(add.s(i, i) for i in xrange(10)) | xsum.s())().get() 90 Since these primitives are all of the signature type they can be combined almost however you want, for example: .. code-block:: pycon >>> upload_document.s(file) | group(apply_filter.s() for filter in filters) Be sure to read more about work-flows in the :ref:`Canvas ` user guide. Routing ======= Celery supports all of the routing facilities provided by AMQP, but it also supports simple routing where messages are sent to named queues. The :setting:`task_routes` setting enables you to route tasks by name and keep everything centralized in one location: ..
code-block:: python app.conf.update( task_routes = { 'proj.tasks.add': {'queue': 'hipri'}, }, ) You can also specify the queue at runtime with the ``queue`` argument to ``apply_async``: .. code-block:: pycon >>> from proj.tasks import add >>> add.apply_async((2, 2), queue='hipri') You can then make a worker consume from this queue by specifying the :option:`celery worker -Q` option: .. code-block:: console $ celery -A proj worker -Q hipri You may specify multiple queues by using a comma-separated list, for example you can make the worker consume from both the default queue, and the ``hipri`` queue, where the default queue is named ``celery`` for historical reasons: .. code-block:: console $ celery -A proj worker -Q hipri,celery The order of the queues doesn't matter as the worker will give equal weight to the queues. To learn more about routing, including taking advantage of the full power of AMQP routing, see the :ref:`Routing Guide `. Remote Control ============== If you're using RabbitMQ (AMQP), Redis, or Qpid as the broker then you can control and inspect the worker at runtime. For example you can see what tasks the worker is currently working on: .. code-block:: console $ celery -A proj inspect active This is implemented by using broadcast messaging, so all remote control commands are received by every worker in the cluster. You can also specify one or more workers to act on the request using the :option:`--destination ` option. This is a comma-separated list of worker host names: .. code-block:: console $ celery -A proj inspect active --destination=celery@example.com If a destination isn't provided then every worker will act and reply to the request. The :program:`celery inspect` command contains commands that don't change anything in the worker; they only reply with information and statistics about what's going on inside the worker. For a list of inspect commands you can execute: .. code-block:: console $ celery -A proj inspect --help Then there's the :program:`celery control` command, which contains commands that actually change things in the worker at runtime: .. code-block:: console $ celery -A proj control --help For example you can force workers to enable event messages (used for monitoring tasks and workers): .. code-block:: console $ celery -A proj control enable_events When events are enabled you can then start the event dumper to see what the workers are doing: .. code-block:: console $ celery -A proj events --dump or you can start the curses interface: .. code-block:: console $ celery -A proj events When you're finished monitoring you can disable events again: .. code-block:: console $ celery -A proj control disable_events The :program:`celery status` command also uses remote control commands and shows a list of online workers in the cluster: .. code-block:: console $ celery -A proj status You can read more about the :program:`celery` command and monitoring in the :ref:`Monitoring Guide `.
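The same inspection and control operations are also available from Python through the app instance. A brief sketch (a worker must be running for the inspect calls to return anything useful):

.. code-block:: python

    from proj.celery import app

    i = app.control.inspect()    # inspect all workers in the cluster
    print(i.active())            # tasks currently being executed
    print(i.registered())        # tasks the workers have registered

    app.control.enable_events()  # same as `celery control enable_events`
    app.control.disable_events()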
Timezone ======== All times and dates, internally and in messages, use the UTC timezone. When the worker receives a message, for example with a countdown set, it converts that UTC time to local time. If you wish to use a different timezone than the system timezone then you must configure that using the :setting:`timezone` setting: .. code-block:: python app.conf.timezone = 'Europe/London' Optimization ============ The default configuration isn't optimized for throughput; it tries to walk the middle way between many short tasks and fewer long tasks, a compromise between throughput and fair scheduling. If you have strict fair scheduling requirements, or want to optimize for throughput then you should read the :ref:`Optimizing Guide `. If you're using RabbitMQ then you can install the :pypi:`librabbitmq` module: this is an AMQP client implemented in C: .. code-block:: console $ pip install librabbitmq What to do now? =============== Now that you have read this document you should continue to the :ref:`User Guide `. There's also an :ref:`API reference ` if you're so inclined. celery-4.1.0/docs/getting-started/brokers/0000755000175000017500000000000013135426347020441 5ustar omeromer00000000000000celery-4.1.0/docs/getting-started/brokers/index.rst0000644000175000017500000000327713130607475022305 0ustar omeromer00000000000000.. _brokers: ===================== Brokers ===================== :Release: |version| :Date: |today| Celery supports several message transport alternatives. .. _broker_toc: Broker Instructions =================== .. toctree:: :maxdepth: 1 rabbitmq redis sqs .. _broker-overview: Broker Overview =============== This is a comparison table of the different transports supported; more information can be found in the documentation for each individual transport (see :ref:`broker_toc`). +---------------+--------------+----------------+--------------------+ | **Name** | **Status** | **Monitoring** | **Remote Control** | +---------------+--------------+----------------+--------------------+ | *RabbitMQ* | Stable | Yes | Yes | +---------------+--------------+----------------+--------------------+ | *Redis* | Stable | Yes | Yes | +---------------+--------------+----------------+--------------------+ | *Amazon SQS* | Stable | No | No | +---------------+--------------+----------------+--------------------+ | *Zookeeper* | Experimental | No | No | +---------------+--------------+----------------+--------------------+ Experimental brokers may be functional but they don't have dedicated maintainers. Missing monitor support means that the transport doesn't implement events, and as such Flower, `celery events`, `celerymon` and other event-based monitoring tools won't work. Remote control means the ability to inspect and manage workers at runtime using the `celery inspect` and `celery control` commands (and other tools using the remote control API). celery-4.1.0/docs/getting-started/brokers/redis.rst0000644000175000017500000001073413130607475022304 0ustar omeromer00000000000000.. _broker-redis: ============= Using Redis ============= .. _broker-redis-installation: Installation ============ For the Redis support you have to install additional dependencies. You can install both Celery and these dependencies in one go using the ``celery[redis]`` :ref:`bundle `: .. code-block:: console $ pip install -U "celery[redis]" .. _broker-redis-configuration: Configuration ============= Configuration is easy, just configure the location of your Redis database: .. code-block:: python app.conf.broker_url = 'redis://localhost:6379/0' Where the URL is in the format of: .. code-block:: text redis://:password@hostname:port/db_number All fields after the scheme are optional, and will default to ``localhost`` on port 6379, using database 0. If a Unix socket connection should be used, the URL needs to be in the format: ..
code-block:: text redis+socket:///path/to/redis.sock Specifying a different database number when using a Unix socket is possible by adding the ``virtual_host`` parameter to the URL: .. code-block:: text redis+socket:///path/to/redis.sock?virtual_host=db_number .. _redis-visibility_timeout: Visibility Timeout ------------------ The visibility timeout defines the number of seconds to wait for the worker to acknowledge the task before the message is redelivered to another worker. Be sure to see :ref:`redis-caveats` below. This option is set via the :setting:`broker_transport_options` setting: .. code-block:: python app.conf.broker_transport_options = {'visibility_timeout': 3600} # 1 hour. The default visibility timeout for Redis is 1 hour. .. _redis-results-configuration: Results ------- If you also want to store the state and return values of tasks in Redis, you should configure these settings:: app.conf.result_backend = 'redis://localhost:6379/0' For a complete list of options supported by the Redis result backend, see :ref:`conf-redis-result-backend`. .. _redis-caveats: Caveats ======= .. _redis-caveat-fanout-prefix: Fanout prefix ------------- Broadcast messages will be seen by all virtual hosts by default. You have to set a transport option to prefix the messages so that they will only be received by the active virtual host: .. code-block:: python app.conf.broker_transport_options = {'fanout_prefix': True} Note that you won't be able to communicate with workers running older versions or workers that don't have this setting enabled. This setting will be the default in the future, so it's better to migrate sooner rather than later. .. _redis-caveat-fanout-patterns: Fanout patterns --------------- Workers will receive all task-related events by default. To avoid this you must set the ``fanout_patterns`` fanout option so that the workers may only subscribe to worker-related events: .. code-block:: python app.conf.broker_transport_options = {'fanout_patterns': True} Note that this change is backward incompatible, so all workers in the cluster must have this option enabled, or else they won't be able to communicate. This option will be enabled by default in the future. Visibility timeout ------------------ If a task isn't acknowledged within the :ref:`redis-visibility_timeout` the task will be redelivered to another worker and executed. This causes problems with ETA/countdown/retry tasks where the time to execute exceeds the visibility timeout; in fact if that happens it will be executed again, and again in a loop. So you have to increase the visibility timeout to match the time of the longest ETA you're planning to use. Note that Celery will redeliver messages at worker shutdown, so having a long visibility timeout will only delay the redelivery of 'lost' tasks in the event of a power failure or forcefully terminated workers. Periodic tasks won't be affected by the visibility timeout, as this is a concept separate from ETA/countdown. You can increase this timeout by configuring a transport option with the same name: .. code-block:: python app.conf.broker_transport_options = {'visibility_timeout': 43200} The value must be an int describing the number of seconds. Key eviction ------------ Redis may evict keys from the database in some situations. If you experience an error like: .. code-block:: text InconsistencyError: Probably the key ('_kombu.binding.celery') has been removed from the Redis database. then you may want to configure the :command:`redis-server` to not evict keys by setting the ``timeout`` parameter to 0 in the redis configuration file.
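A sketch of the relevant :file:`redis.conf` lines (the ``maxmemory-policy`` line is an extra safeguard not mentioned above):

.. code-block:: text

    # Never disconnect idle clients, and don't evict keys under memory pressure.
    timeout 0
    maxmemory-policy noeviction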
celery-4.1.0/docs/getting-started/brokers/sqs.rst0000644000175000017500000001113113130607475021774 0ustar omeromer00000000000000.. _broker-sqs: ================== Using Amazon SQS ================== .. _broker-sqs-installation: Installation ============ For the Amazon SQS support you have to install the :pypi:`boto` library using :command:`pip`: .. code-block:: console $ pip install -U boto .. _broker-sqs-configuration: Configuration ============= You have to specify SQS in the broker URL:: broker_url = 'sqs://ABCDEFGHIJKLMNOPQRST:ZYXK7NiynGlTogH8Nj+P9nlE73sq3@' where the URL format is: .. code-block:: text sqs://aws_access_key_id:aws_secret_access_key@ you must *remember to include the "@" at the end*. The login credentials can also be set using the environment variables :envvar:`AWS_ACCESS_KEY_ID` and :envvar:`AWS_SECRET_ACCESS_KEY`, in that case the broker URL may only be ``sqs://``. If you are using IAM roles on instances, you can set the BROKER_URL to: ``sqs://`` and kombu will attempt to retrieve access tokens from the instance metadata. .. note:: If you specify AWS credentials in the broker URL, then please keep in mind that the secret access key may contain unsafe characters that need to be URL encoded. Options ======= Region ------ The default region is ``us-east-1``, but you can select another region by configuring the :setting:`broker_transport_options` setting:: broker_transport_options = {'region': 'eu-west-1'} .. seealso:: An overview of Amazon Web Services regions can be found here: http://aws.amazon.com/about-aws/globalinfrastructure/ Visibility Timeout ------------------ The visibility timeout defines the number of seconds to wait for the worker to acknowledge the task before the message is redelivered to another worker. Also see caveats below. This option is set via the :setting:`broker_transport_options` setting:: broker_transport_options = {'visibility_timeout': 3600} # 1 hour. The default visibility timeout is 30 seconds. Polling Interval ---------------- The polling interval decides the number of seconds to sleep between unsuccessful polls. This value can be either an int or a float. By default the value is *one second*: this means the worker will sleep for one second when there are no more messages to read. You must note that **more frequent polling is also more expensive, so increasing the polling interval can save you money**. The polling interval can be set via the :setting:`broker_transport_options` setting:: broker_transport_options = {'polling_interval': 0.3} Very frequent polling intervals can cause *busy loops*, resulting in the worker using a lot of CPU time. If you need sub-millisecond precision you should consider using another transport, like `RabbitMQ `, or `Redis `. Queue Prefix ------------ By default Celery won't assign any prefix to the queue names. If you have other services using SQS, you can configure it to do so using the :setting:`broker_transport_options` setting:: broker_transport_options = {'queue_name_prefix': 'celery-'}
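Putting the options above together, a configuration-module sketch (all values are illustrative):

.. code-block:: python

    broker_url = 'sqs://'  # credentials taken from the environment or an IAM role

    broker_transport_options = {
        'region': 'eu-west-1',
        'visibility_timeout': 3600,  # 1 hour.
        'polling_interval': 0.3,
        'queue_name_prefix': 'celery-',
    }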
So you have to increase the visibility timeout to match the time of the longest ETA you're planning to use. Note that Celery will redeliver messages at worker shutdown, so having a long visibility timeout will only delay the redelivery of 'lost' tasks in the event of a power failure or forcefully terminated workers. Periodic tasks won't be affected by the visibility timeout, as it is a concept separate from ETA/countdown. The maximum visibility timeout supported by AWS as of this writing is 12 hours (43200 seconds):: broker_transport_options = {'visibility_timeout': 43200} - SQS doesn't yet support worker remote control commands. - SQS doesn't yet support events, and so cannot be used with :program:`celery events`, :program:`celerymon`, or the Django Admin monitor. .. _sqs-results-configuration: Results ------- Multiple products in the Amazon Web Services family could be a good candidate to store or publish results with, but there's no such result backend included at this point. .. warning:: Don't use the ``amqp`` result backend with SQS. It will create one queue for every task, and the queues will not be collected. This could cost you money that would be better spent contributing an AWS result store backend back to Celery :) celery-4.1.0/docs/getting-started/brokers/rabbitmq.rst0000644000175000017500000001156613130607475023003 0ustar omeromer00000000000000.. _broker-rabbitmq: ================ Using RabbitMQ ================ .. contents:: :local: Installation & Configuration ============================ RabbitMQ is the default broker so it doesn't require any additional dependencies or initial configuration, other than the URL location of the broker instance you want to use: .. code-block:: python broker_url = 'amqp://myuser:mypassword@localhost:5672/myvhost' For a description of broker URLs and a full list of the various broker configuration options available to Celery, see :ref:`conf-broker-settings`, and see below for setting up the username, password and vhost. .. _installing-rabbitmq: Installing the RabbitMQ Server ============================== See `Installing RabbitMQ`_ over at RabbitMQ's website. For macOS see `Installing RabbitMQ on macOS`_. .. _`Installing RabbitMQ`: http://www.rabbitmq.com/install.html .. note:: If you're getting `nodedown` errors after installing and using :command:`rabbitmqctl` then this blog post can help you identify the source of the problem: http://www.somic.org/2009/02/19/on-rabbitmqctl-and-badrpcnodedown/ .. _rabbitmq-configuration: Setting up RabbitMQ ------------------- To use Celery we need to create a RabbitMQ user, a virtual host and allow that user access to that virtual host: .. code-block:: console $ sudo rabbitmqctl add_user myuser mypassword .. code-block:: console $ sudo rabbitmqctl add_vhost myvhost .. code-block:: console $ sudo rabbitmqctl set_user_tags myuser mytag .. code-block:: console $ sudo rabbitmqctl set_permissions -p myvhost myuser ".*" ".*" ".*" Substitute in appropriate values for ``myuser``, ``mypassword`` and ``myvhost`` above. See the RabbitMQ `Admin Guide`_ for more information about `access control`_. .. _`Admin Guide`: http://www.rabbitmq.com/admin-guide.html .. _`access control`: http://www.rabbitmq.com/admin-guide.html#access-control .. _rabbitmq-macOS-installation: Installing RabbitMQ on macOS ---------------------------- The easiest way to install RabbitMQ on macOS is using `Homebrew`_ the new and shiny package management system for macOS. 
First, install Homebrew using the one-line command provided by the `Homebrew documentation`_: .. code-block:: console ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)" Finally, we can install RabbitMQ using :command:`brew`: .. code-block:: console $ brew install rabbitmq .. _`Homebrew`: https://github.com/mxcl/homebrew/ .. _`Homebrew documentation`: https://github.com/Homebrew/homebrew/wiki/Installation .. _rabbitmq-macOS-system-hostname: After you've installed RabbitMQ with :command:`brew` you need to add the following to your path to be able to start and stop the broker: add it to the start-up file for your shell (e.g., :file:`.bash_profile` or :file:`.profile`). .. code-block:: bash PATH=$PATH:/usr/local/sbin Configuring the system host name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you're using a DHCP server that's giving you a random host name, you need to permanently configure the host name. This is because RabbitMQ uses the host name to communicate with nodes. Use the :command:`scutil` command to permanently set your host name: .. code-block:: console $ sudo scutil --set HostName myhost.local Then add that host name to :file:`/etc/hosts` so it's possible to resolve it back into an IP address:: 127.0.0.1 localhost myhost myhost.local If you start the :command:`rabbitmq-server`, your rabbit node should now be `rabbit@myhost`, as verified by :command:`rabbitmqctl`: .. code-block:: console $ sudo rabbitmqctl status Status of node rabbit@myhost ... [{running_applications,[{rabbit,"RabbitMQ","1.7.1"}, {mnesia,"MNESIA CXC 138 12","4.4.12"}, {os_mon,"CPO CXC 138 46","2.2.4"}, {sasl,"SASL CXC 138 11","2.1.8"}, {stdlib,"ERTS CXC 138 10","1.16.4"}, {kernel,"ERTS CXC 138 10","2.13.4"}]}, {nodes,[rabbit@myhost]}, {running_nodes,[rabbit@myhost]}] ...done. This is especially important if your DHCP server gives you a host name starting with an IP address, (e.g., `23.10.112.31.comcast.net`). In this case RabbitMQ will try to use `rabbit@23`: an illegal host name. .. _rabbitmq-macOS-start-stop: Starting/Stopping the RabbitMQ server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To start the server: .. code-block:: console $ sudo rabbitmq-server you can also run it in the background by adding the ``-detached`` option (note: only one dash): .. code-block:: console $ sudo rabbitmq-server -detached Never use :command:`kill` (:manpage:`kill(1)`) to stop the RabbitMQ server, but rather use the :command:`rabbitmqctl` command: .. code-block:: console $ sudo rabbitmqctl stop When the server is running, you can continue reading `Setting up RabbitMQ`_. celery-4.1.0/docs/THANKS0000644000175000017500000000075613130607475014606 0ustar omeromer00000000000000Thanks to Rune Halvorsen for the name. Thanks to Anton Tsigularov for the previous name (crunchy) that we had to abandon because of an existing project with that name. Thanks to Armin Ronacher for the Sphinx theme. Thanks to Brian K. Jones for bunny.py (https://github.com/bkjones/bunny), the tool that inspired 'celery amqp'. Thanks to Barry Pederson for amqplib (the project py-amqp forked). Thanks to Ty Wilkins for the Celery stalk logo (2016). celery-4.1.0/docs/userguide/0000755000175000017500000000000013135426347015661 5ustar omeromer00000000000000celery-4.1.0/docs/userguide/extending.rst0000644000175000017500000007223613130607475020410 0ustar omeromer00000000000000.. _guide-extending: ========================== Extensions and Bootsteps ========================== .. contents:: :local: :depth: 2 .. 
_extending-custom-consumers: Custom Message Consumers ======================== You may want to embed custom Kombu consumers to manually process your messages. For that purpose a special :class:`~celery.bootstep.ConsumerStep` bootstep class exists, where you only need to define the ``get_consumers`` method, that must return a list of :class:`kombu.Consumer` objects to start whenever the connection is established: .. code-block:: python from celery import Celery from celery import bootsteps from kombu import Consumer, Exchange, Queue my_queue = Queue('custom', Exchange('custom'), 'routing_key') app = Celery(broker='amqp://') class MyConsumerStep(bootsteps.ConsumerStep): def get_consumers(self, channel): return [Consumer(channel, queues=[my_queue], callbacks=[self.handle_message], accept=['json'])] def handle_message(self, body, message): print('Received message: {0!r}'.format(body)) message.ack() app.steps['consumer'].add(MyConsumerStep) def send_me_a_message(who, producer=None): with app.producer_or_acquire(producer) as producer: producer.publish( {'hello': who}, serializer='json', exchange=my_queue.exchange, routing_key='routing_key', declare=[my_queue], retry=True, ) if __name__ == '__main__': send_me_a_message('world!') .. note:: Kombu Consumers can take use of two different message callback dispatching mechanisms. The first one is the ``callbacks`` argument that accepts a list of callbacks with a ``(body, message)`` signature, the second one is the ``on_message`` argument that takes a single callback with a ``(message,)`` signature. The latter won't automatically decode and deserialize the payload. .. code-block:: python def get_consumers(self, channel): return [Consumer(channel, queues=[my_queue], on_message=self.on_message)] def on_message(self, message): payload = message.decode() print( 'Received message: {0!r} {props!r} rawlen={s}'.format( payload, props=message.properties, s=len(message.body), )) message.ack() .. _extending-blueprints: Blueprints ========== Bootsteps is a technique to add functionality to the workers. A bootstep is a custom class that defines hooks to do custom actions at different stages in the worker. Every bootstep belongs to a blueprint, and the worker currently defines two blueprints: **Worker**, and **Consumer** ---------------------------------------------------------- **Figure A:** Bootsteps in the Worker and Consumer blueprints. Starting from the bottom up the first step in the worker blueprint is the Timer, and the last step is to start the Consumer blueprint, that then establishes the broker connection and starts consuming messages. .. figure:: ../images/worker_graph_full.png ---------------------------------------------------------- .. _extending-worker_blueprint: Worker ====== The Worker is the first blueprint to start, and with it starts major components like the event loop, processing pool, and the timer used for ETA tasks and other timed events. When the worker is fully started it continues with the Consumer blueprint, that sets up how tasks are executed, connects to the broker and starts the message consumers. The :class:`~celery.worker.WorkController` is the core worker implementation, and contains several methods and attributes that you can use in your bootstep. .. _extending-worker_blueprint-attributes: Attributes ---------- .. _extending-worker-app: .. attribute:: app The current app instance. .. _extending-worker-hostname: .. attribute:: hostname The workers node name (e.g., `worker1@example.com`) .. _extending-worker-blueprint: .. 
attribute:: blueprint This is the worker :class:`~celery.bootsteps.Blueprint`. .. _extending-worker-hub: .. attribute:: hub Event loop object (:class:`~kombu.async.Hub`). You can use this to register callbacks in the event loop. This is only supported by async I/O enabled transports (amqp, redis), in which case the `worker.use_eventloop` attribute should be set. Your worker bootstep must require the Hub bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Hub'} .. _extending-worker-pool: .. attribute:: pool The current process/eventlet/gevent/thread pool. See :class:`celery.concurrency.base.BasePool`. Your worker bootstep must require the Pool bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Pool'} .. _extending-worker-timer: .. attribute:: timer :class:`~kombu.async.timer.Timer` used to schedule functions. Your worker bootstep must require the Timer bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Timer'} .. _extending-worker-statedb: .. attribute:: statedb :class:`Database <celery.worker.state.Persistent>` used to persist state between worker restarts. This is only defined if the ``statedb`` argument is enabled. Your worker bootstep must require the ``Statedb`` bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Statedb'} .. _extending-worker-autoscaler: .. attribute:: autoscaler :class:`~celery.worker.autoscaler.Autoscaler` used to automatically grow and shrink the number of processes in the pool. This is only defined if the ``autoscale`` argument is enabled. Your worker bootstep must require the `Autoscaler` bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = ('celery.worker.autoscaler:Autoscaler',) .. _extending-worker-autoreloader: .. attribute:: autoreloader :class:`~celery.worker.autoreloader.Autoreloader` used to automatically reload your code when the file-system changes. This is only defined if the ``autoreload`` argument is enabled. Your worker bootstep must require the `Autoreloader` bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = ('celery.worker.autoreloader:Autoreloader',) Example worker bootstep ----------------------- An example Worker bootstep could be: .. code-block:: python from celery import bootsteps class ExampleWorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Pool'} def __init__(self, worker, **kwargs): print('Called when the WorkController instance is constructed') print('Arguments to WorkController: {0!r}'.format(kwargs)) def create(self, worker): # this method can be used to delegate the action methods # to another object that implements ``start`` and ``stop``. return self def start(self, worker): print('Called when the worker is started.') def stop(self, worker): print('Called when the worker shuts down.') def terminate(self, worker): print('Called when the worker terminates') Every method is passed the current ``WorkController`` instance as the first argument.
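To actually enable a step like this you add the class to the worker blueprint; a minimal sketch (it assumes ``ExampleWorkerStep`` from above is importable, and follows the ``app.steps`` registration shown later in this chapter):

.. code-block:: python

    from celery import Celery

    app = Celery(broker='amqp://')

    # Register the class itself, not an instance; the worker
    # instantiates each step when it builds the blueprint.
    app.steps['worker'].add(ExampleWorkerStep)

Another example could use the timer to wake up at regular intervals: ..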
code-block:: python from time import time from celery import bootsteps class DeadlockDetection(bootsteps.StartStopStep): requires = {'celery.worker.components:Timer'} def __init__(self, worker, deadlock_timeout=3600): self.timeout = deadlock_timeout self.requests = [] self.tref = None def start(self, worker): # run every 30 seconds. self.tref = worker.timer.call_repeatedly( 30.0, self.detect, (worker,), priority=10, ) def stop(self, worker): if self.tref: self.tref.cancel() self.tref = None def detect(self, worker): # check active requests for tasks that have been # running longer than the deadlock timeout. for req in worker.active_requests: if req.time_start and time() - req.time_start > self.timeout: raise SystemExit() .. _extending-consumer_blueprint: Consumer ======== The Consumer blueprint establishes a connection to the broker, and is restarted every time this connection is lost. Consumer bootsteps include the worker heartbeat, the remote control command consumer, and importantly, the task consumer. When you create consumer bootsteps you must take into account that it must be possible to restart your blueprint. An additional ``shutdown`` method is defined for consumer bootsteps; this method is called when the worker shuts down. .. _extending-consumer-attributes: Attributes ---------- .. _extending-consumer-app: .. attribute:: app The current app instance. .. _extending-consumer-controller: .. attribute:: controller The parent :class:`~@WorkController` object that created this consumer. .. _extending-consumer-hostname: .. attribute:: hostname The worker's node name (e.g., `worker1@example.com`). .. _extending-consumer-blueprint: .. attribute:: blueprint This is the worker :class:`~celery.bootsteps.Blueprint`. .. _extending-consumer-hub: .. attribute:: hub Event loop object (:class:`~kombu.async.Hub`). You can use this to register callbacks in the event loop. This is only supported by async I/O enabled transports (amqp, redis), in which case the `worker.use_eventloop` attribute should be set. Your worker bootstep must require the Hub bootstep to use this: .. code-block:: python class WorkerStep(bootsteps.StartStopStep): requires = {'celery.worker.components:Hub'} .. _extending-consumer-connection: .. attribute:: connection The current broker connection (:class:`kombu.Connection`). A consumer bootstep must require the 'Connection' bootstep to use this: .. code-block:: python class Step(bootsteps.StartStopStep): requires = {'celery.worker.consumer.connection:Connection'} .. _extending-consumer-event_dispatcher: .. attribute:: event_dispatcher A :class:`@events.Dispatcher` object that can be used to send events. A consumer bootstep must require the `Events` bootstep to use this. .. code-block:: python class Step(bootsteps.StartStopStep): requires = {'celery.worker.consumer.events:Events'} .. _extending-consumer-gossip: .. attribute:: gossip Worker to worker broadcast communication (:class:`~celery.worker.consumer.gossip.Gossip`). A consumer bootstep must require the `Gossip` bootstep to use this. ..
code-block:: python class RatelimitStep(bootsteps.StartStopStep): """Rate limit tasks based on the number of workers in the cluster.""" requires = {'celery.worker.consumer.gossip:Gossip'} def start(self, c): self.c = c self.c.gossip.on.node_join.add(self.on_cluster_size_change) self.c.gossip.on.node_leave.add(self.on_cluster_size_change) self.c.gossip.on.node_lost.add(self.on_node_lost) self.tasks = [ self.app.tasks['proj.tasks.add'], self.app.tasks['proj.tasks.mul'], ] self.last_size = None def on_cluster_size_change(self, worker): cluster_size = len(list(self.c.gossip.state.alive_workers())) if cluster_size != self.last_size: for task in self.tasks: task.rate_limit = 1.0 / cluster_size self.c.reset_rate_limits() self.last_size = cluster_size def on_node_lost(self, worker): # may have processed heartbeat too late, so wake up soon # in order to see if the worker recovered. self.c.timer.call_after(10.0, self.on_cluster_size_change) **Callbacks** - ``gossip.on.node_join`` Called whenever a new node joins the cluster, providing a :class:`~celery.events.state.Worker` instance. - ``gossip.on.node_leave`` Called whenever a new node leaves the cluster (shuts down), providing a :class:`~celery.events.state.Worker` instance. - ``gossip.on.node_lost`` Called whenever heartbeat was missed for a worker instance in the cluster (heartbeat not received or processed in time), providing a :class:`~celery.events.state.Worker` instance. This doesn't necessarily mean the worker is actually offline, so use a time-out mechanism if the default heartbeat timeout isn't sufficient. .. _extending-consumer-pool: .. attribute:: pool The current process/eventlet/gevent/thread pool. See :class:`celery.concurrency.base.BasePool`. .. _extending-consumer-timer: .. attribute:: timer :class:`~kombu.async.timer.Timer` used to schedule functions. Installing Bootsteps ==================== ``app.steps['worker']`` and ``app.steps['consumer']`` can be modified to add new bootsteps: .. code-block:: pycon >>> app = Celery() >>> app.steps['worker'].add(MyWorkerStep) # < add class, don't instantiate >>> app.steps['consumer'].add(MyConsumerStep) >>> app.steps['consumer'].update([StepA, StepB]) >>> app.steps['consumer'] {step:proj.StepB{()}, step:proj.MyConsumerStep{()}, step:proj.StepA{()}} The order of steps isn't important here as the order is decided by the resulting dependency graph (``Step.requires``). To illustrate how you can install bootsteps and how they work, this is an example step that prints some useless debugging information. It can be added both as a worker and consumer bootstep: .. code-block:: python from celery import Celery from celery import bootsteps class InfoStep(bootsteps.Step): def __init__(self, parent, **kwargs): # here we can prepare the Worker/Consumer object # in any way we want, set attribute defaults, and so on. print('{0!r} is in init'.format(parent)) def start(self, parent): # our step is started together with all other Worker/Consumer # bootsteps. print('{0!r} is starting'.format(parent)) def stop(self, parent): # the Consumer calls stop every time the consumer is # restarted (i.e., connection is lost) and also at shutdown. # The Worker will call stop at shutdown only. print('{0!r} is stopping'.format(parent)) def shutdown(self, parent): # shutdown is called by the Consumer at shutdown, it's not # called by Worker. print('{0!r} is shutting down'.format(parent)) app = Celery(broker='amqp://') app.steps['worker'].add(InfoStep) app.steps['consumer'].add(InfoStep) Starting the worker with this step installed will give us the following logs: ..
code-block:: text is in init is in init [2013-05-29 16:18:20,544: WARNING/MainProcess] is starting [2013-05-29 16:18:21,577: WARNING/MainProcess] is starting is stopping is stopping is shutting down The ``print`` statements will be redirected to the logging subsystem after the worker has been initialized, so the "is starting" lines are time-stamped. You may notice that this does no longer happen at shutdown, this is because the ``stop`` and ``shutdown`` methods are called inside a *signal handler*, and it's not safe to use logging inside such a handler. Logging with the Python logging module isn't :term:`reentrant`: meaning you cannot interrupt the function then call it again later. It's important that the ``stop`` and ``shutdown`` methods you write is also :term:`reentrant`. Starting the worker with :option:`--loglevel=debug ` will show us more information about the boot process: .. code-block:: text [2013-05-29 16:18:20,509: DEBUG/MainProcess] | Worker: Preparing bootsteps. [2013-05-29 16:18:20,511: DEBUG/MainProcess] | Worker: Building graph... is in init [2013-05-29 16:18:20,511: DEBUG/MainProcess] | Worker: New boot order: {Hub, Pool, Timer, StateDB, Autoscaler, InfoStep, Beat, Consumer} [2013-05-29 16:18:20,514: DEBUG/MainProcess] | Consumer: Preparing bootsteps. [2013-05-29 16:18:20,514: DEBUG/MainProcess] | Consumer: Building graph... is in init [2013-05-29 16:18:20,515: DEBUG/MainProcess] | Consumer: New boot order: {Connection, Mingle, Events, Gossip, InfoStep, Agent, Heart, Control, Tasks, event loop} [2013-05-29 16:18:20,522: DEBUG/MainProcess] | Worker: Starting Hub [2013-05-29 16:18:20,522: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,522: DEBUG/MainProcess] | Worker: Starting Pool [2013-05-29 16:18:20,542: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,543: DEBUG/MainProcess] | Worker: Starting InfoStep [2013-05-29 16:18:20,544: WARNING/MainProcess] is starting [2013-05-29 16:18:20,544: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,544: DEBUG/MainProcess] | Worker: Starting Consumer [2013-05-29 16:18:20,544: DEBUG/MainProcess] | Consumer: Starting Connection [2013-05-29 16:18:20,559: INFO/MainProcess] Connected to amqp://guest@127.0.0.1:5672// [2013-05-29 16:18:20,560: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:20,560: DEBUG/MainProcess] | Consumer: Starting Mingle [2013-05-29 16:18:20,560: INFO/MainProcess] mingle: searching for neighbors [2013-05-29 16:18:21,570: INFO/MainProcess] mingle: no one here [2013-05-29 16:18:21,570: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,571: DEBUG/MainProcess] | Consumer: Starting Events [2013-05-29 16:18:21,572: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,572: DEBUG/MainProcess] | Consumer: Starting Gossip [2013-05-29 16:18:21,577: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,577: DEBUG/MainProcess] | Consumer: Starting InfoStep [2013-05-29 16:18:21,577: WARNING/MainProcess] is starting [2013-05-29 16:18:21,578: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,578: DEBUG/MainProcess] | Consumer: Starting Heart [2013-05-29 16:18:21,579: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,579: DEBUG/MainProcess] | Consumer: Starting Control [2013-05-29 16:18:21,583: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,583: DEBUG/MainProcess] | Consumer: Starting Tasks [2013-05-29 16:18:21,606: DEBUG/MainProcess] basic.qos: prefetch_count->80 [2013-05-29 16:18:21,606: DEBUG/MainProcess] ^-- substep ok [2013-05-29 16:18:21,606: DEBUG/MainProcess] | Consumer: 
Starting event loop [2013-05-29 16:18:21,608: WARNING/MainProcess] celery@example.com ready. .. _extending-programs: Command-line programs ===================== .. _extending-commandoptions: Adding new command-line options ------------------------------- .. _extending-command-options: Command-specific options ~~~~~~~~~~~~~~~~~~~~~~~~ You can add additional command-line options to the ``worker``, ``beat``, and ``events`` commands by modifying the :attr:`~@user_options` attribute of the application instance. Celery commands uses the :mod:`argparse` module to parse command-line arguments, and so to add custom arguments you need to specify a callback that takes a :class:`argparse.ArgumentParser` instance - and adds arguments. Please see the :mod:`argparse` documentation to read about the fields supported. Example adding a custom option to the :program:`celery worker` command: .. code-block:: python from celery import Celery app = Celery(broker='amqp://') def add_worker_arguments(parser): parser.add_argument( '--enable-my-option', action='store_true', default=False, help='Enable custom option.', ), app.user_options['worker'].add(add_worker_arguments) All bootsteps will now receive this argument as a keyword argument to ``Bootstep.__init__``: .. code-block:: python from celery import bootsteps class MyBootstep(bootsteps.Step): def __init__(self, worker, enable_my_option=False, **options): if enable_my_option: party() app.steps['worker'].add(MyBootstep) .. _extending-preload_options: Preload options ~~~~~~~~~~~~~~~ The :program:`celery` umbrella command supports the concept of 'preload options'. These are special options passed to all sub-commands and parsed outside of the main parsing step. The list of default preload options can be found in the API reference: :mod:`celery.bin.base`. You can add new preload options too, for example to specify a configuration template: .. code-block:: python from celery import Celery from celery import signals from celery.bin import Option app = Celery() def add_preload_options(parser): parser.add_argument( '-Z', '--template', default='default', help='Configuration template to use.', ) app.user_options['preload'].add(add_preload_options) @signals.user_preload_options.connect def on_preload_parsed(options, **kwargs): use_template(options['template']) .. _extending-subcommands: Adding new :program:`celery` sub-commands ----------------------------------------- New commands can be added to the :program:`celery` umbrella command by using `setuptools entry-points`_. .. _`setuptools entry-points`: http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html Entry-points is special meta-data that can be added to your packages ``setup.py`` program, and then after installation, read from the system using the :mod:`pkg_resources` module. Celery recognizes ``celery.commands`` entry-points to install additional sub-commands, where the value of the entry-point must point to a valid subclass of :class:`celery.bin.base.Command`. There's limited documentation, unfortunately, but you can find inspiration from the various commands in the :mod:`celery.bin` package. This is how the :pypi:`Flower` monitoring extension adds the :program:`celery flower` command, by adding an entry-point in :file:`setup.py`: .. 
code-block:: python setup( name='flower', entry_points={ 'celery.commands': [ 'flower = flower.command:FlowerCommand', ], } ) The command definition is in two parts separated by the equal sign, where the first part is the name of the sub-command (flower), and the second part is the fully qualified symbol path to the class that implements the command: .. code-block:: text flower.command:FlowerCommand The module path and the name of the attribute should be separated by a colon as above. In the module :file:`flower/command.py`, the command class is defined something like this: .. code-block:: python from celery.bin.base import Command class FlowerCommand(Command): def add_arguments(self, parser): parser.add_argument( '--port', default=8888, type=int, help='Webserver port', ) parser.add_argument( '--debug', action='store_true', ) def run(self, port=None, debug=False, **kwargs): print('Running our command') Worker API ========== :class:`~kombu.async.Hub` - The worker's async event loop ---------------------------------------------------------- :supported transports: amqp, redis .. versionadded:: 3.0 The worker uses asynchronous I/O when the amqp or redis broker transports are used. The eventual goal is for all transports to use the event-loop, but that will take some time so other transports still use a threading-based solution. .. method:: hub.add(fd, callback, flags) .. method:: hub.add_reader(fd, callback, \*args) Add callback to be called when ``fd`` is readable. The callback will stay registered until explicitly removed using :meth:`hub.remove(fd)`, or the file descriptor is automatically discarded because it's no longer valid. Note that only one callback can be registered for any given file descriptor at a time, so calling ``add`` a second time will remove any callback that was previously registered for that file descriptor. A file descriptor is any file-like object that supports the ``fileno`` method, or it can be the file descriptor number (int). .. method:: hub.add_writer(fd, callback, \*args) Add callback to be called when ``fd`` is writable. See also notes for :meth:`hub.add_reader` above. .. method:: hub.remove(fd) Remove all callbacks for file descriptor ``fd`` from the loop. Timer - Scheduling events ------------------------- .. method:: timer.call_after(secs, callback, args=(), kwargs=(), priority=0) .. method:: timer.call_repeatedly(secs, callback, args=(), kwargs=(), priority=0) .. method:: timer.call_at(eta, callback, args=(), kwargs=(), priority=0) celery-4.1.0/docs/userguide/index.rst .. _guide: ============ User Guide ============ :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 application tasks calling canvas workers daemonizing periodic-tasks routing monitoring security optimizing debugging concurrency/index signals testing extending configuration celery-4.1.0/docs/userguide/debugging.rst .. _guide-debugging: ====================================== Debugging ====================================== .. _tut-remote_debug: Debugging Tasks Remotely (using pdb) ==================================== Basics ------ :mod:`celery.contrib.rdb` is an extended version of :mod:`pdb` that enables remote debugging of processes that don't have terminal access. Example usage: ..
code-block:: python from celery import task from celery.contrib import rdb @task() def add(x, y): result = x + y rdb.set_trace() # <- set break-point return result :func:`~celery.contrib.rdb.set_trace` sets a break-point at the current location and creates a socket you can telnet into to remotely debug your task. The debugger may be started by multiple processes at the same time, so rather than using a fixed port the debugger will search for an available port, starting from the base port (6900 by default). The base port can be changed using the environment variable :envvar:`CELERY_RDB_PORT`. By default the debugger will only be available from the local host; to enable access from the outside you have to set the environment variable :envvar:`CELERY_RDB_HOST`. When the worker encounters your break-point it'll log the following information: .. code-block:: text [INFO/MainProcess] Received task: tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] [WARNING/PoolWorker-1] Remote Debugger:6900: Please telnet 127.0.0.1 6900. Type `exit` in session to continue. [2011-01-18 14:25:44,119: WARNING/PoolWorker-1] Remote Debugger:6900: Waiting for client... If you telnet to the specified port you'll be presented with a `pdb` shell: .. code-block:: console $ telnet localhost 6900 Connected to localhost. Escape character is '^]'. > /opt/devel/demoapp/tasks.py(128)add() -> return result (Pdb) Enter ``help`` to get a list of available commands. It may be a good idea to read the `Python Debugger Manual`_ if you have never used `pdb` before. To demonstrate, we'll read the value of the ``result`` variable, change it and continue execution of the task: .. code-block:: text (Pdb) result 4 (Pdb) result = 'hello from rdb' (Pdb) continue Connection closed by foreign host. The result of our vandalism can be seen in the worker logs: .. code-block:: text [2011-01-18 14:35:36,599: INFO/MainProcess] Task tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] succeeded in 61.481s: 'hello from rdb' .. _`Python Debugger Manual`: http://docs.python.org/library/pdb.html Tips ---- .. _breakpoint_signal: Enabling the break-point signal ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the environment variable :envvar:`CELERY_RDBSIG` is set, the worker will open up an rdb instance whenever the `SIGUSR2` signal is sent. This is the case for both main and worker processes. For example, starting the worker with: .. code-block:: console $ CELERY_RDBSIG=1 celery worker -l info You can start an rdb session for any of the worker processes by executing: .. code-block:: console $ kill -USR2 <pid> celery-4.1.0/docs/userguide/optimizing.rst .. _guide-optimizing: ============ Optimizing ============ Introduction ============ The default configuration makes a lot of compromises. It's not optimal for any single case, but works well enough for most situations. There are optimizations that can be applied based on specific use cases. Optimizations can apply to different properties of the running environment, be it the time tasks take to execute, the amount of memory used, or responsiveness at times of high load. Ensuring Operations =================== In the book `Programming Pearls`_, Jon Bentley presents the concept of back-of-the-envelope calculations by asking the question: "How much water flows out of the Mississippi River in a day?" The point of this exercise [*]_ is to show that there's a limit to how much data a system can process in a timely manner.
Back of the envelope calculations can be used as a means to plan for this ahead of time. In Celery; If a task takes 10 minutes to complete, and there are 10 new tasks coming in every minute, the queue will never be empty. This is why it's very important that you monitor queue lengths! A way to do this is by :ref:`using Munin `. You should set up alerts, that'll notify you as soon as any queue has reached an unacceptable size. This way you can take appropriate action like adding new worker nodes, or revoking unnecessary tasks. .. _`Programming Pearls`: http://www.cs.bell-labs.com/cm/cs/pearls/ .. _`The back of the envelope`: http://books.google.com/books?id=kse_7qbWbjsC&pg=PA67 .. _optimizing-general-settings: General Settings ================ .. _optimizing-librabbitmq: librabbitmq ----------- If you're using RabbitMQ (AMQP) as the broker then you can install the :pypi:`librabbitmq` module to use an optimized client written in C: .. code-block:: console $ pip install librabbitmq The 'amqp' transport will automatically use the librabbitmq module if it's installed, or you can also specify the transport you want directly by using the ``pyamqp://`` or ``librabbitmq://`` prefixes. .. _optimizing-connection-pools: Broker Connection Pools ----------------------- The broker connection pool is enabled by default since version 2.5. You can tweak the :setting:`broker_pool_limit` setting to minimize contention, and the value should be based on the number of active threads/green-threads using broker connections. .. _optimizing-transient-queues: Using Transient Queues ---------------------- Queues created by Celery are persistent by default. This means that the broker will write messages to disk to ensure that the tasks will be executed even if the broker is restarted. But in some cases it's fine that the message is lost, so not all tasks require durability. You can create a *transient* queue for these tasks to improve performance: .. code-block:: python from kombu import Exchange, Queue task_queues = ( Queue('celery', routing_key='celery'), Queue('transient', Exchange('transient', delivery_mode=1), routing_key='transient', durable=False), ) or by using :setting:`task_routes`: .. code-block:: python task_routes = { 'proj.tasks.add': {'queue': 'celery', 'delivery_mode': 'transient'} } The ``delivery_mode`` changes how the messages to this queue are delivered. A value of one means that the message won't be written to disk, and a value of two (default) means that the message can be written to disk. To direct a task to your new transient queue you can specify the queue argument (or use the :setting:`task_routes` setting): .. code-block:: python task.apply_async(args, queue='transient') For more information see the :ref:`routing guide `. .. _optimizing-worker-settings: Worker Settings =============== .. _optimizing-prefetch-limit: Prefetch Limits --------------- *Prefetch* is a term inherited from AMQP that's often misunderstood by users. The prefetch limit is a **limit** for the number of tasks (messages) a worker can reserve for itself. If it is zero, the worker will keep consuming messages, not respecting that there may be other available worker nodes that may be able to process them sooner [*]_, or that the messages may not even fit in memory. The workers' default prefetch count is the :setting:`worker_prefetch_multiplier` setting multiplied by the number of concurrency slots [*]_ (processes/threads/green-threads). 
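To make the arithmetic concrete, here's an illustrative configuration (the numbers are examples, not recommendations): with ten concurrency slots and the default multiplier of four, a single worker may reserve up to 40 messages at a time:

.. code-block:: python

    # Prefetch limit = concurrency slots * multiplier (10 * 4 = 40 messages).
    worker_concurrency = 10
    worker_prefetch_multiplier = 4
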
If you have many tasks with a long duration you want the multiplier value to be *one*: meaning it'll only reserve one task per worker process at a time. However -- if you have many short-running tasks, and throughput/round trip latency is important to you, this number should be large. The worker is able to process more tasks per second if the messages have already been prefetched, and are available in memory. You may have to experiment to find the best value that works for you. Values like 50 or 150 might make sense in these circumstances. If you have a combination of long- and short-running tasks, the best option is to use two worker nodes that are configured separately, and route the tasks according to the run-time (see :ref:`guide-routing`). Reserve one task at a time -------------------------- The task message is only deleted from the queue after the task is :term:`acknowledged`, so if the worker crashes before acknowledging the task, it can be redelivered to another worker (or the same after recovery). When using the default of early acknowledgment, having a prefetch multiplier setting of *one* means the worker will reserve at most one extra task for every worker process: or in other words, if the worker is started with :option:`-c 10 <celery worker -c>`, the worker may reserve at most 20 tasks (10 unacknowledged tasks executing, and 10 unacknowledged reserved tasks) at any time. Often users ask if disabling "prefetching of tasks" is possible, but what they really mean by that is to have a worker only reserve as many tasks as there are worker processes (10 unacknowledged tasks for :option:`-c 10 <celery worker -c>`). That's possible, but not without also enabling :term:`late acknowledgment`. Using this option over the default behavior means a task that's already started executing will be retried in the event of a power failure or the worker instance being killed abruptly, so this also means the task must be :term:`idempotent`. .. seealso:: Notes at :ref:`faq-acks_late-vs-retry`. You can enable this behavior by using the following configuration options: .. code-block:: python task_acks_late = True worker_prefetch_multiplier = 1 .. _prefork-pool-prefetch: Prefork pool prefetch settings ------------------------------ The prefork pool will asynchronously send as many tasks to the processes as it can, and this means that the processes are, in effect, prefetching tasks. This benefits performance but it also means that tasks may be stuck waiting for long running tasks to complete:: -> send task T1 to process A # A executes T1 -> send task T2 to process B # B executes T2 <- T2 complete sent by process B -> send task T3 to process A # A still executing T1, T3 stuck in local buffer and won't start until # T1 returns, and other queued tasks won't be sent to idle processes <- T1 complete sent by process A # A executes T3 The worker will send tasks to the process as long as the pipe buffer is writable. The pipe buffer size varies based on the operating system: some may have a buffer as small as 64KB but on recent Linux versions the buffer size is 1MB (can only be changed system-wide). You can disable this prefetching behavior by enabling the :option:`-Ofair <celery worker -O>` worker option: ..
code-block:: console $ celery -A proj worker -l info -Ofair With this option enabled the worker will only write to processes that are available for work, disabling the prefetch behavior:: -> send task T1 to process A # A executes T1 -> send task T2 to process B # B executes T2 <- T2 complete sent by process B -> send T3 to process B # B executes T3 <- T3 complete sent by process B <- T1 complete sent by process A .. rubric:: Footnotes .. [*] The chapter is available to read for free here: `The back of the envelope`_. The book is a classic text. Highly recommended. .. [*] RabbitMQ and other brokers deliver messages round-robin, so this doesn't apply to an active system. If there's no prefetch limit and you restart the cluster, there will be timing delays between nodes starting. If there are 3 offline nodes and one active node, all messages will be delivered to the active node. .. [*] This is the concurrency setting; :setting:`worker_concurrency` or the :option:`celery worker -c` option. celery-4.1.0/docs/userguide/concurrency/0000755000175000017500000000000013135426347020213 5ustar omeromer00000000000000celery-4.1.0/docs/userguide/concurrency/index.rst0000644000175000017500000000021413130607475022047 0ustar omeromer00000000000000.. _concurrency: ============= Concurrency ============= :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 eventlet celery-4.1.0/docs/userguide/concurrency/eventlet.rst0000644000175000017500000000447113130607475022577 0ustar omeromer00000000000000.. _concurrency-eventlet: =========================== Concurrency with Eventlet =========================== .. _eventlet-introduction: Introduction ============ The `Eventlet`_ homepage describes it as; A concurrent networking library for Python that allows you to change how you run your code, not how you write it. * It uses `epoll(4)`_ or `libevent`_ for `highly scalable non-blocking I/O`_. * `Coroutines`_ ensure that the developer uses a blocking style of programming that's similar to threading, but provide the benefits of non-blocking I/O. * The event dispatch is implicit: meaning you can easily use Eventlet from the Python interpreter, or as a small part of a larger application. Celery supports Eventlet as an alternative execution pool implementation. It's in some cases superior to prefork, but you need to ensure your tasks don't perform blocking calls, as this will halt all other operations in the worker until the blocking call returns. The prefork pool can take use of multiple processes, but how many is often limited to a few processes per CPU. With Eventlet you can efficiently spawn hundreds, or thousands of green threads. In an informal test with a feed hub system the Eventlet pool could fetch and process hundreds of feeds every second, while the prefork pool spent 14 seconds processing 100 feeds. Note that this is one of the applications async I/O is especially good at (asynchronous HTTP requests). You may want a mix of both Eventlet and prefork workers, and route tasks according to compatibility or what works best. Enabling Eventlet ================= You can enable the Eventlet pool by using the :option:`celery worker -P` worker option. .. code-block:: console $ celery -A proj worker -P eventlet -c 1000 .. _eventlet-examples: Examples ======== See the `Eventlet examples`_ directory in the Celery distribution for some examples taking use of Eventlet support. .. _`Eventlet`: http://eventlet.net .. _`epoll(4)`: http://linux.die.net/man/4/epoll .. _`libevent`: http://monkey.org/~provos/libevent/ .. 
_`highly scalable non-blocking I/O`: https://en.wikipedia.org/wiki/Asynchronous_I/O#Select.28.2Fpoll.29_loops .. _`Coroutines`: https://en.wikipedia.org/wiki/Coroutine .. _`Eventlet examples`: https://github.com/celery/celery/tree/master/examples/eventlet celery-4.1.0/docs/userguide/application.rst0000644000175000017500000003473113130607475020724 0ustar omeromer00000000000000.. _guide-app: ============= Application ============= .. contents:: :local: :depth: 1 The Celery library must be instantiated before use, this instance is called an application (or *app* for short). The application is thread-safe so that multiple Celery applications with different configurations, components, and tasks can co-exist in the same process space. Let's create one now: .. code-block:: pycon >>> from celery import Celery >>> app = Celery() >>> app The last line shows the textual representation of the application: including the name of the app class (``Celery``), the name of the current main module (``__main__``), and the memory address of the object (``0x100469fd0``). Main Name ========= Only one of these is important, and that's the main module name. Let's look at why that is. When you send a task message in Celery, that message won't contain any source code, but only the name of the task you want to execute. This works similarly to how host names work on the internet: every worker maintains a mapping of task names to their actual functions, called the *task registry*. Whenever you define a task, that task will also be added to the local registry: .. code-block:: pycon >>> @app.task ... def add(x, y): ... return x + y >>> add <@task: __main__.add> >>> add.name __main__.add >>> app.tasks['__main__.add'] <@task: __main__.add> and there you see that ``__main__`` again; whenever Celery isn't able to detect what module the function belongs to, it uses the main module name to generate the beginning of the task name. This is only a problem in a limited set of use cases: #. If the module that the task is defined in is run as a program. #. If the application is created in the Python shell (REPL). For example here, where the tasks module is also used to start a worker with :meth:`@worker_main`: :file:`tasks.py`: .. code-block:: python from celery import Celery app = Celery() @app.task def add(x, y): return x + y if __name__ == '__main__': app.worker_main() When this module is executed the tasks will be named starting with "``__main__``", but when the module is imported by another process, say to call a task, the tasks will be named starting with "``tasks``" (the real name of the module): .. code-block:: pycon >>> from tasks import add >>> add.name tasks.add You can specify another name for the main module: .. code-block:: pycon >>> app = Celery('tasks') >>> app.main 'tasks' >>> @app.task ... def add(x, y): ... return x + y >>> add.name tasks.add .. seealso:: :ref:`task-names` Configuration ============= There are several options you can set that'll change how Celery works. These options can be set directly on the app instance, or you can use a dedicated configuration module. The configuration is available as :attr:`@conf`: .. code-block:: pycon >>> app.conf.timezone 'Europe/London' where you can also set configuration values directly: .. code-block:: pycon >>> app.conf.enable_utc = True or update several keys at once by using the ``update`` method: .. code-block:: python >>> app.conf.update( ... enable_utc=True, ... timezone='Europe/London', ...) 
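Settings changed this way take effect immediately; reading the key back returns the new value (an illustrative session):

.. code-block:: pycon

    >>> app.conf.update(timezone='Europe/London')
    >>> app.conf.timezone
    'Europe/London'
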
The configuration object consists of multiple dictionaries that are consulted in order: #. Changes made at run-time. #. The configuration module (if any) #. The default configuration (:mod:`celery.app.defaults`). You can even add new default sources by using the :meth:`@add_defaults` method. .. seealso:: Go to the :ref:`Configuration reference ` for a complete listing of all the available settings, and their default values. ``config_from_object`` ---------------------- The :meth:`@config_from_object` method loads configuration from a configuration object. This can be a configuration module, or any object with configuration attributes. Note that any configuration that was previously set will be reset when :meth:`~@config_from_object` is called. If you want to set additional configuration you should do so after. Example 1: Using the name of a module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :meth:`@config_from_object` method can take the fully qualified name of a Python module, or even the name of a Python attribute, for example: ``"celeryconfig"``, ``"myproj.config.celery"``, or ``"myproj.config:CeleryConfig"``: .. code-block:: python from celery import Celery app = Celery() app.config_from_object('celeryconfig') The ``celeryconfig`` module may then look like this: :file:`celeryconfig.py`: .. code-block:: python enable_utc = True timezone = 'Europe/London' and the app will be able to use it as long as ``import celeryconfig`` is possible. Example 2: Passing an actual module object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can also pass an already imported module object, but this isn't always recommended. .. tip:: Using the name of a module is recommended as this means the module does not need to be serialized when the prefork pool is used. If you're experiencing configuration problems or pickle errors then please try using the name of a module instead. .. code-block:: python import celeryconfig from celery import Celery app = Celery() app.config_from_object(celeryconfig) Example 3: Using a configuration class/object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python from celery import Celery app = Celery() class Config: enable_utc = True timezone = 'Europe/London' app.config_from_object(Config) # or using the fully qualified name of the object: # app.config_from_object('module:Config') ``config_from_envvar`` ---------------------- The :meth:`@config_from_envvar` takes the configuration module name from an environment variable For example -- to load configuration from a module specified in the environment variable named :envvar:`CELERY_CONFIG_MODULE`: .. code-block:: python import os from celery import Celery #: Set default configuration module name os.environ.setdefault('CELERY_CONFIG_MODULE', 'celeryconfig') app = Celery() app.config_from_envvar('CELERY_CONFIG_MODULE') You can then specify the configuration module to use via the environment: .. code-block:: console $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l info .. _app-censored-config: Censored configuration ---------------------- If you ever want to print out the configuration, as debugging information or similar, you may also want to filter out sensitive information like passwords and API keys. Celery comes with several utilities useful for presenting the configuration, one is :meth:`~celery.app.utils.Settings.humanize`: .. code-block:: pycon >>> app.conf.humanize(with_defaults=False, censored=True) This method returns the configuration as a tabulated string. 
This will only contain changes to the configuration by default, but you can include the built-in default keys and values by enabling the ``with_defaults`` argument. If you instead want to work with the configuration as a dictionary, you can use the :meth:`~celery.app.utils.Settings.table` method: .. code-block:: pycon >>> app.conf.table(with_defaults=False, censored=True) Please note that Celery won't be able to remove all sensitive information, as it merely uses a regular expression to search for commonly named keys. If you add custom settings containing sensitive information you should name the keys using a name that Celery identifies as secret. A configuration setting will be censored if the name contains any of these sub-strings: ``API``, ``TOKEN``, ``KEY``, ``SECRET``, ``PASS``, ``SIGNATURE``, ``DATABASE`` Laziness ======== The application instance is lazy, meaning it won't be evaluated until it's actually needed. Creating a :class:`@Celery` instance will only do the following: #. Create a logical clock instance, used for events. #. Create the task registry. #. Set itself as the current app (but not if the ``set_as_current`` argument was disabled) #. Call the :meth:`@on_init` callback (does nothing by default). The :meth:`@task` decorators don't create the tasks at the point when the task is defined, instead it'll defer the creation of the task to happen either when the task is used, or after the application has been *finalized*, This example shows how the task isn't created until you use the task, or access an attribute (in this case :meth:`repr`): .. code-block:: pycon >>> @app.task >>> def add(x, y): ... return x + y >>> type(add) >>> add.__evaluated__() False >>> add # <-- causes repr(add) to happen <@task: __main__.add> >>> add.__evaluated__() True *Finalization* of the app happens either explicitly by calling :meth:`@finalize` -- or implicitly by accessing the :attr:`@tasks` attribute. Finalizing the object will: #. Copy tasks that must be shared between apps Tasks are shared by default, but if the ``shared`` argument to the task decorator is disabled, then the task will be private to the app it's bound to. #. Evaluate all pending task decorators. #. Make sure all tasks are bound to the current app. Tasks are bound to an app so that they can read default values from the configuration. .. _default-app: .. topic:: The "default app" Celery didn't always have applications, it used to be that there was only a module-based API, and for backwards compatibility the old API is still there until the release of Celery 5.0. Celery always creates a special app - the "default app", and this is used if no custom application has been instantiated. The :mod:`celery.task` module is there to accommodate the old API, and shouldn't be used if you use a custom app. You should always use the methods on the app instance, not the module based API. For example, the old Task base class enables many compatibility features where some may be incompatible with newer features, such as task methods: .. code-block:: python from celery.task import Task # << OLD Task base class. from celery import Task # << NEW base class. The new base class is recommended even if you use the old module-based API. Breaking the chain ================== While it's possible to depend on the current app being set, the best practice is to always pass the app instance around to anything that needs it. I call this the "app chain", since it creates a chain of instances depending on the app being passed. 
The following example is considered bad practice: .. code-block:: python from celery import current_app class Scheduler(object): def run(self): app = current_app Instead it should take the ``app`` as an argument: .. code-block:: python class Scheduler(object): def __init__(self, app): self.app = app Internally Celery uses the :func:`celery.app.app_or_default` function so that everything also works in the module-based compatibility API .. code-block:: python from celery.app import app_or_default class Scheduler(object): def __init__(self, app=None): self.app = app_or_default(app) In development you can set the :envvar:`CELERY_TRACE_APP` environment variable to raise an exception if the app chain breaks: .. code-block:: console $ CELERY_TRACE_APP=1 celery worker -l info .. topic:: Evolving the API Celery has changed a lot in the 7 years since it was initially created. For example, in the beginning it was possible to use any callable as a task: .. code-block:: pycon def hello(to): return 'hello {0}'.format(to) >>> from celery.execute import apply_async >>> apply_async(hello, ('world!',)) or you could also create a ``Task`` class to set certain options, or override other behavior .. code-block:: python from celery.task import Task from celery.registry import tasks class Hello(Task): queue = 'hipri' def run(self, to): return 'hello {0}'.format(to) tasks.register(Hello) >>> Hello.delay('world!') Later, it was decided that passing arbitrary call-able's was an anti-pattern, since it makes it very hard to use serializers other than pickle, and the feature was removed in 2.0, replaced by task decorators: .. code-block:: python from celery.task import task @task(queue='hipri') def hello(to): return 'hello {0}'.format(to) Abstract Tasks ============== All tasks created using the :meth:`~@task` decorator will inherit from the application's base :attr:`~@Task` class. You can specify a different base class using the ``base`` argument: .. code-block:: python @app.task(base=OtherTask): def add(x, y): return x + y To create a custom task class you should inherit from the neutral base class: :class:`celery.Task`. .. code-block:: python from celery import Task class DebugTask(Task): def __call__(self, *args, **kwargs): print('TASK STARTING: {0.name}[{0.request.id}]'.format(self)) return super(DebugTask, self).__call__(*args, **kwargs) .. tip:: If you override the tasks ``__call__`` method, then it's very important that you also call super so that the base call method can set up the default request used when a task is called directly. The neutral base class is special because it's not bound to any specific app yet. Once a task is bound to an app it'll read configuration to set default values, and so on. To realize a base class you need to create a task using the :meth:`@task` decorator: .. code-block:: python @app.task(base=DebugTask) def add(x, y): return x + y It's even possible to change the default base class for an application by changing its :meth:`@Task` attribute: .. code-block:: pycon >>> from celery import Celery, Task >>> app = Celery() >>> class MyBaseTask(Task): ... queue = 'hipri' >>> app.Task = MyBaseTask >>> app.Task >>> @app.task ... def add(x, y): ... return x + y >>> add <@task: __main__.add> >>> add.__class__.mro() [>, , , ] celery-4.1.0/docs/userguide/workers.rst0000644000175000017500000010167513130607475020117 0ustar omeromer00000000000000.. _guide-workers: =============== Workers Guide =============== .. contents:: :local: :depth: 1 .. 
.. _worker-starting:

Starting the worker
===================

.. sidebar:: Daemonizing

    You probably want to use a daemonization tool to start
    the worker in the background. See :ref:`daemonizing` for help
    starting the worker as a daemon using popular service managers.

You can start the worker in the foreground by executing the command:

.. code-block:: console

    $ celery -A proj worker -l info

For a full list of available command-line options see
:mod:`~celery.bin.worker`, or simply do:

.. code-block:: console

    $ celery worker --help

You can start multiple workers on the same machine, but
be sure to name each individual worker by specifying a
node name with the :option:`--hostname <celery worker --hostname>` argument:

.. code-block:: console

    $ celery -A proj worker --loglevel=INFO --concurrency=10 -n worker1@%h
    $ celery -A proj worker --loglevel=INFO --concurrency=10 -n worker2@%h
    $ celery -A proj worker --loglevel=INFO --concurrency=10 -n worker3@%h

The ``hostname`` argument can expand the following variables:

    - ``%h``:  Hostname, including domain name.
    - ``%n``:  Hostname only.
    - ``%d``:  Domain name only.

If the current hostname is *george.example.com*, these will expand to:

+----------+----------------+------------------------------+
| Variable | Template       | Result                       |
+----------+----------------+------------------------------+
| ``%h``   | ``worker1@%h`` | *worker1@george.example.com* |
+----------+----------------+------------------------------+
| ``%n``   | ``worker1@%n`` | *worker1@george*             |
+----------+----------------+------------------------------+
| ``%d``   | ``worker1@%d`` | *worker1@example.com*        |
+----------+----------------+------------------------------+

.. admonition:: Note for :pypi:`supervisor` users

    The ``%`` sign must be escaped by adding a second one: `%%h`.

.. _worker-stopping:

Stopping the worker
===================

Shutdown should be accomplished using the :sig:`TERM` signal.

When shutdown is initiated the worker will finish all currently executing
tasks before it actually terminates. If these tasks are important, you
should wait for them to finish before doing anything drastic, like sending
the :sig:`KILL` signal.

If the worker won't shut down after a considerable amount of time, for
example because it's stuck in an infinite loop or similar, you can use the
:sig:`KILL` signal to force terminate the worker: but be aware that
currently executing tasks will be lost (i.e., unless the tasks have the
:attr:`~@Task.acks_late` option set).

Also, as processes can't override the :sig:`KILL` signal, the worker will
not be able to reap its children; make sure to do so manually. This
command usually does the trick:

.. code-block:: console

    $ pkill -9 -f 'celery worker'

If you don't have the :command:`pkill` command on your system, you can use
the slightly longer version:

.. code-block:: console

    $ ps auxww | grep 'celery worker' | awk '{print $2}' | xargs kill -9

.. _worker-restarting:

Restarting the worker
=====================

To restart the worker you should send the `TERM` signal and start a new
instance. The easiest way to manage workers for development
is by using `celery multi`:

.. code-block:: console

    $ celery multi start 1 -A proj -l info -c4 --pidfile=/var/run/celery/%n.pid
    $ celery multi restart 1 --pidfile=/var/run/celery/%n.pid

For production deployments you should be using init-scripts or a process
supervision system (see :ref:`daemonizing`).

Other than stopping, then starting the worker to restart, you can also
restart the worker using the :sig:`HUP` signal.
Note that the worker will be responsible for restarting itself, so this is
prone to problems and isn't recommended in production:

.. code-block:: console

    $ kill -HUP $pid

.. note::

    Restarting by :sig:`HUP` only works if the worker is running
    in the background as a daemon (it doesn't have a controlling
    terminal).

    :sig:`HUP` is disabled on macOS because of a limitation on
    that platform.

.. _worker-process-signals:

Process Signals
===============

The worker's main process overrides the following signals:

+--------------+-------------------------------------------------+
| :sig:`TERM`  | Warm shutdown, wait for tasks to complete.      |
+--------------+-------------------------------------------------+
| :sig:`QUIT`  | Cold shutdown, terminate ASAP.                  |
+--------------+-------------------------------------------------+
| :sig:`USR1`  | Dump traceback for all active threads.          |
+--------------+-------------------------------------------------+
| :sig:`USR2`  | Remote debug, see :mod:`celery.contrib.rdb`.    |
+--------------+-------------------------------------------------+

.. _worker-files:

Variables in file paths
=======================

The file path arguments for :option:`--logfile <celery worker --logfile>`,
:option:`--pidfile <celery worker --pidfile>`, and
:option:`--statedb <celery worker --statedb>` can contain variables that
the worker will expand:

Node name replacements
----------------------

- ``%p``:  Full node name.
- ``%h``:  Hostname, including domain name.
- ``%n``:  Hostname only.
- ``%d``:  Domain name only.
- ``%i``:  Prefork pool process index or 0 if MainProcess.
- ``%I``:  Prefork pool process index with separator.

For example, if the current hostname is ``george@foo.example.com`` then
these will expand to:

- ``--logfile=%p.log`` -> :file:`george@foo.example.com.log`
- ``--logfile=%h.log`` -> :file:`foo.example.com.log`
- ``--logfile=%n.log`` -> :file:`george.log`
- ``--logfile=%d.log`` -> :file:`example.com.log`

.. _worker-files-process-index:

Prefork pool process index
--------------------------

The prefork pool process index specifiers will expand into a different
filename depending on the process that'll eventually need to open the file.

This can be used to specify one log file per child process.

Note that the numbers will stay within the process limit even if processes
exit or if autoscale/``maxtasksperchild``/time limits are used. That is,
the number is the *process index*, not the process count or pid.

* ``%i`` - Pool process index or 0 if MainProcess.

    Where ``-n worker1@example.com -c2 -f %n-%i.log`` will result in
    three log files:

        - :file:`worker1-0.log` (main process)
        - :file:`worker1-1.log` (pool process 1)
        - :file:`worker1-2.log` (pool process 2)

* ``%I`` - Pool process index with separator.

    Where ``-n worker1@example.com -c2 -f %n%I.log`` will result in
    three log files:

        - :file:`worker1.log` (main process)
        - :file:`worker1-1.log` (pool process 1)
        - :file:`worker1-2.log` (pool process 2)

.. _worker-concurrency:

Concurrency
===========

By default multiprocessing is used to perform concurrent execution of
tasks, but you can also use :ref:`Eventlet <concurrency-eventlet>`. The
number of worker processes/threads can be changed using the
:option:`--concurrency <celery worker --concurrency>` argument and defaults
to the number of CPUs available on the machine.

.. admonition:: Number of processes (multiprocessing/prefork pool)

    More pool processes are usually better, but there's a cut-off point
    where adding more pool processes affects performance in negative ways.
    There's even some evidence to support that having multiple worker
    instances running may perform better than having a single worker.
For example 3 workers with 10 pool processes each. You need to experiment
to find the numbers that work best for you, as this varies based on
application, workload, task run times, and other factors.

.. _worker-remote-control:

Remote control
==============

.. versionadded:: 2.0

.. sidebar:: The ``celery`` command

    The :program:`celery` program is used to execute remote control
    commands from the command-line. It supports all of the commands
    listed below. See :ref:`monitoring-control` for more information.

:pool support: *prefork, eventlet, gevent*, blocking:*solo* (see note)
:broker support: *amqp, redis*

Workers have the ability to be remote controlled using a high-priority
broadcast message queue. The commands can be directed to all, or a specific
list of workers.

Commands can also have replies. The client can then wait for and collect
those replies. Since there's no central authority to know how many
workers are available in the cluster, there's also no way to estimate
how many workers may send a reply, so the client has a configurable
timeout — the deadline in seconds for replies to arrive in. This timeout
defaults to one second. If the worker doesn't reply within the deadline
it doesn't necessarily mean the worker didn't reply, or worse is dead, but
may simply be caused by network latency or the worker being slow at
processing commands, so adjust the timeout accordingly.

In addition to timeouts, the client can specify the maximum number
of replies to wait for. If a destination is specified, this limit is set
to the number of destination hosts.

.. note::

    The ``solo`` pool supports remote control commands,
    but any task executing will block any waiting control command,
    so it is of limited use if the worker is very busy. In that
    case you must increase the timeout waiting for replies in the client.

.. _worker-broadcast-fun:

The :meth:`~@control.broadcast` function
----------------------------------------

This is the client function used to send commands to the workers.
Some remote control commands also have higher-level interfaces using
:meth:`~@control.broadcast` in the background, like
:meth:`~@control.rate_limit`, and :meth:`~@control.ping`.

Sending the :control:`rate_limit` command and keyword arguments:

.. code-block:: pycon

    >>> app.control.broadcast('rate_limit',
    ...                       arguments={'task_name': 'myapp.mytask',
    ...                                  'rate_limit': '200/m'})

This will send the command asynchronously, without waiting for a reply.
To request a reply you have to use the `reply` argument:

.. code-block:: pycon

    >>> app.control.broadcast('rate_limit', {
    ...     'task_name': 'myapp.mytask', 'rate_limit': '200/m'}, reply=True)
    [{'worker1.example.com': 'New rate limit set successfully'},
     {'worker2.example.com': 'New rate limit set successfully'},
     {'worker3.example.com': 'New rate limit set successfully'}]

Using the `destination` argument you can specify a list of workers
to receive the command:

.. code-block:: pycon

    >>> app.control.broadcast('rate_limit', {
    ...     'task_name': 'myapp.mytask',
    ...     'rate_limit': '200/m'}, reply=True,
    ...                             destination=['worker1@example.com'])
    [{'worker1.example.com': 'New rate limit set successfully'}]

Of course, using the higher-level interface to set rate limits is much
more convenient, but there are commands that can only be requested
using :meth:`~@control.broadcast`.

Commands
========
.. control:: revoke

``revoke``: Revoking tasks
--------------------------

:pool support: all, terminate only supported by prefork
:broker support: *amqp, redis*
:command: :program:`celery -A proj control revoke <task_id>`

All worker nodes keep a memory of revoked task ids, either in-memory or
persistent on disk (see :ref:`worker-persistent-revokes`).

When a worker receives a revoke request it will skip executing
the task, but it won't terminate an already executing task unless
the `terminate` option is set.

.. note::

    The terminate option is a last resort for administrators when
    a task is stuck. It's not for terminating the task,
    it's for terminating the process that's executing the task, and that
    process may have already started processing another task at the point
    when the signal is sent, so for this reason you must never call this
    programmatically.

If `terminate` is set the worker child process processing the task
will be terminated. The default signal sent is `TERM`, but you can
specify this using the `signal` argument. Signal can be the uppercase
name of any signal defined in the :mod:`signal` module in the Python
Standard Library.

Terminating a task also revokes it.

**Example**

.. code-block:: pycon

    >>> result.revoke()

    >>> AsyncResult(id).revoke()

    >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed')

    >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',
    ...                    terminate=True)

    >>> app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',
    ...                    terminate=True, signal='SIGKILL')

Revoking multiple tasks
-----------------------

.. versionadded:: 3.1

The revoke method also accepts a list argument, where it will revoke
several tasks at once.

**Example**

.. code-block:: pycon

    >>> app.control.revoke([
    ...    '7993b0aa-1f0b-4780-9af0-c47c0858b3f2',
    ...    'f565793e-b041-4b2b-9ca4-dca22762a55d',
    ...    'd9d35e03-2997-42d0-a13e-64a66b88a618',
    ... ])

The ``GroupResult.revoke`` method takes advantage of this since
version 3.1.

.. _worker-persistent-revokes:

Persistent revokes
------------------

Revoking tasks works by sending a broadcast message to all the workers;
the workers then keep a list of revoked tasks in memory. When a worker
starts up it will synchronize revoked tasks with other workers in the
cluster.

The list of revoked tasks is in-memory, so if all workers restart the list
of revoked ids will also vanish. If you want to preserve this list between
restarts you need to specify a file for these to be stored in by using the
`--statedb` argument to :program:`celery worker`:

.. code-block:: console

    $ celery -A proj worker -l info --statedb=/var/run/celery/worker.state

or if you use :program:`celery multi` you want to create one file per
worker instance, so use the `%n` format to expand the current node
name:

.. code-block:: console

    celery multi start 2 -l info --statedb=/var/run/celery/%n.state

See also :ref:`worker-files`.

Note that remote control commands must be working for revokes to work.
Remote control commands are only supported by the RabbitMQ (amqp) and
Redis transports at this point.

.. _worker-time-limits:

Time Limits
===========

.. versionadded:: 2.0

:pool support: *prefork/gevent*

.. sidebar:: Soft, or hard?

    The time limit is set in two values, `soft` and `hard`.
    The soft time limit allows the task to catch an exception
    to clean up before it is killed: the hard timeout isn't catchable
    and force terminates the task.

A single task can potentially run forever; if you have lots of tasks
waiting for some event that'll never happen you'll block the worker
from processing new tasks indefinitely.
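As a purely hypothetical illustration (the task name and polling interval
are invented for this sketch), a task like the following would occupy a
pool process forever once started:

.. code-block:: python

    import time

    @app.task
    def wait_for_event():
        # This loop never terminates, so the pool process
        # executing it can never pick up another task.
        while True:
            time.sleep(1)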
The best way to defend against this scenario happening is enabling time
limits.

The time limit (`--time-limit`) is the maximum number of seconds a task
may run before the process executing it is terminated and replaced by a
new process. You can also enable a soft time limit (`--soft-time-limit`);
this raises an exception the task can catch to clean up before the hard
time limit kills it:

.. code-block:: python

    from myapp import app
    from celery.exceptions import SoftTimeLimitExceeded

    @app.task
    def mytask():
        try:
            do_work()
        except SoftTimeLimitExceeded:
            clean_up_in_a_hurry()

Time limits can also be set using the :setting:`task_time_limit` /
:setting:`task_soft_time_limit` settings.

.. note::

    Time limits don't currently work on platforms that don't support
    the :sig:`SIGUSR1` signal.

Changing time limits at run-time
--------------------------------

.. versionadded:: 2.3

:broker support: *amqp, redis*

There's a remote control command that enables you to change both soft
and hard time limits for a task — named ``time_limit``.

Example changing the time limit for the ``tasks.crawl_the_web`` task
to have a soft time limit of one minute, and a hard time limit of
two minutes:

.. code-block:: pycon

    >>> app.control.time_limit('tasks.crawl_the_web',
    ...                        soft=60, hard=120, reply=True)
    [{'worker1.example.com': {'ok': 'time limits set successfully'}}]

Only tasks that start executing after the time limit change will be
affected.

.. _worker-rate-limits:

Rate Limits
===========

.. control:: rate_limit

Changing rate-limits at run-time
--------------------------------

Example changing the rate limit for the `myapp.mytask` task to execute
at most 200 tasks of that type every minute:

.. code-block:: pycon

    >>> app.control.rate_limit('myapp.mytask', '200/m')

The above doesn't specify a destination, so the change request will affect
all worker instances in the cluster. If you only want to affect a specific
list of workers you can include the ``destination`` argument:

.. code-block:: pycon

    >>> app.control.rate_limit('myapp.mytask', '200/m',
    ...            destination=['celery@worker1.example.com'])

.. warning::

    This won't affect workers with the
    :setting:`worker_disable_rate_limits` setting enabled.

.. _worker-max-tasks-per-child:

Max tasks per child setting
===========================

.. versionadded:: 2.0

:pool support: *prefork*

With this option you can configure the maximum number of tasks
a worker can execute before it's replaced by a new process.

This is useful if you have memory leaks you have no control over,
for example from closed source C extensions.

The option can be set using the worker's
:option:`--max-tasks-per-child <celery worker --max-tasks-per-child>`
argument or using the :setting:`worker_max_tasks_per_child` setting.

.. _worker-max-memory-per-child:

Max memory per child setting
============================

.. versionadded:: 4.0

:pool support: *prefork*

With this option you can configure the maximum amount of resident
memory a worker can consume before it's replaced by a new process.

This is useful if you have memory leaks you have no control over,
for example from closed source C extensions.

The option can be set using the worker's
:option:`--max-memory-per-child <celery worker --max-memory-per-child>`
argument or using the :setting:`worker_max_memory_per_child` setting.

.. _worker-autoscaling:

Autoscaling
===========

.. versionadded:: 2.2

:pool support: *prefork*, *gevent*

The *autoscaler* component is used to dynamically resize the pool
based on load:

- The autoscaler adds more pool processes when there is work to do,
- and starts removing processes when the workload is low.
It's enabled by the :option:`--autoscale <celery worker --autoscale>`
option, which needs two numbers: the maximum and minimum number of pool
processes:

.. code-block:: text

        --autoscale=AUTOSCALE
             Enable autoscaling by providing
             max_concurrency,min_concurrency.  Example:
               --autoscale=10,3 (always keep 3 processes, but grow to
              10 if necessary).

You can also define your own rules for the autoscaler by subclassing
:class:`~celery.worker.autoscaler.Autoscaler`. Some ideas for metrics
include load average or the amount of memory available. You can specify
a custom autoscaler with the :setting:`worker_autoscaler` setting.

.. _worker-queues:

Queues
======

A worker instance can consume from any number of queues.
By default it will consume from all queues defined in the
:setting:`task_queues` setting (which, if not specified, falls back to the
default queue named ``celery``).

You can specify what queues to consume from at start-up, by giving a comma
separated list of queues to the :option:`-Q <celery worker -Q>` option:

.. code-block:: console

    $ celery -A proj worker -l info -Q foo,bar,baz

If the queue name is defined in :setting:`task_queues` it will use that
configuration, but if it's not defined in the list of queues Celery will
automatically generate a new queue for you (depending on the
:setting:`task_create_missing_queues` option).

You can also tell the worker to start and stop consuming from a queue at
run-time using the remote control commands :control:`add_consumer` and
:control:`cancel_consumer`.

.. control:: add_consumer

Queues: Adding consumers
------------------------

The :control:`add_consumer` control command will tell one or more workers
to start consuming from a queue. This operation is idempotent.

To tell all workers in the cluster to start consuming from a queue
named "``foo``" you can use the :program:`celery control` program:

.. code-block:: console

    $ celery -A proj control add_consumer foo
    -> worker1.local: OK
        started consuming from u'foo'

If you want to specify a specific worker you can use the
:option:`--destination <celery control --destination>` argument:

.. code-block:: console

    $ celery -A proj control add_consumer foo -d celery@worker1.local

The same can be accomplished dynamically using the
:meth:`@control.add_consumer` method:

.. code-block:: pycon

    >>> app.control.add_consumer('foo', reply=True)
    [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]

    >>> app.control.add_consumer('foo', reply=True,
    ...                          destination=['worker1@example.com'])
    [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]

So far we've only shown examples using automatic queues.
If you need more control you can also specify the exchange, routing_key and
even other options:

.. code-block:: pycon

    >>> app.control.add_consumer(
    ...     queue='baz',
    ...     exchange='ex',
    ...     exchange_type='topic',
    ...     routing_key='media.*',
    ...     options={
    ...         'queue_durable': False,
    ...         'exchange_durable': False,
    ...     },
    ...     reply=True,
    ...     destination=['w1@example.com', 'w2@example.com'])

.. control:: cancel_consumer

Queues: Canceling consumers
---------------------------

You can cancel a consumer by queue name using the
:control:`cancel_consumer` control command.

To force all workers in the cluster to cancel consuming from a queue
you can use the :program:`celery control` program:

.. code-block:: console

    $ celery -A proj control cancel_consumer foo

The :option:`--destination <celery control --destination>` argument can be
used to specify a worker, or a list of workers, to act on the command:
.. code-block:: console

    $ celery -A proj control cancel_consumer foo -d celery@worker1.local

You can also cancel consumers programmatically using the
:meth:`@control.cancel_consumer` method:

.. code-block:: pycon

    >>> app.control.cancel_consumer('foo', reply=True)
    [{u'worker1.local': {u'ok': u"no longer consuming from u'foo'"}}]

.. control:: active_queues

Queues: List of active queues
-----------------------------

You can get a list of queues that a worker consumes from by using
the :control:`active_queues` control command:

.. code-block:: console

    $ celery -A proj inspect active_queues
    [...]

Like all other remote control commands this also supports the
:option:`--destination <celery inspect --destination>` argument used
to specify the workers that should reply to the request:

.. code-block:: console

    $ celery -A proj inspect active_queues -d celery@worker1.local
    [...]

This can also be done programmatically by using the
:meth:`@control.inspect.active_queues` method:

.. code-block:: pycon

    >>> app.control.inspect().active_queues()
    [...]

    >>> app.control.inspect(['worker1.local']).active_queues()
    [...]

.. _worker-inspect:

Inspecting workers
==================

:class:`@control.inspect` lets you inspect running workers. It uses
remote control commands under the hood.

You can also use the ``celery`` command to inspect workers,
and it supports the same commands as the :class:`@control` interface.

.. code-block:: pycon

    >>> # Inspect all nodes.
    >>> i = app.control.inspect()

    >>> # Specify multiple nodes to inspect.
    >>> i = app.control.inspect(['worker1.example.com',
    ...                          'worker2.example.com'])

    >>> # Specify a single node to inspect.
    >>> i = app.control.inspect('worker1.example.com')

.. _worker-inspect-registered-tasks:

Dump of registered tasks
------------------------

You can get a list of tasks registered in the worker using the
:meth:`~@control.inspect.registered` method:

.. code-block:: pycon

    >>> i.registered()
    [{'worker1.example.com': ['tasks.add',
                              'tasks.sleeptask']}]

.. _worker-inspect-active-tasks:

Dump of currently executing tasks
---------------------------------

You can get a list of active tasks using
:meth:`~@control.inspect.active`:

.. code-block:: pycon

    >>> i.active()
    [{'worker1.example.com':
        [{'name': 'tasks.sleeptask',
          'id': '32666e9b-809c-41fa-8e93-5ae0c80afbbf',
          'args': '(8,)',
          'kwargs': '{}'}]}]

.. _worker-inspect-eta-schedule:

Dump of scheduled (ETA) tasks
-----------------------------

You can get a list of tasks waiting to be scheduled by using
:meth:`~@control.inspect.scheduled`:

.. code-block:: pycon

    >>> i.scheduled()
    [{'worker1.example.com':
        [{'eta': '2010-06-07 09:07:52', 'priority': 0,
          'request': {
            'name': 'tasks.sleeptask',
            'id': '1a7980ea-8b19-413e-91d2-0b74f3844c4d',
            'args': '[1]',
            'kwargs': '{}'}},
         {'eta': '2010-06-07 09:07:53', 'priority': 0,
          'request': {
            'name': 'tasks.sleeptask',
            'id': '49661b9a-aa22-4120-94b7-9ee8031d219d',
            'args': '[2]',
            'kwargs': '{}'}}]}]

.. note::

    These are tasks with an ETA/countdown argument, not periodic tasks.

.. _worker-inspect-reserved:

Dump of reserved tasks
----------------------

Reserved tasks are tasks that have been received, but are still waiting to
be executed.

You can get a list of these using
:meth:`~@control.inspect.reserved`:

.. code-block:: pycon

    >>> i.reserved()
    [{'worker1.example.com':
        [{'name': 'tasks.sleeptask',
          'id': '32666e9b-809c-41fa-8e93-5ae0c80afbbf',
          'args': '(8,)',
          'kwargs': '{}'}]}]
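Because each reply maps node names to lists of task requests, these dumps
compose nicely into small monitoring scripts. A minimal sketch (the
aggregation itself is illustrative, not part of the inspect API; note that
some Celery versions return a single mapping rather than a list of
mappings, which the helper below normalizes):

.. code-block:: python

    i = app.control.inspect()

    def count_per_node(replies):
        # Each reply maps a node name to a list of task requests;
        # normalize to a list of mappings first.
        if isinstance(replies, dict):
            replies = [replies]
        counts = {}
        for reply in replies or []:
            for node, tasks in reply.items():
                counts[node] = counts.get(node, 0) + len(tasks)
        return counts

    print('active:', count_per_node(i.active()))
    print('reserved:', count_per_node(i.reserved()))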
.. _worker-statistics:

Statistics
----------

The remote control command ``inspect stats`` (or
:meth:`~@control.inspect.stats`) will give you a long list of useful (or
not so useful) statistics about the worker:

.. code-block:: console

    $ celery -A proj inspect stats

The output will include the following fields:

- ``broker``

    Section for broker information.

    * ``connect_timeout``

        Timeout in seconds (int/float) for establishing a new connection.

    * ``heartbeat``

        Current heartbeat value (set by client).

    * ``hostname``

        Node name of the remote broker.

    * ``insist``

        No longer used.

    * ``login_method``

        Login method used to connect to the broker.

    * ``port``

        Port of the remote broker.

    * ``ssl``

        SSL enabled/disabled.

    * ``transport``

        Name of transport used (e.g., ``amqp`` or ``redis``)

    * ``transport_options``

        Options passed to transport.

    * ``uri_prefix``

        Some transports expect the host name to be a URL.

        .. code-block:: text

            redis+socket:///tmp/redis.sock

        In this example the URI-prefix will be ``redis``.

    * ``userid``

        User id used to connect to the broker with.

    * ``virtual_host``

        Virtual host used.

- ``clock``

    Value of the workers logical clock. This is a positive integer and
    should be increasing every time you receive statistics.

- ``pid``

    Process id of the worker instance (Main process).

- ``pool``

    Pool-specific section.

    * ``max-concurrency``

        Max number of processes/threads/green threads.

    * ``max-tasks-per-child``

        Max number of tasks a thread may execute before being recycled.

    * ``processes``

        List of PIDs (or thread-id's).

    * ``put-guarded-by-semaphore``

        Internal

    * ``timeouts``

        Default values for time limits.

    * ``writes``

        Specific to the prefork pool, this shows the distribution of writes
        to each process in the pool when using async I/O.

- ``prefetch_count``

    Current prefetch count value for the task consumer.

- ``rusage``

    System usage statistics. The fields available may be different
    on your platform.

    From :manpage:`getrusage(2)`:

    * ``stime``

        Time spent in operating system code on behalf of this process.

    * ``utime``

        Time spent executing user instructions.

    * ``maxrss``

        The maximum resident size used by this process (in kilobytes).

    * ``idrss``

        Amount of non-shared memory used for data (in kilobytes times
        ticks of execution)

    * ``isrss``

        Amount of non-shared memory used for stack space (in kilobytes
        times ticks of execution)

    * ``ixrss``

        Amount of memory shared with other processes (in kilobytes times
        ticks of execution).

    * ``inblock``

        Number of times the file system had to read from the disk on
        behalf of this process.

    * ``oublock``

        Number of times the file system had to write to disk on behalf of
        this process.

    * ``majflt``

        Number of page faults that were serviced by doing I/O.

    * ``minflt``

        Number of page faults that were serviced without doing I/O.

    * ``msgrcv``

        Number of IPC messages received.

    * ``msgsnd``

        Number of IPC messages sent.

    * ``nvcsw``

        Number of times this process voluntarily invoked a context switch.

    * ``nivcsw``

        Number of times an involuntary context switch took place.

    * ``nsignals``

        Number of signals received.

    * ``nswap``

        The number of times this process was swapped entirely out of
        memory.

- ``total``

    Map of task names and the total number of tasks with that type
    the worker has accepted since start-up.

Additional Commands
===================

.. control:: shutdown

Remote shutdown
---------------

This command will gracefully shut down the worker remotely:
.. code-block:: pycon

    >>> app.control.broadcast('shutdown') # shutdown all workers
    >>> app.control.broadcast('shutdown', destination='worker1@example.com')

.. control:: ping

Ping
----

This command requests a ping from alive workers.
The workers reply with the string 'pong', and that's just about it.
It will use the default one second timeout for replies unless you specify
a custom timeout:

.. code-block:: pycon

    >>> app.control.ping(timeout=0.5)
    [{'worker1.example.com': 'pong'},
     {'worker2.example.com': 'pong'},
     {'worker3.example.com': 'pong'}]

:meth:`~@control.ping` also supports the `destination` argument,
so you can specify the workers to ping:

.. code-block:: pycon

    >>> app.control.ping(['worker2.example.com', 'worker3.example.com'])
    [{'worker2.example.com': 'pong'},
     {'worker3.example.com': 'pong'}]

.. _worker-enable-events:

.. control:: enable_events
.. control:: disable_events

Enable/disable events
---------------------

You can enable/disable events by using the `enable_events`,
`disable_events` commands. This is useful to temporarily monitor
a worker using :program:`celery events`/:program:`celerymon`.

.. code-block:: pycon

    >>> app.control.enable_events()
    >>> app.control.disable_events()

.. _worker-custom-control-commands:

Writing your own remote control commands
========================================

There are two types of remote control commands:

- Inspect command

    Does not have side effects, will usually just return some value
    found in the worker, like the list of currently registered tasks,
    the list of active tasks, etc.

- Control command

    Performs side effects, like adding a new queue to consume from.

Remote control commands are registered in the control panel and
they take a single argument: the current
:class:`~celery.worker.control.ControlDispatch` instance.
From there you have access to the active
:class:`~celery.worker.consumer.Consumer` if needed.

Here's an example control command that increments the task prefetch count:

.. code-block:: python

    from celery.worker.control import control_command

    @control_command(
        args=[('n', int)],
        signature='[N=1]',  # <- used for help on the command-line.
    )
    def increase_prefetch_count(state, n=1):
        state.consumer.qos.increment_eventually(n)
        return {'ok': 'prefetch count incremented'}

Make sure you add this code to a module that is imported by the worker:
this could be the same module as where your Celery app is defined, or you
can add the module to the :setting:`imports` setting.

Restart the worker so that the control command is registered, and now you
can call your command using the :program:`celery control` utility:

.. code-block:: console

    $ celery -A proj control increase_prefetch_count 3

You can also add actions to the :program:`celery inspect` program,
for example one that reads the current prefetch count:

.. code-block:: python

    from celery.worker.control import inspect_command

    @inspect_command
    def current_prefetch_count(state):
        return {'prefetch_count': state.consumer.qos.value}

After restarting the worker you can now query this value using the
:program:`celery inspect` program:

.. code-block:: console

    $ celery -A proj inspect current_prefetch_count

===============
 Routing Tasks
===============

.. note::

    Alternate routing concepts like topic and fanout are not
    available for all transports, please consult the
    :ref:`transport comparison table <kombu:transport-comparison>`.

.. contents::
    :local:

.. _routing-basics:

Basics
======
.. _routing-automatic:

Automatic routing
-----------------

The simplest way to do routing is to use the
:setting:`task_create_missing_queues` setting (on by default).

With this setting on, a named queue that's not already defined in
:setting:`task_queues` will be created automatically. This makes it easy
to perform simple routing tasks.

Say you have two servers, `x`, and `y` that handle regular tasks,
and one server `z`, that only handles feed related tasks. You can use this
configuration::

    task_routes = {'feed.tasks.import_feed': {'queue': 'feeds'}}

With this route enabled import feed tasks will be routed to the
`"feeds"` queue, while all other tasks will be routed to the default queue
(named `"celery"` for historical reasons).

Alternatively, you can use glob pattern matching, or even regular
expressions, to match all tasks in the ``feed.tasks`` name-space:

.. code-block:: python

    app.conf.task_routes = {'feed.tasks.*': {'queue': 'feeds'}}

If the order of matching patterns is important you should
specify the router in *items* format instead:

.. code-block:: python

    task_routes = ([
        ('feed.tasks.*', {'queue': 'feeds'}),
        ('web.tasks.*', {'queue': 'web'}),
        (re.compile(r'(video|image)\.tasks\..*'), {'queue': 'media'}),
    ],)

.. note::

    The :setting:`task_routes` setting can either be a dictionary, or a
    list of router objects, so in this case we need to specify the setting
    as a tuple containing a list.

After installing the router, you can start server `z` to only process the
feeds queue like this:

.. code-block:: console

    user@z:/$ celery -A proj worker -Q feeds

You can specify as many queues as you want, so you can make this server
process the default queue as well:

.. code-block:: console

    user@z:/$ celery -A proj worker -Q feeds,celery

.. _routing-changing-default-queue:

Changing the name of the default queue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can change the name of the default queue by using the following
configuration:

.. code-block:: python

    app.conf.task_default_queue = 'default'

.. _routing-autoqueue-details:

How the queues are defined
~~~~~~~~~~~~~~~~~~~~~~~~~~

The point with this feature is to hide the complex AMQP protocol from
users with only basic needs. However -- you may still be interested in how
these queues are declared.

A queue named `"video"` will be created with the following settings:

.. code-block:: javascript

    {'exchange': 'video',
     'exchange_type': 'direct',
     'routing_key': 'video'}

The non-AMQP backends like `Redis` or `SQS` don't support exchanges,
so they require the exchange to have the same name as the queue. Using this
design ensures it will work for them as well.

.. _routing-manual:

Manual routing
--------------

Say you have two servers, `x`, and `y` that handle regular tasks,
and one server `z`, that only handles feed related tasks, you can use this
configuration:

.. code-block:: python

    from kombu import Queue

    app.conf.task_default_queue = 'default'
    app.conf.task_queues = (
        Queue('default',    routing_key='task.#'),
        Queue('feed_tasks', routing_key='feed.#'),
    )
    task_default_exchange = 'tasks'
    task_default_exchange_type = 'topic'
    task_default_routing_key = 'task.default'

:setting:`task_queues` is a list of :class:`~kombu.entity.Queue`
instances. If you don't set the exchange or exchange type values for a key,
these will be taken from the :setting:`task_default_exchange` and
:setting:`task_default_exchange_type` settings.

To route a task to the `feed_tasks` queue, you can add an entry in the
:setting:`task_routes` setting:
.. code-block:: python

    task_routes = {
            'feeds.tasks.import_feed': {
                'queue': 'feed_tasks',
                'routing_key': 'feed.import',
            },
    }

You can also override this using the `routing_key` argument to
:meth:`Task.apply_async`, or :func:`~celery.execute.send_task`:

.. code-block:: pycon

    >>> from feeds.tasks import import_feed

    >>> import_feed.apply_async(args=['http://cnn.com/rss'],
    ...                         queue='feed_tasks',
    ...                         routing_key='feed.import')

To make server `z` consume from the feed queue exclusively you can
start it with the :option:`celery worker -Q` option:

.. code-block:: console

    user@z:/$ celery -A proj worker -Q feed_tasks --hostname=z@%h

Servers `x` and `y` must be configured to consume from the default queue:

.. code-block:: console

    user@x:/$ celery -A proj worker -Q default --hostname=x@%h
    user@y:/$ celery -A proj worker -Q default --hostname=y@%h

If you want, you can even have your feed processing worker handle regular
tasks as well, maybe in times when there's a lot of work to do:

.. code-block:: console

    user@z:/$ celery -A proj worker -Q feed_tasks,default --hostname=z@%h

If you have another queue but on another exchange you want to add,
just specify a custom exchange and exchange type:

.. code-block:: python

    from kombu import Exchange, Queue

    app.conf.task_queues = (
        Queue('feed_tasks',    routing_key='feed.#'),
        Queue('regular_tasks', routing_key='task.#'),
        Queue('image_tasks',   exchange=Exchange('mediatasks', type='direct'),
                               routing_key='image.compress'),
    )

If you're confused about these terms, you should read up on AMQP.

.. seealso::

    In addition to the :ref:`amqp-primer` below, there's
    `Rabbits and Warrens`_, an excellent blog post describing queues and
    exchanges. There's also the `CloudAMQP tutorial`_. For users of
    RabbitMQ the `RabbitMQ FAQ`_ could be useful as a source of
    information.

.. _`Rabbits and Warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/

.. _`CloudAMQP tutorial`: https://www.cloudamqp.com/blog/2015-09-03-part4-rabbitmq-for-beginners-exchanges-routing-keys-bindings.html

.. _`RabbitMQ FAQ`: https://www.rabbitmq.com/faq.html

.. _routing-special_options:

Special Routing Options
=======================

.. _routing-options-rabbitmq-priorities:

RabbitMQ Message Priorities
---------------------------
:supported transports: RabbitMQ

.. versionadded:: 4.0

Queues can be configured to support priorities by setting the
``x-max-priority`` argument:

.. code-block:: python

    from kombu import Exchange, Queue

    app.conf.task_queues = [
        Queue('tasks', Exchange('tasks'), routing_key='tasks',
              queue_arguments={'x-max-priority': 10}),
    ]

A default value for all queues can be set using the
:setting:`task_queue_max_priority` setting:

.. code-block:: python

    app.conf.task_queue_max_priority = 10

.. _amqp-primer:

AMQP Primer
===========

Messages
--------

A message consists of headers and a body. Celery uses headers to store
the content type of the message and its content encoding. The
content type is usually the serialization format used to serialize the
message. The body contains the name of the task to execute, the
task id (UUID), the arguments to apply it with and some additional
meta-data -- like the number of retries or an ETA.

This is an example task message represented as a Python dictionary:

.. code-block:: javascript

    {'task': 'myapp.tasks.add',
     'id': '54086c5e-6193-4575-8308-dbab76798756',
     'args': [4, 4],
     'kwargs': {}}
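For reference, a message of roughly that shape is produced by a call such
as the one below (a sketch reusing the ``myapp.tasks.add`` task named
above; the id is generated fresh on every call, and a few more metadata
fields accompany the ones shown):

.. code-block:: pycon

    >>> from myapp.tasks import add
    >>> result = add.apply_async((4, 4))
    >>> result.id   # matches the 'id' field of the message
    '54086c5e-6193-4575-8308-dbab76798756'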
.. _amqp-producers-consumers-brokers:

Producers, consumers, and brokers
---------------------------------

The client sending messages is typically called a *publisher*, or
a *producer*, while the entity receiving messages is called
a *consumer*.

The *broker* is the message server, routing messages from producers
to consumers.

You're likely to see these terms used a lot in AMQP related material.

.. _amqp-exchanges-queues-keys:

Exchanges, queues, and routing keys
-----------------------------------

1. Messages are sent to exchanges.
2. An exchange routes messages to one or more queues. Several exchange
   types exist, providing different ways to do routing, or implementing
   different messaging scenarios.
3. The message waits in the queue until someone consumes it.
4. The message is deleted from the queue when it has been acknowledged.

The steps required to send and receive messages are:

1. Create an exchange
2. Create a queue
3. Bind the queue to the exchange.

Celery automatically creates the entities necessary for the queues in
:setting:`task_queues` to work (except if the queue's `auto_declare`
setting is set to :const:`False`).

Here's an example queue configuration with three queues;
One for video, one for images, and one default queue for everything else:

.. code-block:: python

    from kombu import Exchange, Queue

    app.conf.task_queues = (
        Queue('default', Exchange('default'), routing_key='default'),
        Queue('videos',  Exchange('media'),   routing_key='media.video'),
        Queue('images',  Exchange('media'),   routing_key='media.image'),
    )
    app.conf.task_default_queue = 'default'
    app.conf.task_default_exchange_type = 'direct'
    app.conf.task_default_routing_key = 'default'

.. _amqp-exchange-types:

Exchange types
--------------

The exchange type defines how the messages are routed through the exchange.
The exchange types defined in the standard are `direct`, `topic`,
`fanout` and `headers`. Also non-standard exchange types are available
as plug-ins to RabbitMQ, like the `last-value-cache plug-in`_ by Michael
Bridgen.

.. _`last-value-cache plug-in`:
    https://github.com/squaremo/rabbitmq-lvc-plugin

.. _amqp-exchange-type-direct:

Direct exchanges
~~~~~~~~~~~~~~~~

Direct exchanges match by exact routing keys, so a queue bound by
the routing key `video` only receives messages with that routing key.

.. _amqp-exchange-type-topic:

Topic exchanges
~~~~~~~~~~~~~~~

Topic exchanges match routing keys using dot-separated words, and the
wild-card characters: ``*`` (matches a single word), and ``#`` (matches
zero or more words).

With routing keys like ``usa.news``, ``usa.weather``, ``norway.news``, and
``norway.weather``, bindings could be ``*.news`` (all news), ``usa.#``
(all items in the USA), or ``usa.weather`` (all USA weather items).

.. _amqp-api:

Related API commands
--------------------

.. method:: exchange.declare(exchange_name, type, passive,
                             durable, auto_delete, internal)

    Declares an exchange by name. See
    :meth:`amqp:Channel.exchange_declare
    <amqp.channel.Channel.exchange_declare>`.

    :keyword passive: Passive means the exchange won't be created, but you
        can use this to check if the exchange already exists.

    :keyword durable: Durable exchanges are persistent (i.e., they survive
        a broker restart).

    :keyword auto_delete: This means the exchange will be deleted by the
        broker when there are no more queues using it.

.. method:: queue.declare(queue_name, passive, durable, exclusive, auto_delete)

    Declares a queue by name. See
    :meth:`amqp:Channel.queue_declare <amqp.channel.Channel.queue_declare>`.

    Exclusive queues can only be consumed from by the current connection.
    Exclusive also implies `auto_delete`.
.. method:: queue.bind(queue_name, exchange_name, routing_key)

    Binds a queue to an exchange with a routing key.

    Unbound queues won't receive messages, so this is necessary.

    See :meth:`amqp:Channel.queue_bind <amqp.channel.Channel.queue_bind>`.

.. method:: queue.delete(name, if_unused=False, if_empty=False)

    Deletes a queue and its binding.

    See :meth:`amqp:Channel.queue_delete
    <amqp.channel.Channel.queue_delete>`.

.. method:: exchange.delete(name, if_unused=False)

    Deletes an exchange.

    See :meth:`amqp:Channel.exchange_delete
    <amqp.channel.Channel.exchange_delete>`.

.. note::

    Declaring doesn't necessarily mean "create". When you declare you
    *assert* that the entity exists and that it's operable. There's no
    rule as to who should initially create the exchange/queue/binding,
    whether consumer or producer. Usually the first one to need it will
    be the one to create it.

.. _amqp-api-hands-on:

Hands-on with the API
---------------------

Celery comes with a tool called :program:`celery amqp`
that's used for command line access to the AMQP API, enabling access to
administration tasks like creating/deleting queues and exchanges, purging
queues or sending messages. It can also be used for non-AMQP brokers,
but different implementations may not implement all commands.

You can write commands directly in the arguments to
:program:`celery amqp`, or just start with no arguments to start it in
shell-mode:

.. code-block:: console

    $ celery -A proj amqp
    -> connecting to amqp://guest@localhost:5672/.
    -> connected.
    1>

Here ``1>`` is the prompt. The number 1, is the number of commands you
have executed so far. Type ``help`` for a list of commands available.
It also supports auto-completion, so you can start typing a command and
then hit the `tab` key to show a list of possible matches.

Let's create a queue you can send messages to:

.. code-block:: console

    $ celery -A proj amqp
    1> exchange.declare testexchange direct
    ok.
    2> queue.declare testqueue
    ok. queue:testqueue messages:0 consumers:0.
    3> queue.bind testqueue testexchange testkey
    ok.

This created the direct exchange ``testexchange``, and a queue
named ``testqueue``. The queue is bound to the exchange using
the routing key ``testkey``.

From now on all messages sent to the exchange ``testexchange`` with routing
key ``testkey`` will be moved to this queue. You can send a message by
using the ``basic.publish`` command:

.. code-block:: console

    4> basic.publish 'This is a message!' testexchange testkey
    ok.

Now that the message is sent you can retrieve it again. You can use the
``basic.get`` command here, which polls for new messages on the queue
in a synchronous manner
(this is OK for maintenance tasks, but for services you want to use
``basic.consume`` instead).

Pop a message off the queue:

.. code-block:: console

    5> basic.get testqueue
    {'body': 'This is a message!',
     'delivery_info': {'delivery_tag': 1,
                       'exchange': u'testexchange',
                       'message_count': 0,
                       'redelivered': False,
                       'routing_key': u'testkey'},
     'properties': {}}

AMQP uses acknowledgment to signify that a message has been received
and processed successfully. If the message hasn't been acknowledged and
the consumer channel is closed, the message will be delivered to
another consumer.

Note the delivery tag listed in the structure above; Within a connection
channel, every received message has a unique delivery tag. This tag is
used to acknowledge the message. Also note that delivery tags aren't
unique across connections, so in another client the delivery tag `1`
might point to a different message than in this channel.

You can acknowledge the message you received using ``basic.ack``:

.. code-block:: console

    6> basic.ack 1
    ok.
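The same publish/get/ack round-trip can also be scripted from Python. Here
is a minimal sketch using Kombu's ``SimpleQueue`` helper (the broker URL
is an assumption for the example, and ``SimpleQueue`` declares its own
direct exchange named after the queue rather than reusing ``testexchange``):

.. code-block:: python

    from kombu import Connection

    # Connect to the same broker the shell session used.
    with Connection('amqp://guest@localhost:5672//') as conn:
        queue = conn.SimpleQueue('testqueue')

        queue.put('This is a message!')              # like basic.publish
        message = queue.get(block=True, timeout=1)   # like basic.get
        print(message.payload)
        message.ack()                                # like basic.ack
        queue.close()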
To clean up after our test session you should delete the entities you
created:

.. code-block:: console

    7> queue.delete testqueue
    ok. 0 messages deleted.
    8> exchange.delete testexchange
    ok.

.. _routing-tasks:

Routing Tasks
=============

.. _routing-defining-queues:

Defining queues
---------------

In Celery available queues are defined by the :setting:`task_queues`
setting.

Here's an example queue configuration with three queues;
One for video, one for images, and one default queue for everything else:

.. code-block:: python

    default_exchange = Exchange('default', type='direct')
    media_exchange = Exchange('media', type='direct')

    app.conf.task_queues = (
        Queue('default', default_exchange, routing_key='default'),
        Queue('videos', media_exchange, routing_key='media.video'),
        Queue('images', media_exchange, routing_key='media.image')
    )
    app.conf.task_default_queue = 'default'
    app.conf.task_default_exchange = 'default'
    app.conf.task_default_routing_key = 'default'

Here, the :setting:`task_default_queue` will be used to route tasks that
don't have an explicit route.

The default exchange, exchange type, and routing key will be used as the
default routing values for tasks, and as the default values for entries
in :setting:`task_queues`.

Multiple bindings to a single queue are also supported. Here's an example
of two routing keys that are both bound to the same queue:

.. code-block:: python

    from kombu import Exchange, Queue, binding

    media_exchange = Exchange('media', type='direct')

    app.conf.task_queues = (
        Queue('media', [
            binding(media_exchange, routing_key='media.video'),
            binding(media_exchange, routing_key='media.image'),
        ]),
    )

.. _routing-task-destination:

Specifying task destination
---------------------------

The destination for a task is decided by the following (in order):

1. The :ref:`routers` defined in :setting:`task_routes`.
2. The routing arguments to :func:`Task.apply_async`.
3. Routing related attributes defined on the
   :class:`~celery.task.base.Task` itself.

It's considered best practice to not hard-code these settings, but rather
leave that as configuration options by using :ref:`routers`;
This is the most flexible approach, but sensible defaults can still be set
as task attributes.

.. _routers:

Routers
-------

A router is a function that decides the routing options for a task.

All you need to define a new router is to define a function with
the signature ``(name, args, kwargs, options, task=None, **kw)``:

.. code-block:: python

    def route_task(name, args, kwargs, options, task=None, **kw):
        if name == 'myapp.tasks.compress_video':
            return {'exchange': 'video',
                    'exchange_type': 'topic',
                    'routing_key': 'video.compress'}

If you return the ``queue`` key, it'll expand with the defined settings of
that queue in :setting:`task_queues`:

.. code-block:: javascript

    {'queue': 'video', 'routing_key': 'video.compress'}

becomes -->

.. code-block:: javascript

    {'queue': 'video',
     'exchange': 'video',
     'exchange_type': 'topic',
     'routing_key': 'video.compress'}

You install router classes by adding them to the :setting:`task_routes`
setting:

.. code-block:: python

    task_routes = (route_task,)

Router functions can also be added by name:

.. code-block:: python

    task_routes = ('myapp.routers.route_task',)

For simple task name -> route mappings like the router example above, you
can simply drop a dict into :setting:`task_routes` to get the same
behavior:
.. code-block:: python

    task_routes = {
        'myapp.tasks.compress_video': {
            'queue': 'video',
            'routing_key': 'video.compress',
        },
    }

The routers will then be traversed in order; it will stop at the first
router returning a true value, and use that as the final route for the
task.

You can also have multiple routers defined in a sequence:

.. code-block:: python

    task_routes = [
        route_task,
        {
            'myapp.tasks.compress_video': {
                'queue': 'video',
                'routing_key': 'video.compress',
            },
        },
    ]

The routers will then be visited in turn, and the first to return
a value will be chosen.

Broadcast
---------

Celery can also support broadcast routing.
Here is an example exchange ``broadcast_tasks`` that delivers
copies of tasks to all workers connected to it:

.. code-block:: python

    from kombu.common import Broadcast

    app.conf.task_queues = (Broadcast('broadcast_tasks'),)

    app.conf.task_routes = {
        'tasks.reload_cache': {
            'queue': 'broadcast_tasks',
            'exchange': 'broadcast_tasks'
        }
    }

Now the ``tasks.reload_cache`` task will be sent to every
worker consuming from this queue.

Here is another example of broadcast routing, this time with
a :program:`celery beat` schedule:

.. code-block:: python

    from kombu.common import Broadcast
    from celery.schedules import crontab

    app.conf.task_queues = (Broadcast('broadcast_tasks'),)

    app.conf.beat_schedule = {
        'test-task': {
            'task': 'tasks.reload_cache',
            'schedule': crontab(minute=0, hour='*/3'),
            'options': {'exchange': 'broadcast_tasks'}
        },
    }

.. admonition:: Broadcast & Results

    Note that the Celery result backend doesn't define what happens if two
    tasks have the same task_id. If the same task is distributed to more
    than one worker, then the state history may not be preserved.

    It's a good idea to set the ``task.ignore_result`` attribute in
    this case.

===============
 Calling Tasks
===============

.. contents::
    :local:
    :depth: 1

.. _calling-basics:

Basics
======

This document describes Celery's uniform "Calling API"
used by task instances and the :ref:`canvas <guide-canvas>`.

The API defines a standard set of execution options, as well as three
methods:

    - ``apply_async(args[, kwargs[, …]])``

        Sends a task message.

    - ``delay(*args, **kwargs)``

        Shortcut to send a task message, but doesn't support execution
        options.

    - *calling* (``__call__``)

        Applying an object supporting the calling API (e.g.,
        ``add(2, 2)``) means that the task will not be executed by a
        worker, but in the current process instead (a message won't be
        sent).

.. _calling-cheat:

.. topic:: Quick Cheat Sheet

    - ``T.delay(arg, kwarg=value)``
        Star arguments shortcut to ``.apply_async``.
        (``.delay(*args, **kwargs)`` calls ``.apply_async(args, kwargs)``).

    - ``T.apply_async((arg,), {'kwarg': value})``

    - ``T.apply_async(countdown=10)``
        executes in 10 seconds from now.

    - ``T.apply_async(eta=now + timedelta(seconds=10))``
        executes in 10 seconds from now, specified using ``eta``

    - ``T.apply_async(countdown=60, expires=120)``
        executes in one minute from now, but expires after 2 minutes.

    - ``T.apply_async(expires=now + timedelta(days=2))``
        expires in 2 days, set using :class:`~datetime.datetime`.

Example
-------

The :meth:`~@Task.delay` method is convenient as it looks like calling a
regular function:

.. code-block:: python

    task.delay(arg1, arg2, kwarg1='x', kwarg2='y')

Using :meth:`~@Task.apply_async` instead you have to write:

.. code-block:: python

    task.apply_async(args=[arg1, arg2], kwargs={'kwarg1': 'x', 'kwarg2': 'y'})
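Both calls return an :class:`~celery.result.AsyncResult` that you can use
to track the task. A quick interactive sketch (using the hypothetical
``task`` from above; the id shown is purely illustrative):

.. code-block:: pycon

    >>> result = task.delay(arg1, arg2, kwarg1='x', kwarg2='y')
    >>> result.id
    '4cc7438e-afd4-4f8f-a2f3-f46567e7ca77'
    >>> result.get(timeout=10)   # wait for the worker to finish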
.. sidebar:: Tip

    If the task isn't registered in the current process
    you can use :meth:`~@send_task` to call the task by name instead.

So `delay` is clearly convenient, but if you want to set additional
execution options you have to use ``apply_async``.

The rest of this document will go into the task execution
options in detail. All examples use a task
called `add`, returning the sum of two arguments:

.. code-block:: python

    @app.task
    def add(x, y):
        return x + y

.. topic:: There's another way…

    You'll learn more about this later while reading about the
    :ref:`Canvas <guide-canvas>`, but :class:`~celery.signature`'s are
    objects used to pass around the signature of a task invocation, (for
    example to send it over the network), and they also support the
    Calling API:

    .. code-block:: python

        task.s(arg1, arg2, kwarg1='x', kwarg2='y').apply_async()

.. _calling-links:

Linking (callbacks/errbacks)
============================

Celery supports linking tasks together so that one task follows another.
The callback task will be applied with the result of the parent
task as a partial argument:

.. code-block:: python

    add.apply_async((2, 2), link=add.s(16))

.. sidebar:: What's ``s``?

    The ``add.s`` call used here is called a signature. If you
    don't know what they are you should read about them
    in the :ref:`canvas guide <guide-canvas>`.
    There you can also learn about :class:`~celery.chain`:  a simpler
    way to chain tasks together.

    In practice the ``link`` execution option is considered an internal
    primitive, and you'll probably not use it directly, but
    use chains instead.

Here the result of the first task (4) will be sent to a new
task that adds 16 to the previous result, forming the expression
:math:`(2 + 2) + 16 = 20`

You can also cause a callback to be applied if a task raises an exception
(*errback*), but this behaves differently from a regular callback
in that it will be passed the id of the parent task, not the result.
This is because it may not always be possible to serialize
the exception raised, and so this way the error callback requires
a result backend to be enabled, and the task must retrieve the result
of the task instead.

This is an example error callback:

.. code-block:: python

    @app.task
    def error_handler(uuid):
        result = AsyncResult(uuid)
        exc = result.get(propagate=False)
        print('Task {0} raised exception: {1!r}\n{2!r}'.format(
              uuid, exc, result.traceback))

it can be added to the task using the ``link_error`` execution
option:

.. code-block:: python

    add.apply_async((2, 2), link_error=error_handler.s())

In addition, both the ``link`` and ``link_error`` options can be
expressed as a list:

.. code-block:: python

    add.apply_async((2, 2), link=[add.s(16), other_task.s()])

The callbacks/errbacks will then be called in order, and all
callbacks will be called with the return value of the parent task
as a partial argument.

.. _calling-on-message:

On message
==========

Celery supports catching all state changes by setting the ``on_message``
callback.

For example for long-running tasks to send task progress you can do
something like this:

.. code-block:: python

    @app.task(bind=True)
    def hello(self, a, b):
        time.sleep(1)
        self.update_state(state="PROGRESS", meta={'progress': 50})
        time.sleep(1)
        self.update_state(state="PROGRESS", meta={'progress': 90})
        time.sleep(1)
        return 'hello world: %i' % (a+b)

.. code-block:: python

    def on_raw_message(body):
        print(body)

    r = hello.apply_async((4, 6))
    print(r.get(on_message=on_raw_message, propagate=False))

Will generate output like this:
.. code-block:: text

    {'task_id': '5660d3a3-92b8-40df-8ccc-33a5d1d680d7',
     'result': {'progress': 50},
     'children': [],
     'status': 'PROGRESS',
     'traceback': None}
    {'task_id': '5660d3a3-92b8-40df-8ccc-33a5d1d680d7',
     'result': {'progress': 90},
     'children': [],
     'status': 'PROGRESS',
     'traceback': None}
    {'task_id': '5660d3a3-92b8-40df-8ccc-33a5d1d680d7',
     'result': 'hello world: 10',
     'children': [],
     'status': 'SUCCESS',
     'traceback': None}
    hello world: 10

.. _calling-eta:

ETA and Countdown
=================

The ETA (estimated time of arrival) lets you set a specific date and time
that is the earliest time at which your task will be executed.
`countdown` is a shortcut to set ETA by seconds into the future.

.. code-block:: pycon

    >>> result = add.apply_async((2, 2), countdown=3)
    >>> result.get()    # this takes at least 3 seconds to return
    4

The task is guaranteed to be executed at some time *after* the
specified date and time, but not necessarily at that exact time.
Possible reasons for broken deadlines may include many items waiting
in the queue, or heavy network latency. To make sure your tasks
are executed in a timely manner you should monitor the queue for
congestion. Use Munin, or similar tools, to receive alerts, so appropriate
action can be taken to ease the workload. See :ref:`monitoring-munin`.

While `countdown` is an integer, `eta` must be a
:class:`~datetime.datetime` object, specifying an exact date and time
(including millisecond precision, and timezone information):

.. code-block:: pycon

    >>> from datetime import datetime, timedelta

    >>> tomorrow = datetime.utcnow() + timedelta(days=1)
    >>> add.apply_async((2, 2), eta=tomorrow)

.. _calling-expiration:

Expiration
==========

The `expires` argument defines an optional expiry time,
either as seconds after task publish, or a specific date and time using
:class:`~datetime.datetime`:

.. code-block:: pycon

    >>> # Task expires after one minute from now.
    >>> add.apply_async((10, 10), expires=60)

    >>> # Also supports datetime
    >>> from datetime import datetime, timedelta
    >>> add.apply_async((10, 10),
    ...                 expires=datetime.now() + timedelta(days=1))

When a worker receives an expired task it will mark
the task as :state:`REVOKED` (:exc:`~@TaskRevokedError`).

.. _calling-retry:

Message Sending Retry
=====================

Celery will automatically retry sending messages in the event of
connection failure, and retry behavior can be configured -- like how often
to retry, or a maximum number of retries -- or disabled altogether.

To disable retry you can set the ``retry`` execution option to
:const:`False`:

.. code-block:: python

    add.apply_async((2, 2), retry=False)

.. topic:: Related Settings

    .. hlist::
        :columns: 2

        - :setting:`task_publish_retry`
        - :setting:`task_publish_retry_policy`

Retry Policy
------------

A retry policy is a mapping that controls how retries behave,
and can contain the following keys:

- `max_retries`

    Maximum number of retries before giving up, in this case the
    exception that caused the retry to fail will be raised.

    A value of :const:`None` means it will retry forever.

    The default is to retry 3 times.

- `interval_start`

    Defines the number of seconds (float or integer) to wait between
    retries. Default is 0 (the first retry will be instantaneous).

- `interval_step`

    On each consecutive retry this number will be added to the retry
    delay (float or integer). Default is 0.2.

- `interval_max`

    Maximum number of seconds (float or integer) to wait between
    retries. Default is 0.2.

For example, the default policy correlates to:
.. code-block:: python

    add.apply_async((2, 2), retry=True, retry_policy={
        'max_retries': 3,
        'interval_start': 0,
        'interval_step': 0.2,
        'interval_max': 0.2,
    })

With this policy the maximum time spent retrying will be 0.4 seconds.
It's set relatively short by default because a connection failure could
lead to a retry pile-up if the broker connection is down -- for example,
many web server processes waiting to retry, blocking other incoming
requests.

.. _calling-connection-errors:

Connection Error Handling
=========================

When you send a task and the message transport connection is lost, or
the connection cannot be initiated, an :exc:`~kombu.exceptions.OperationalError`
error will be raised:

.. code-block:: pycon

    >>> from proj.tasks import add
    >>> add.delay(2, 2)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "celery/app/task.py", line 388, in delay
        return self.apply_async(args, kwargs)
      File "celery/app/task.py", line 503, in apply_async
        **options
      File "celery/app/base.py", line 662, in send_task
        amqp.send_task_message(P, name, message, **options)
      File "celery/backends/rpc.py", line 275, in on_task_call
        maybe_declare(self.binding(producer.channel), retry=True)
      File "/opt/celery/kombu/kombu/messaging.py", line 204, in _get_channel
        channel = self._channel = channel()
      File "/opt/celery/py-amqp/amqp/connection.py", line 272, in connect
        self.transport.connect()
      File "/opt/celery/py-amqp/amqp/transport.py", line 100, in connect
        self._connect(self.host, self.port, self.connect_timeout)
      File "/opt/celery/py-amqp/amqp/transport.py", line 141, in _connect
        self.sock.connect(sa)
    kombu.exceptions.OperationalError: [Errno 61] Connection refused

If you have :ref:`retries <calling-retry>` enabled this will only happen
after retries are exhausted, or immediately when retries are disabled.

You can handle this error too:

.. code-block:: pycon

    >>> from celery.utils.log import get_logger
    >>> logger = get_logger(__name__)

    >>> try:
    ...     add.delay(2, 2)
    ... except add.OperationalError as exc:
    ...     logger.exception('Sending task raised: %r', exc)

.. _calling-serializers:

Serializers
===========

.. sidebar::  Security

    The pickle module allows for execution of arbitrary functions;
    please see the :ref:`security guide <guide-security>`.

    Celery also comes with a special serializer that uses
    cryptography to sign your messages.

Data transferred between clients and workers needs to be serialized,
so every message in Celery has a ``content_type`` header that
describes the serialization method used to encode it.

The default serializer is `JSON`, but you can
change this using the :setting:`task_serializer` setting,
or for each individual task, or even per message.

There's built-in support for `JSON`, :mod:`pickle`, `YAML`
and ``msgpack``, and you can also add your own custom serializers by
registering them into the Kombu serializer registry.

.. seealso::

    :ref:`Message Serialization <kombu:guide-serialization>` in the Kombu user
    guide.

Each option has its advantages and disadvantages.

json -- JSON is supported in many programming languages, is now
    a standard part of Python (since 2.6), and is fairly fast to decode
    using the modern Python libraries, such as :pypi:`simplejson`.

    The primary disadvantage to JSON is that it limits you to the
    following data types: strings, Unicode, floats, Boolean,
    dictionaries, and lists. Decimals and dates are notably missing.

    Binary data will be transferred using Base64 encoding,
    increasing the size of the transferred data by 34% compared to an
    encoding format where native binary types are supported.
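As a quick illustration of the date limitation, here's a minimal sketch
using only the standard library (no Celery APIs involved); passing ISO 8601
strings is a common workaround, not an official Celery feature:

.. code-block:: python

    import json
    from datetime import datetime

    # datetime objects aren't JSON serializable, so passing one as a
    # task argument with the default serializer fails at call time:
    try:
        json.dumps({'when': datetime.utcnow()})
    except TypeError as exc:
        print('not serializable: %s' % exc)

    # A common workaround is to send an ISO 8601 string instead, and
    # parse it back into a datetime inside the task:
    print(json.dumps({'when': datetime.utcnow().isoformat()}))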
However, if your data fits inside the above constraints and you need
cross-language support, the default setting of JSON is probably your best
choice. See http://json.org for more information.

pickle -- If you have no desire to support any language other than
    Python, then using the pickle encoding will gain you the support of
    all built-in Python data types (except class instances), smaller
    messages when sending binary files, and a slight speedup over JSON
    processing.

    See :mod:`pickle` for more information.

yaml -- YAML has many of the same characteristics as json,
    except that it natively supports more data types (including dates,
    recursive references, etc.).

    However, the Python libraries for YAML are a good bit slower than the
    libraries for JSON.

    If you need a more expressive set of data types and need to maintain
    cross-language compatibility, then YAML may be a better fit than the above.

    See http://yaml.org/ for more information.

msgpack -- msgpack is a binary serialization format that's closer to JSON
    in features. It's very young however, and support should be considered
    experimental at this point.

    See http://msgpack.org/ for more information.

The encoding used is available as a message header, so the worker knows how to
deserialize any task. If you use a custom serializer, this serializer must
be available for the worker.

The following order is used to decide the serializer
used when sending a task:

    1. The `serializer` execution option.
    2. The :attr:`@-Task.serializer` attribute.
    3. The :setting:`task_serializer` setting.

Example setting a custom serializer for a single task invocation:

.. code-block:: pycon

    >>> add.apply_async((10, 10), serializer='json')

.. _calling-compression:

Compression
===========

Celery can compress the messages using either *gzip*, or *bzip2*.
You can also create your own compression schemes and register
them in the :func:`kombu compression registry <kombu.compression.register>`.

The following order is used to decide the compression scheme
used when sending a task:

    1. The `compression` execution option.
    2. The :attr:`@-Task.compression` attribute.
    3. The :setting:`task_compression` setting.

Example specifying the compression used when calling a task::

    >>> add.apply_async((2, 2), compression='zlib')

.. _calling-connections:

Connections
===========

.. sidebar:: Automatic Pool Support

    Since version 2.3 there's support for automatic connection pools,
    so you don't have to manually handle connections and publishers
    to reuse connections.

    The connection pool is enabled by default since version 2.5.

    See the :setting:`broker_pool_limit` setting for more information.

You can handle the connection manually by creating a
publisher:

.. code-block:: python

    numbers = [(2, 2), (4, 4), (8, 8), (16, 16)]
    results = []
    with add.app.pool.acquire(block=True) as connection:
        with add.get_publisher(connection) as publisher:
            for args in numbers:
                res = add.apply_async(args, publisher=publisher)
                results.append(res)
    print([res.get() for res in results])

Though this particular example is much better expressed as a group:

.. code-block:: pycon

    >>> from celery import group

    >>> numbers = [(2, 2), (4, 4), (8, 8), (16, 16)]
    >>> res = group(add.s(i, j) for i, j in numbers).apply_async()

    >>> res.get()
    [4, 8, 16, 32]

.. _calling-routing:

Routing options
===============

Celery can route tasks to different queues.

Simple routing (name <-> name) is accomplished using the ``queue`` option::

    add.apply_async(queue='priority.high')

You can then assign workers to the ``priority.high`` queue by using
the worker's :option:`-Q <celery worker -Q>` argument:
.. code-block:: console

    $ celery -A proj worker -l info -Q celery,priority.high

.. seealso::

    Hard-coding queue names in code isn't recommended, the best practice
    is to use configuration routers (:setting:`task_routes`).

    To find out more about routing, please see :ref:`guide-routing`.

Advanced Options
----------------

These options are for advanced users who want to make use of
AMQP's full routing capabilities. Interested parties may read the
:ref:`routing guide <guide-routing>`.

- exchange

    Name of exchange (or a :class:`kombu.entity.Exchange`) to
    send the message to.

- routing_key

    Routing key used to determine the queue the message is routed to.

- priority

    A number between `0` and `255`, where `255` is the highest priority.

    Supported by: RabbitMQ, Redis (priority reversed, 0 is highest).
celery-4.1.0/docs/userguide/security.rst0000644000175000017500000002000413130607475020254 0ustar omeromer00000000000000.. _guide-security:

==========
 Security
==========

.. contents::
    :local:

Introduction
============

While Celery is written with security in mind, it should be treated as an
unsafe component.

Depending on your `Security Policy`_, there are various steps you can take to
make your Celery installation more secure.

.. _`Security Policy`: https://en.wikipedia.org/wiki/Security_policy

Areas of Concern
================

Broker
------

It's imperative that the broker is guarded from unwanted access, especially
if accessible to the public. By default, workers trust that the data they get
from the broker hasn't been tampered with. See `Message Signing`_ for
information on how to make the broker connection more trustworthy.

The first line of defense should be to put a firewall in front of the broker,
allowing only white-listed machines to access it.

Keep in mind that both firewall misconfiguration and temporarily disabling
the firewall are common in the real world. A solid security policy includes
monitoring of firewall equipment to detect if it's been disabled, be it
accidentally or on purpose.

In other words, one shouldn't blindly trust the firewall either.

If your broker supports fine-grained access control, like RabbitMQ,
this is something you should look at enabling. See for example
http://www.rabbitmq.com/access-control.html.

If supported by your broker backend, you can enable end-to-end SSL
encryption and authentication using :setting:`broker_use_ssl`.

Client
------

In Celery, "client" refers to anything that sends messages to the
broker, for example web-servers that apply tasks.

Having the broker properly secured doesn't matter if arbitrary messages
can be sent through a client.

*[Need more text here]*

Worker
------

The default permissions of tasks running inside a worker are the same ones as
the privileges of the worker itself. This applies to resources such as
memory, file-systems, and devices.

An exception to this rule is when using the multiprocessing based task pool,
which is currently the default. In this case, the task will have access to
any memory copied as a result of the :func:`fork` call,
and access to memory contents written by parent tasks in the same worker
child process.

Limiting access to memory contents can be done by launching every task
in a subprocess (:func:`fork` + :func:`execve`).

Limiting file-system and device access can be accomplished by using
`chroot`_, `jail`_, `sandboxing`_, virtual machines, or other
mechanisms as enabled by the platform or additional software.

Note also that any task executed in the worker will have the
same network access as the machine on which it's running.
If the worker is located on an internal network it's recommended to add
firewall rules for outbound traffic.

.. _`chroot`: https://en.wikipedia.org/wiki/Chroot
.. _`jail`: https://en.wikipedia.org/wiki/FreeBSD_jail
.. _`sandboxing`: https://en.wikipedia.org/wiki/Sandbox_(computer_security)

.. _security-serializers:

Serializers
===========

The default serializer is JSON since version 4.0, but since it only supports
a restricted set of types you may want to consider using pickle for
serialization instead.

The `pickle` serializer is convenient as it can serialize
almost any Python object, even functions with some work,
but for the same reasons `pickle` is inherently insecure [*]_,
and should be avoided whenever clients are untrusted or
unauthenticated.

You can disable untrusted content by specifying
a white-list of accepted content-types in the :setting:`accept_content`
setting:

.. versionadded:: 3.0.18

.. note::

    This setting was first supported in version 3.0.18. If you're
    running an earlier version it will simply be ignored, so make
    sure you're running a version that supports it.

.. code-block:: python

    accept_content = ['json']

This accepts a list of serializer names and content-types, so you could
also specify the content type for json:

.. code-block:: python

    accept_content = ['application/json']

Celery also comes with a special `auth` serializer that validates
communication between Celery clients and workers, making sure
that messages originate from trusted sources.
Using `Public-key cryptography` the `auth` serializer can verify the
authenticity of senders; to enable this, read :ref:`message-signing`
for more information.

.. _`Public-key cryptography`:
    https://en.wikipedia.org/wiki/Public-key_cryptography

.. _message-signing:

Message Signing
===============

Celery can use the :pypi:`pyOpenSSL` library to sign messages using
`Public-key cryptography`, where
messages sent by clients are signed using a private key
and then later verified by the worker using a public certificate.

Optimally certificates should be signed by an official
`Certificate Authority`_, but they can also be self-signed.

To enable this you should configure the :setting:`task_serializer`
setting to use the `auth` serializer. Also required is configuring the
paths used to locate private keys and certificates on the file-system:
the :setting:`security_key`,
:setting:`security_certificate`, and :setting:`security_cert_store`
settings respectively.
With these configured it's also necessary to call the
:func:`celery.setup_security` function. Note that this will also
disable all insecure serializers so that the worker won't accept
messages with untrusted content types.

This is an example configuration using the `auth` serializer,
with the private key and certificate files located in `/etc/ssl`.

.. code-block:: python

    app = Celery()
    app.conf.update(
        security_key='/etc/ssl/private/worker.key',
        security_certificate='/etc/ssl/certs/worker.pem',
        security_cert_store='/etc/ssl/certs/*.pem',
    )
    app.setup_security()

.. note::

    While relative paths aren't disallowed, using absolute paths
    is recommended for these files.

    Also note that the `auth` serializer won't encrypt the contents of
    a message, so if needed this will have to be enabled separately.

.. _`X.509`: https://en.wikipedia.org/wiki/X.509
..
_`Certificate Authority`: https://en.wikipedia.org/wiki/Certificate_authority Intrusion Detection =================== The most important part when defending your systems against intruders is being able to detect if the system has been compromised. Logs ---- Logs are usually the first place to look for evidence of security breaches, but they're useless if they can be tampered with. A good solution is to set up centralized logging with a dedicated logging server. Access to it should be restricted. In addition to having all of the logs in a single place, if configured correctly, it can make it harder for intruders to tamper with your logs. This should be fairly easy to setup using syslog (see also `syslog-ng`_ and `rsyslog`_). Celery uses the :mod:`logging` library, and already has support for using syslog. A tip for the paranoid is to send logs using UDP and cut the transmit part of the logging server's network cable :-) .. _`syslog-ng`: https://en.wikipedia.org/wiki/Syslog-ng .. _`rsyslog`: http://www.rsyslog.com/ Tripwire -------- `Tripwire`_ is a (now commercial) data integrity tool, with several open source implementations, used to keep cryptographic hashes of files in the file-system, so that administrators can be alerted when they change. This way when the damage is done and your system has been compromised you can tell exactly what files intruders have changed (password files, logs, back-doors, root-kits, and so on). Often this is the only way you'll be able to detect an intrusion. Some open source implementations include: * `OSSEC`_ * `Samhain`_ * `Open Source Tripwire`_ * `AIDE`_ Also, the `ZFS`_ file-system comes with built-in integrity checks that can be used. .. _`Tripwire`: http://tripwire.com/ .. _`OSSEC`: http://www.ossec.net/ .. _`Samhain`: http://la-samhna.de/samhain/index.html .. _`AIDE`: http://aide.sourceforge.net/ .. _`Open Source Tripwire`: http://sourceforge.net/projects/tripwire/ .. _`ZFS`: https://en.wikipedia.org/wiki/ZFS .. rubric:: Footnotes .. [*] https://blog.nelhage.com/2011/03/exploiting-pickle/ celery-4.1.0/docs/userguide/canvas.rst0000644000175000017500000007003213130607475017666 0ustar omeromer00000000000000.. _guide-canvas: ============================== Canvas: Designing Work-flows ============================== .. contents:: :local: :depth: 2 .. _canvas-subtasks: .. _canvas-signatures: Signatures ========== .. versionadded:: 2.0 You just learned how to call a task using the tasks ``delay`` method in the :ref:`calling ` guide, and this is often all you need, but sometimes you may want to pass the signature of a task invocation to another process or as an argument to another function. A :func:`~celery.signature` wraps the arguments, keyword arguments, and execution options of a single task invocation in a way such that it can be passed to functions or even serialized and sent across the wire. - You can create a signature for the ``add`` task using its name like this: .. code-block:: pycon >>> from celery import signature >>> signature('tasks.add', args=(2, 2), countdown=10) tasks.add(2, 2) This task has a signature of arity 2 (two arguments): ``(2, 2)``, and sets the countdown execution option to 10. - or you can create one using the task's ``signature`` method: .. code-block:: pycon >>> add.signature((2, 2), countdown=10) tasks.add(2, 2) - There's also a shortcut using star arguments: .. code-block:: pycon >>> add.s(2, 2) tasks.add(2, 2) - Keyword arguments are also supported: .. 
code-block:: pycon >>> add.s(2, 2, debug=True) tasks.add(2, 2, debug=True) - From any signature instance you can inspect the different fields: .. code-block:: pycon >>> s = add.signature((2, 2), {'debug': True}, countdown=10) >>> s.args (2, 2) >>> s.kwargs {'debug': True} >>> s.options {'countdown': 10} - It supports the "Calling API" of ``delay``, ``apply_async``, etc., including being called directly (``__call__``). Calling the signature will execute the task inline in the current process: .. code-block:: pycon >>> add(2, 2) 4 >>> add.s(2, 2)() 4 ``delay`` is our beloved shortcut to ``apply_async`` taking star-arguments: .. code-block:: pycon >>> result = add.delay(2, 2) >>> result.get() 4 ``apply_async`` takes the same arguments as the :meth:`Task.apply_async <@Task.apply_async>` method: .. code-block:: pycon >>> add.apply_async(args, kwargs, **options) >>> add.signature(args, kwargs, **options).apply_async() >>> add.apply_async((2, 2), countdown=1) >>> add.signature((2, 2), countdown=1).apply_async() - You can't define options with :meth:`~@Task.s`, but a chaining ``set`` call takes care of that: .. code-block:: pycon >>> add.s(2, 2).set(countdown=1) proj.tasks.add(2, 2) Partials -------- With a signature, you can execute the task in a worker: .. code-block:: pycon >>> add.s(2, 2).delay() >>> add.s(2, 2).apply_async(countdown=1) Or you can call it directly in the current process: .. code-block:: pycon >>> add.s(2, 2)() 4 Specifying additional args, kwargs, or options to ``apply_async``/``delay`` creates partials: - Any arguments added will be prepended to the args in the signature: .. code-block:: pycon >>> partial = add.s(2) # incomplete signature >>> partial.delay(4) # 4 + 2 >>> partial.apply_async((4,)) # same - Any keyword arguments added will be merged with the kwargs in the signature, with the new keyword arguments taking precedence: .. code-block:: pycon >>> s = add.s(2, 2) >>> s.delay(debug=True) # -> add(2, 2, debug=True) >>> s.apply_async(kwargs={'debug': True}) # same - Any options added will be merged with the options in the signature, with the new options taking precedence: .. code-block:: pycon >>> s = add.signature((2, 2), countdown=10) >>> s.apply_async(countdown=1) # countdown is now 1 You can also clone signatures to create derivatives: .. code-block:: pycon >>> s = add.s(2) proj.tasks.add(2) >>> s.clone(args=(4,), kwargs={'debug': True}) proj.tasks.add(4, 2, debug=True) Immutability ------------ .. versionadded:: 3.0 Partials are meant to be used with callbacks, any tasks linked, or chord callbacks will be applied with the result of the parent task. Sometimes you want to specify a callback that doesn't take additional arguments, and in that case you can set the signature to be immutable: .. code-block:: pycon >>> add.apply_async((2, 2), link=reset_buffers.signature(immutable=True)) The ``.si()`` shortcut can also be used to create immutable signatures: .. code-block:: pycon >>> add.apply_async((2, 2), link=reset_buffers.si()) Only the execution options can be set when a signature is immutable, so it's not possible to call the signature with partial args/kwargs. .. note:: In this tutorial I sometimes use the prefix operator `~` to signatures. You probably shouldn't use it in your production code, but it's a handy shortcut when experimenting in the Python shell: .. code-block:: pycon >>> ~sig >>> # is the same as >>> sig.delay().get() .. _canvas-callbacks: Callbacks --------- .. 
versionadded:: 3.0

Callbacks can be added to any task using the ``link`` argument
to ``apply_async``:

.. code-block:: pycon

    add.apply_async((2, 2), link=other_task.s())

The callback will only be applied if the task exited successfully,
and it will be applied with the return value of the parent task as argument.

As I mentioned earlier, any arguments you add to a signature
will be prepended to the arguments specified by the signature itself!

If you have the signature:

.. code-block:: pycon

    >>> sig = add.s(10)

then `sig.delay(result)` becomes:

.. code-block:: pycon

    >>> add.apply_async(args=(result, 10))
    ...

Now let's call our ``add`` task with a callback using partial
arguments:

.. code-block:: pycon

    >>> add.apply_async((2, 2), link=add.s(8))

As expected this will first launch one task calculating :math:`2 + 2`, then
another task calculating :math:`4 + 8`.

The Primitives
==============

.. versionadded:: 3.0

.. topic:: Overview

    - ``group``

        The group primitive is a signature that takes a list of tasks that
        should be applied in parallel.

    - ``chain``

        The chain primitive lets us link together signatures so that one is
        called after the other, essentially forming a *chain* of callbacks.

    - ``chord``

        A chord is just like a group but with a callback. A chord consists
        of a header group and a body, where the body is a task that should
        execute after all of the tasks in the header are complete.

    - ``map``

        The map primitive works like the built-in ``map`` function, but
        creates a temporary task where a list of arguments is applied to
        the task. For example, ``task.map([1, 2])`` results in a single
        task being called, applying the arguments in order to the task
        function so that the result is:

        .. code-block:: python

            res = [task(1), task(2)]

    - ``starmap``

        Works exactly like map except the arguments are applied as
        ``*args``. For example ``add.starmap([(2, 2), (4, 4)])`` results in
        a single task calling:

        .. code-block:: python

            res = [add(2, 2), add(4, 4)]

    - ``chunks``

        Chunking splits a long list of arguments into parts, for example
        the operation:

        .. code-block:: pycon

            >>> items = zip(xrange(1000), xrange(1000))  # 1000 items
            >>> add.chunks(items, 10)

        will split the list of items into chunks of 10, resulting in 100
        tasks (each processing 10 items in sequence).

The primitives are also signature objects themselves, so that they can be
combined in any number of ways to compose complex work-flows.

Here are some examples:

- Simple chain

    Here's a simple chain: the first task executes, passing its return value
    to the next task in the chain, and so on.

    .. code-block:: pycon

        >>> from celery import chain

        >>> # 2 + 2 + 4 + 8
        >>> res = chain(add.s(2, 2), add.s(4), add.s(8))()
        >>> res.get()
        16

    This can also be written using pipes:

    .. code-block:: pycon

        >>> (add.s(2, 2) | add.s(4) | add.s(8))().get()
        16

- Immutable signatures

    Signatures can be partial, so arguments can be added to the existing
    arguments, but you may not always want that, for example
    if you don't want the result of the previous task in a chain.

    In that case you can mark the signature as immutable, so that the
    arguments cannot be changed:

    .. code-block:: pycon

        >>> add.signature((2, 2), immutable=True)

    There's also a ``.si()`` shortcut for this, and this is the preferred
    way of creating signatures:

    .. code-block:: pycon

        >>> add.si(2, 2)

    Now you can create a chain of independent tasks instead:
.. code-block:: pycon

    >>> res = (add.si(2, 2) | add.si(4, 4) | add.s(8, 8))()
    >>> res.get()
    16

    >>> res.parent.get()
    8

    >>> res.parent.parent.get()
    4

- Simple group

    You can easily create a group of tasks to execute in parallel:

    .. code-block:: pycon

        >>> from celery import group
        >>> res = group(add.s(i, i) for i in xrange(10))()
        >>> res.get(timeout=1)
        [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

- Simple chord

    The chord primitive enables us to add a callback to be called when
    all of the tasks in a group have finished executing. This is often
    required for algorithms that aren't *embarrassingly parallel*:

    .. code-block:: pycon

        >>> from celery import chord
        >>> res = chord((add.s(i, i) for i in xrange(10)), xsum.s())()
        >>> res.get()
        90

    The above example creates 10 tasks that all start in parallel,
    and when all of them are complete the return values are combined
    into a list and sent to the ``xsum`` task.

    The body of a chord can also be immutable, so that the return value
    of the group isn't passed on to the callback:

    .. code-block:: pycon

        >>> chord((import_contact.s(c) for c in contacts),
        ...       notify_complete.si(import_id)).apply_async()

    Note the use of ``.si`` above; this creates an immutable signature,
    meaning any new arguments passed (including the return value of the
    previous task) will be ignored.

- Blow your mind by combining

    Chains can be partial too:

    .. code-block:: pycon

        >>> c1 = (add.s(4) | mul.s(8))

        # (16 + 4) * 8
        >>> res = c1(16)
        >>> res.get()
        160

    this means that you can combine chains:

    .. code-block:: pycon

        # ((4 + 16) * 2 + 4) * 8
        >>> c2 = (add.s(4, 16) | mul.s(2) | (add.s(4) | mul.s(8)))

        >>> res = c2()
        >>> res.get()
        352

    Chaining a group together with another task will automatically
    upgrade it to be a chord:

    .. code-block:: pycon

        >>> c3 = (group(add.s(i, i) for i in xrange(10)) | xsum.s())
        >>> res = c3()
        >>> res.get()
        90

    Groups and chords accept partial arguments too, so in a chain
    the return value of the previous task is forwarded to all tasks in the
    group:

    .. code-block:: pycon

        >>> new_user_workflow = (create_user.s() | group(
        ...                      import_contacts.s(),
        ...                      send_welcome_email.s()))
        ... new_user_workflow.delay(username='artv',
        ...                         first='Art',
        ...                         last='Vandelay',
        ...                         email='art@vandelay.com')

    If you don't want to forward arguments to the group then
    you can make the signatures in the group immutable:

    .. code-block:: pycon

        >>> res = (add.s(4, 4) | group(add.si(i, i) for i in xrange(10)))()
        >>> res.get()
        <GroupResult: ...>

        >>> res.parent.get()
        8

.. _canvas-chain:

Chains
------

.. versionadded:: 3.0

Tasks can be linked together: the linked task is called when the task
returns successfully:

.. code-block:: pycon

    >>> res = add.apply_async((2, 2), link=mul.s(16))
    >>> res.get()
    4

The linked task will be applied with the result of its parent
task as the first argument. In the above case where the result was 4,
this will result in ``mul(4, 16)``.

The results will keep track of any subtasks called by the original task,
and this can be accessed from the result instance:

.. code-block:: pycon

    >>> res.children
    [<AsyncResult: ...>]

    >>> res.children[0].get()
    64

The result instance also has a :meth:`~@AsyncResult.collect` method
that treats the result as a graph, enabling you to iterate over
the results:

.. code-block:: pycon

    >>> list(res.collect())
    [(<AsyncResult: ...>, 4),
     (<AsyncResult: ...>, 64)]

By default :meth:`~@AsyncResult.collect` will raise an
:exc:`~@IncompleteStream` exception if the graph isn't fully
formed (one of the tasks hasn't completed yet),
but you can get an intermediate representation of the graph
too:
.. code-block:: pycon

    >>> for result, value in res.collect(intermediate=True):
    ...

You can link together as many tasks as you like,
and signatures can be linked too:

.. code-block:: pycon

    >>> s = add.s(2, 2)
    >>> s.link(mul.s(4))
    >>> s.link(log_result.s())

You can also add *error callbacks* using the `on_error` method:

.. code-block:: pycon

    >>> add.s(2, 2).on_error(log_error.s()).delay()

This will result in the following ``.apply_async`` call when the signature
is applied:

.. code-block:: pycon

    >>> add.apply_async((2, 2), link_error=log_error.s())

The worker won't actually call the errback as a task, but will
instead call the errback function directly so that the raw request, exception
and traceback objects can be passed to it.

Here's an example errback:

.. code-block:: python

    from __future__ import print_function

    import os

    from proj.celery import app

    @app.task
    def log_error(request, exc, traceback):
        with open(os.path.join('/var/errors', request.id), 'a') as fh:
            print('--\n\n{0} {1} {2}'.format(
                request.id, exc, traceback), file=fh)

To make it even easier to link tasks together there's
a special signature called :class:`~celery.chain` that lets
you chain tasks together:

.. code-block:: pycon

    >>> from celery import chain
    >>> from proj.tasks import add, mul

    >>> # (4 + 4) * 8 * 10
    >>> chain(add.s(4, 4), mul.s(8), mul.s(10))
    proj.tasks.add(4, 4) | proj.tasks.mul(8) | proj.tasks.mul(10)

Calling the chain will call the tasks in the current
process and return the result of the last task in the chain:

.. code-block:: pycon

    >>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))()
    >>> res.get()
    640

It also sets ``parent`` attributes so that you can
work your way up the chain to get intermediate results:

.. code-block:: pycon

    >>> res.parent.get()
    64

    >>> res.parent.parent.get()
    8

    >>> res.parent.parent
    <AsyncResult: ...>

Chains can also be made using the ``|`` (pipe) operator:

.. code-block:: pycon

    >>> (add.s(2, 2) | mul.s(8) | mul.s(10)).apply_async()

Graphs
~~~~~~

In addition you can work with the result graph as a
:class:`~celery.utils.graph.DependencyGraph`:

.. code-block:: pycon

    >>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))()

    >>> res.parent.parent.graph
    285fa253-fcf8-42ef-8b95-0078897e83e6(1)
        463afec2-5ed4-4036-b22d-ba067ec64f52(0)
    872c3995-6fa0-46ca-98c2-5a19155afcf0(2)
        285fa253-fcf8-42ef-8b95-0078897e83e6(1)
            463afec2-5ed4-4036-b22d-ba067ec64f52(0)

You can even convert these graphs to *dot* format:

.. code-block:: pycon

    >>> with open('graph.dot', 'w') as fh:
    ...     res.parent.parent.graph.to_dot(fh)

and create images:

.. code-block:: console

    $ dot -Tpng graph.dot -o graph.png

.. image:: ../images/result_graph.png

.. _canvas-group:

Groups
------

.. versionadded:: 3.0

A group can be used to execute several tasks in parallel.

The :class:`~celery.group` function takes a list of signatures:

.. code-block:: pycon

    >>> from celery import group
    >>> from proj.tasks import add

    >>> group(add.s(2, 2), add.s(4, 4))
    (proj.tasks.add(2, 2), proj.tasks.add(4, 4))

If you **call** the group, the tasks will be applied
one after another in the current process, and a
:class:`~celery.result.GroupResult` instance is returned
that can be used to keep track of the results,
or tell how many tasks are ready and so on:

.. code-block:: pycon

    >>> g = group(add.s(2, 2), add.s(4, 4))
    >>> res = g()
    >>> res.get()
    [4, 8]

Group also supports iterators:

.. code-block:: pycon

    >>> group(add.s(i, i) for i in xrange(100))()

A group is a signature object, so it can be used in combination
with other signatures.
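For example, a group can be combined with a chain; here's a rough sketch
(assuming a ``mul`` task like the one used elsewhere in this guide) where
the result of ``add`` is forwarded to every task in the group:

.. code-block:: pycon

    >>> from celery import group
    >>> # 2 + 2 is calculated first, then its result (4) is passed as
    >>> # a partial argument to both tasks in the group:
    >>> res = (add.s(2, 2) | group(mul.s(8), mul.s(10)))()
    >>> res.get()
    [32, 40]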
Group Results
~~~~~~~~~~~~~

The group task returns a special result too; this result works just like
normal task results, except that it works on the group as a whole:

.. code-block:: pycon

    >>> from celery import group
    >>> from tasks import add

    >>> job = group([
    ...     add.s(2, 2),
    ...     add.s(4, 4),
    ...     add.s(8, 8),
    ...     add.s(16, 16),
    ...     add.s(32, 32),
    ... ])

    >>> result = job.apply_async()

    >>> result.ready()  # have all subtasks completed?
    True
    >>> result.successful()  # were all subtasks successful?
    True
    >>> result.get()
    [4, 8, 16, 32, 64]

The :class:`~celery.result.GroupResult` takes a list of
:class:`~celery.result.AsyncResult` instances and operates on them as
if it was a single task.

It supports the following operations:

* :meth:`~celery.result.GroupResult.successful`

    Return :const:`True` if all of the subtasks finished
    successfully (e.g., didn't raise an exception).

* :meth:`~celery.result.GroupResult.failed`

    Return :const:`True` if any of the subtasks failed.

* :meth:`~celery.result.GroupResult.waiting`

    Return :const:`True` if any of the subtasks
    isn't ready yet.

* :meth:`~celery.result.GroupResult.ready`

    Return :const:`True` if all of the subtasks
    are ready.

* :meth:`~celery.result.GroupResult.completed_count`

    Return the number of completed subtasks.

* :meth:`~celery.result.GroupResult.revoke`

    Revoke all of the subtasks.

* :meth:`~celery.result.GroupResult.join`

    Gather the results of all subtasks
    and return them in the same order as they were called (as a list).

.. _canvas-chord:

Chords
------

.. versionadded:: 2.3

.. note::

    Tasks used within a chord must *not* ignore their results. If the result
    backend is disabled for *any* task (header or body) in your chord you
    should read ":ref:`chord-important-notes`." Chords are not currently
    supported with the RPC result backend.

A chord is a task that only executes after all of the tasks in a group have
finished executing.

Let's calculate the sum of the expression
:math:`1 + 1 + 2 + 2 + 3 + 3 \ldots n + n` for the first hundred integers.

First you need two tasks, :func:`add` and :func:`tsum` (:func:`sum` is
already a standard function):

.. code-block:: python

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

Now you can use a chord to calculate each addition step in parallel, and then
get the sum of the resulting numbers:

.. code-block:: pycon

    >>> from celery import chord
    >>> from tasks import add, tsum

    >>> chord(add.s(i, i)
    ...       for i in xrange(100))(tsum.s()).get()
    9900

This is obviously a very contrived example; the overhead of messaging and
synchronization makes this a lot slower than its Python counterpart:

.. code-block:: pycon

    >>> sum(i + i for i in xrange(100))

The synchronization step is costly, so you should avoid using chords as much
as possible. Still, the chord is a powerful primitive to have in your
toolbox as synchronization is a required step for many parallel algorithms.

Let's break the chord expression down:

.. code-block:: pycon

    >>> callback = tsum.s()
    >>> header = [add.s(i, i) for i in range(100)]
    >>> result = chord(header)(callback)
    >>> result.get()
    9900

Remember, the callback can only be executed after all of the tasks in the
header have returned. Each step in the header is executed as a task, in
parallel, possibly on different nodes. The callback is then applied with
the return value of each task in the header.
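To make the data flow concrete, here's a small sketch using the tasks above:
the header results arrive at the callback as one list, in the same order as
the header signatures.

.. code-block:: pycon

    >>> header = [add.s(i, i) for i in range(3)]
    >>> result = chord(header)(tsum.s())
    >>> result.get()    # tsum was called with [0, 2, 4]
    6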
The task id returned by :meth:`chord` is the id of the callback, so you can
wait for it to complete and get the final return value (but remember to
:ref:`never have a task wait for other tasks <task-synchronous-subtasks>`).

.. _chord-errors:

Error handling
~~~~~~~~~~~~~~

So what happens if one of the tasks raises an exception?

The chord callback result will transition to the failure state, and the
error is set to the :exc:`~@ChordError` exception:

.. code-block:: pycon

    >>> c = chord([add.s(4, 4), raising_task.s(), add.s(8, 8)])
    >>> result = c()
    >>> result.get()

.. code-block:: pytb

    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "*/celery/result.py", line 120, in get
        interval=interval)
      File "*/celery/backends/amqp.py", line 150, in wait_for
        raise meta['result']
    celery.exceptions.ChordError: Dependency 97de6f3f-ea67-4517-a21c-d867c61fcb47
        raised ValueError('something something',)

While the traceback may be different depending on the result backend used,
you can see that the error description includes the id of the task that
failed and a string representation of the original exception. You can also
find the original traceback in ``result.traceback``.

Note that the rest of the tasks will still execute, so the third task
(``add.s(8, 8)``) is still executed even though the middle task failed.
Also the :exc:`~@ChordError` only shows the task that failed
first (in time): it doesn't respect the ordering of the header group.

To perform an action when a chord fails you can therefore attach
an errback to the chord callback:

.. code-block:: python

    @app.task
    def on_chord_error(request, exc, traceback):
        print('Task {0!r} raised error: {1!r}'.format(request.id, exc))

.. code-block:: pycon

    >>> c = (group(add.s(i, i) for i in range(10)) |
    ...      xsum.s().on_error(on_chord_error.s())).delay()

.. _chord-important-notes:

Important Notes
~~~~~~~~~~~~~~~

Tasks used within a chord must *not* ignore their results. In practice this
means that you must enable a :const:`result_backend` in order to use chords.
Additionally, if :const:`task_ignore_result` is set to :const:`True` in your
configuration, be sure that the individual tasks to be used within the chord
are defined with :const:`ignore_result=False`. This applies to both Task
subclasses and decorated tasks.

Example Task subclass:

.. code-block:: python

    class MyTask(Task):
        ignore_result = False

Example decorated task:

.. code-block:: python

    @app.task(ignore_result=False)
    def another_task(project):
        do_something()

By default the synchronization step is implemented by having a recurring
task poll the completion of the group every second, calling the signature
when ready.

Example implementation:

.. code-block:: python

    from celery import maybe_signature

    @app.task(bind=True)
    def unlock_chord(self, group, callback, interval=1, max_retries=None):
        if group.ready():
            return maybe_signature(callback).delay(group.join())
        raise self.retry(countdown=interval, max_retries=max_retries)

This is used by all result backends except Redis and Memcached: they
increment a counter after each task in the header, then apply the callback
when the counter exceeds the number of tasks in the set.

The Redis and Memcached approach is a much better solution, but not easily
implemented in other backends (suggestions welcome!).

.. note::

    Chords don't properly work with Redis before version 2.2; you'll need to
    upgrade to at least redis-server 2.2 to use them.
.. note::

    If you're using chords with the Redis result backend and also overriding
    the :meth:`Task.after_return` method, you need to make sure to call the
    super method or else the chord callback won't be applied.

    .. code-block:: python

        def after_return(self, *args, **kwargs):
            do_something()
            super(MyTask, self).after_return(*args, **kwargs)

.. _canvas-map:

Map & Starmap
-------------

:class:`~celery.map` and :class:`~celery.starmap` are built-in tasks
that call the task for every element in a sequence.

They differ from group in that:

- only one task message is sent.

- the operation is sequential.

For example using ``map``:

.. code-block:: pycon

    >>> from proj.tasks import xsum

    >>> ~xsum.map([range(10), range(100)])
    [45, 4950]

is the same as having a task doing:

.. code-block:: python

    @app.task
    def temp():
        return [xsum(range(10)), xsum(range(100))]

and using ``starmap``:

.. code-block:: pycon

    >>> ~add.starmap(zip(range(10), range(10)))
    [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

is the same as having a task doing:

.. code-block:: python

    @app.task
    def temp():
        return [add(i, i) for i in range(10)]

Both ``map`` and ``starmap`` are signature objects, so they can be used as
other signatures and combined in groups etc., for example to call the
starmap after 10 seconds:

.. code-block:: pycon

    >>> add.starmap(zip(range(10), range(10))).apply_async(countdown=10)

.. _canvas-chunks:

Chunks
------

Chunking lets you divide an iterable of work into pieces, so that if you
have one million objects, you can create 10 tasks with a hundred thousand
objects each.

Some may worry that chunking your tasks results in a degradation of
parallelism, but this is rarely true for a busy cluster; in practice, since
you're avoiding the overhead of messaging, it may considerably increase
performance.

To create a chunks signature you can use :meth:`@Task.chunks`:

.. code-block:: pycon

    >>> add.chunks(zip(range(100), range(100)), 10)

As with :class:`~celery.group` the act of sending the messages for the
chunks will happen in the current process when called:

.. code-block:: pycon

    >>> from proj.tasks import add

    >>> res = add.chunks(zip(range(100), range(100)), 10)()
    >>> res.get()
    [[0, 2, 4, 6, 8, 10, 12, 14, 16, 18],
     [20, 22, 24, 26, 28, 30, 32, 34, 36, 38],
     [40, 42, 44, 46, 48, 50, 52, 54, 56, 58],
     [60, 62, 64, 66, 68, 70, 72, 74, 76, 78],
     [80, 82, 84, 86, 88, 90, 92, 94, 96, 98],
     [100, 102, 104, 106, 108, 110, 112, 114, 116, 118],
     [120, 122, 124, 126, 128, 130, 132, 134, 136, 138],
     [140, 142, 144, 146, 148, 150, 152, 154, 156, 158],
     [160, 162, 164, 166, 168, 170, 172, 174, 176, 178],
     [180, 182, 184, 186, 188, 190, 192, 194, 196, 198]]

while calling ``.apply_async`` will create a dedicated task so that the
individual tasks are applied in a worker instead:

.. code-block:: pycon

    >>> add.chunks(zip(range(100), range(100)), 10).apply_async()

You can also convert chunks to a group:

.. code-block:: pycon

    >>> group = add.chunks(zip(range(100), range(100)), 10).group()

and with the group skew the countdown of each task by increments of one:

.. code-block:: pycon

    >>> group.skew(start=1, stop=10)()

This means that the first task will have a countdown of one second, the
second task a countdown of two seconds, and so on.
celery-4.1.0/docs/userguide/periodic-tasks.rst0000644000175000017500000004660213135426300021331 0ustar omeromer00000000000000.. _guide-beat:

================
 Periodic Tasks
================
.. contents::
    :local:

Introduction
============

:program:`celery beat` is a scheduler; it kicks off tasks at regular
intervals, which are then executed by available worker nodes in the cluster.

By default the entries are taken from the :setting:`beat_schedule` setting,
but custom stores can also be used, like storing the entries in a SQL
database.

You have to ensure only a single scheduler is running for a schedule at a
time, otherwise you'd end up with duplicate tasks. Using a centralized
approach means the schedule doesn't have to be synchronized, and the service
can operate without using locks.

.. _beat-timezones:

Time Zones
==========

Periodic task schedules use the UTC time zone by default, but you can change
the time zone used with the :setting:`timezone` setting.

An example time zone could be `Europe/London`:

.. code-block:: python

    timezone = 'Europe/London'

This setting must be added to your app, either by configuring it directly
(``app.conf.timezone = 'Europe/London'``), or by adding it to your
configuration module if you have set one up using
``app.config_from_object``. See :ref:`celerytut-configuration` for more
information about configuration options.

The default scheduler (storing the schedule in the
:file:`celerybeat-schedule` file) will automatically detect that the time
zone has changed, and so will reset the schedule itself, but other schedulers
may not be so smart (e.g., the Django database scheduler, see below) and in
that case you'll have to reset the schedule manually.

.. admonition:: Django Users

    Celery recommends and is compatible with the new ``USE_TZ`` setting
    introduced in Django 1.4.

    For Django users the time zone specified in the ``TIME_ZONE`` setting
    will be used, or you can specify a custom time zone for Celery alone
    by using the :setting:`timezone` setting.

    The database scheduler won't reset when timezone related settings
    change, so you must do this manually:

    .. code-block:: console

        $ python manage.py shell
        >>> from djcelery.models import PeriodicTask
        >>> PeriodicTask.objects.update(last_run_at=None)

.. _beat-entries:

Entries
=======

To call a task periodically you have to add an entry to the
beat schedule list.

.. code-block:: python

    from celery import Celery
    from celery.schedules import crontab

    app = Celery()

    @app.on_after_configure.connect
    def setup_periodic_tasks(sender, **kwargs):
        # Calls test('hello') every 10 seconds.
        sender.add_periodic_task(10.0, test.s('hello'), name='add every 10')

        # Calls test('world') every 30 seconds
        sender.add_periodic_task(30.0, test.s('world'), expires=10)

        # Executes every Monday morning at 7:30 a.m.
        sender.add_periodic_task(
            crontab(hour=7, minute=30, day_of_week=1),
            test.s('Happy Mondays!'),
        )

    @app.task
    def test(arg):
        print(arg)

Setting these up from within the :data:`~@on_after_configure` handler means
that we'll not evaluate the app at module level when using ``test.s()``.

The :meth:`~@add_periodic_task` function will add the entry to the
:setting:`beat_schedule` setting behind the scenes, and the same setting
can also be used to set up periodic tasks manually:

Example: Run the `tasks.add` task every 30 seconds.

.. code-block:: python

    app.conf.beat_schedule = {
        'add-every-30-seconds': {
            'task': 'tasks.add',
            'schedule': 30.0,
            'args': (16, 16)
        },
    }
    app.conf.timezone = 'UTC'

.. note::

    If you're wondering where these settings should go then please see
    :ref:`celerytut-configuration`. You can either set these options on
    your app directly or you can keep a separate module for configuration.
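    For instance, a sketch of keeping the schedule in a separate module
    (the module name ``celeryconfig`` here is just the conventional
    example, loaded with ``app.config_from_object('celeryconfig')``):

    .. code-block:: python

        # celeryconfig.py -- loaded via app.config_from_object('celeryconfig')
        beat_schedule = {
            'add-every-30-seconds': {
                'task': 'tasks.add',
                'schedule': 30.0,
                'args': (16, 16),
            },
        }
        timezone = 'UTC'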
If you want to use a single item tuple for `args`, don't forget that the constructor is a comma, and not a pair of parentheses. Using a :class:`~datetime.timedelta` for the schedule means the task will be sent in 30 second intervals (the first task will be sent 30 seconds after `celery beat` starts, and then every 30 seconds after the last run). A Crontab like schedule also exists, see the section on `Crontab schedules`_. Like with :command:`cron`, the tasks may overlap if the first task doesn't complete before the next. If that's a concern you should use a locking strategy to ensure only one instance can run at a time (see for example :ref:`cookbook-task-serial`). .. _beat-entry-fields: Available Fields ---------------- * `task` The name of the task to execute. * `schedule` The frequency of execution. This can be the number of seconds as an integer, a :class:`~datetime.timedelta`, or a :class:`~celery.schedules.crontab`. You can also define your own custom schedule types, by extending the interface of :class:`~celery.schedules.schedule`. * `args` Positional arguments (:class:`list` or :class:`tuple`). * `kwargs` Keyword arguments (:class:`dict`). * `options` Execution options (:class:`dict`). This can be any argument supported by :meth:`~celery.task.base.Task.apply_async` -- `exchange`, `routing_key`, `expires`, and so on. * `relative` If `relative` is true :class:`~datetime.timedelta` schedules are scheduled "by the clock." This means the frequency is rounded to the nearest second, minute, hour or day depending on the period of the :class:`~datetime.timedelta`. By default `relative` is false, the frequency isn't rounded and will be relative to the time when :program:`celery beat` was started. .. _beat-crontab: Crontab schedules ================= If you want more control over when the task is executed, for example, a particular time of day or day of the week, you can use the :class:`~celery.schedules.crontab` schedule type: .. code-block:: python from celery.schedules import crontab app.conf.beat_schedule = { # Executes every Monday morning at 7:30 a.m. 'add-every-monday-morning': { 'task': 'tasks.add', 'schedule': crontab(hour=7, minute=30, day_of_week=1), 'args': (16, 16), }, } The syntax of these Crontab expressions are very flexible. Some examples: +-----------------------------------------+--------------------------------------------+ | **Example** | **Meaning** | +-----------------------------------------+--------------------------------------------+ | ``crontab()`` | Execute every minute. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour=0)`` | Execute daily at midnight. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/3')`` | Execute every three hours: | | | midnight, 3am, 6am, 9am, | | | noon, 3pm, 6pm, 9pm. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0,`` | Same as previous. | | ``hour='0,3,6,9,12,15,18,21')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute='*/15')`` | Execute every 15 minutes. | +-----------------------------------------+--------------------------------------------+ | ``crontab(day_of_week='sunday')`` | Execute every minute (!) at Sundays. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute='*',`` | Same as previous. 
| | ``hour='*',`` | | | ``day_of_week='sun')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute='*/10',`` | Execute every ten minutes, but only | | ``hour='3,17,22',`` | between 3-4 am, 5-6 pm, and 10-11 pm on | | ``day_of_week='thu,fri')`` | Thursdays or Fridays. | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/2,*/3')`` | Execute every even hour, and every hour | | | divisible by three. This means: | | | at every hour *except*: 1am, | | | 5am, 7am, 11am, 1pm, 5pm, 7pm, | | | 11pm | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/5')`` | Execute hour divisible by 5. This means | | | that it is triggered at 3pm, not 5pm | | | (since 3pm equals the 24-hour clock | | | value of "15", which is divisible by 5). | +-----------------------------------------+--------------------------------------------+ | ``crontab(minute=0, hour='*/3,8-17')`` | Execute every hour divisible by 3, and | | | every hour during office hours (8am-5pm). | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0, day_of_month='2')`` | Execute on the second day of every month. | | | | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0,`` | Execute on every even numbered day. | | ``day_of_month='2-30/3')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0,`` | Execute on the first and third weeks of | | ``day_of_month='1-7,15-21')`` | the month. | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0, day_of_month='11',`` | Execute on the eleventh of May every year. | | ``month_of_year='5')`` | | +-----------------------------------------+--------------------------------------------+ | ``crontab(0, 0,`` | Execute on the first month of every | | ``month_of_year='*/3')`` | quarter. | +-----------------------------------------+--------------------------------------------+ See :class:`celery.schedules.crontab` for more documentation. .. _beat-solar: Solar schedules ================= If you have a task that should be executed according to sunrise, sunset, dawn or dusk, you can use the :class:`~celery.schedules.solar` schedule type: .. 
code-block:: python from celery.schedules import solar app.conf.beat_schedule = { # Executes at sunset in Melbourne 'add-at-melbourne-sunset': { 'task': 'tasks.add', 'schedule': solar('sunset', -37.81753, 144.96715), 'args': (16, 16), }, } The arguments are simply: ``solar(event, latitude, longitude)`` Be sure to use the correct sign for latitude and longitude: +---------------+-------------------+----------------------+ | **Sign** | **Argument** | **Meaning** | +---------------+-------------------+----------------------+ | ``+`` | ``latitude`` | North | +---------------+-------------------+----------------------+ | ``-`` | ``latitude`` | South | +---------------+-------------------+----------------------+ | ``+`` | ``longitude`` | East | +---------------+-------------------+----------------------+ | ``-`` | ``longitude`` | West | +---------------+-------------------+----------------------+ Possible event types are: +-----------------------------------------+--------------------------------------------+ | **Event** | **Meaning** | +-----------------------------------------+--------------------------------------------+ | ``dawn_astronomical`` | Execute at the moment after which the sky | | | is no longer completely dark. This is when | | | the sun is 18 degrees below the horizon. | +-----------------------------------------+--------------------------------------------+ | ``dawn_nautical`` | Execute when there's enough sunlight for | | | the horizon and some objects to be | | | distinguishable; formally, when the sun is | | | 12 degrees below the horizon. | +-----------------------------------------+--------------------------------------------+ | ``dawn_civil`` | Execute when there's enough light for | | | objects to be distinguishable so that | | | outdoor activities can commence; | | | formally, when the Sun is 6 degrees below | | | the horizon. | +-----------------------------------------+--------------------------------------------+ | ``sunrise`` | Execute when the upper edge of the sun | | | appears over the eastern horizon in the | | | morning. | +-----------------------------------------+--------------------------------------------+ | ``solar_noon`` | Execute when the sun is highest above the | | | horizon on that day. | +-----------------------------------------+--------------------------------------------+ | ``sunset`` | Execute when the trailing edge of the sun | | | disappears over the western horizon in the | | | evening. | +-----------------------------------------+--------------------------------------------+ | ``dusk_civil`` | Execute at the end of civil twilight, when | | | objects are still distinguishable and some | | | stars and planets are visible. Formally, | | | when the sun is 6 degrees below the | | | horizon. | +-----------------------------------------+--------------------------------------------+ | ``dusk_nautical`` | Execute when the sun is 12 degrees below | | | the horizon. Objects are no longer | | | distinguishable, and the horizon is no | | | longer visible to the naked eye. | +-----------------------------------------+--------------------------------------------+ | ``dusk_astronomical`` | Execute at the moment after which the sky | | | becomes completely dark; formally, when | | | the sun is 18 degrees below the horizon. | +-----------------------------------------+--------------------------------------------+ All solar events are calculated using UTC, and are therefore unaffected by your timezone setting. 
In polar regions, the sun may not rise or set every day. The scheduler is able to handle these cases (i.e., a ``sunrise`` event won't run on a day when the sun doesn't rise). The one exception is ``solar_noon``, which is formally defined as the moment the sun transits the celestial meridian, and will occur every day even if the sun is below the horizon. Twilight is defined as the period between dawn and sunrise; and between sunset and dusk. You can schedule an event according to "twilight" depending on your definition of twilight (civil, nautical, or astronomical), and whether you want the event to take place at the beginning or end of twilight, using the appropriate event from the list above. See :class:`celery.schedules.solar` for more documentation. .. _beat-starting: Starting the Scheduler ====================== To start the :program:`celery beat` service: .. code-block:: console $ celery -A proj beat You can also embed `beat` inside the worker by enabling the workers :option:`-B ` option, this is convenient if you'll never run more than one worker node, but it's not commonly used and for that reason isn't recommended for production use: .. code-block:: console $ celery -A proj worker -B Beat needs to store the last run times of the tasks in a local database file (named `celerybeat-schedule` by default), so it needs access to write in the current directory, or alternatively you can specify a custom location for this file: .. code-block:: console $ celery -A proj beat -s /home/celery/var/run/celerybeat-schedule .. note:: To daemonize beat see :ref:`daemonizing`. .. _beat-custom-schedulers: Using custom scheduler classes ------------------------------ Custom scheduler classes can be specified on the command-line (the :option:`--scheduler ` argument). The default scheduler is the :class:`celery.beat.PersistentScheduler`, that simply keeps track of the last run times in a local :mod:`shelve` database file. There's also the :pypi:`django-celery-beat` extension that stores the schedule in the Django database, and presents a convenient admin interface to manage periodic tasks at runtime. To install and use this extension: #. Use :command:`pip` to install the package: .. code-block:: console $ pip install django-celery-beat #. Add the ``django_celery_beat`` module to ``INSTALLED_APPS`` in your Django project' :file:`settings.py`:: INSTALLED_APPS = ( ..., 'django_celery_beat', ) Note that there is no dash in the module name, only underscores. #. Apply Django database migrations so that the necessary tables are created: .. code-block:: console $ python manage.py migrate #. Start the :program:`celery beat` service using the ``django_celery_beat.schedulers:DatabaseScheduler`` scheduler: .. code-block:: console $ celery -A proj beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler Note: You may also add this as an settings option directly. #. Visit the Django-Admin interface to set up some periodic tasks. celery-4.1.0/docs/userguide/testing.rst0000644000175000017500000002247513130607475020100 0ustar omeromer00000000000000.. _testing: ================================================================ Testing with Celery ================================================================ Tasks and unit tests ==================== To test task behavior in unit tests the preferred method is mocking. .. admonition:: Eager mode The eager mode enabled by the :setting:`task_always_eager` setting is by definition not suitable for unit tests. 
A Celery task is much like a web view, in that it should only define how to perform the action in the context of being called as a task.

This means optimally tasks only handle things like serialization, message headers, retries, and so on, with the actual logic implemented elsewhere.

Say we had a task like this:

.. code-block:: python

    from decimal import Decimal

    from django.db import OperationalError  # the error our model may raise

    from .models import Product


    @app.task(bind=True)
    def send_order(self, product_pk, quantity, price):
        price = Decimal(price)  # json serializes this to string.

        # models are passed by id, not serialized.
        product = Product.objects.get(pk=product_pk)
        try:
            product.order(quantity, price)
        except OperationalError as exc:
            raise self.retry(exc=exc)

You could write unit tests for this task, using mocking like in this example:

.. code-block:: python

    from decimal import Decimal

    from pytest import raises

    from celery.exceptions import Retry

    # for python 2: use mock.patch from `pip install mock`.
    from unittest.mock import patch

    from django.db import OperationalError

    from proj.models import Product
    from proj.tasks import send_order


    class test_send_order:

        @patch('proj.tasks.Product.order')  # < patching Product in module above
        def test_success(self, product_order):
            product = Product.objects.create(
                name='Foo',
            )
            send_order(product.pk, 3, Decimal('30.3'))
            product_order.assert_called_with(3, Decimal('30.3'))

        @patch('proj.tasks.Product.order')
        @patch('proj.tasks.send_order.retry')
        def test_failure(self, send_order_retry, product_order):
            product = Product.objects.create(
                name='Foo',
            )

            # Set a side effect on the patched methods
            # so that they raise the errors we want.
            send_order_retry.side_effect = Retry()
            product_order.side_effect = OperationalError()

            with raises(Retry):
                send_order(product.pk, 3, Decimal('30.6'))

Py.test
=======

.. versionadded:: 4.0

Celery also ships with a :pypi:`pytest` plugin that adds fixtures that you can use in your integration (or unit) test suites.

Marks
-----

``celery`` - Set test app configuration.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The ``celery`` mark enables you to override the configuration used for a single test case:

.. code-block:: python

    @pytest.mark.celery(result_backend='redis://')
    def test_something():
        ...

or for all the test cases in a class:

.. code-block:: python

    @pytest.mark.celery(result_backend='redis://')
    class test_something:

        def test_one(self):
            ...

        def test_two(self):
            ...

Fixtures
--------

Function scope
^^^^^^^^^^^^^^

``celery_app`` - Celery app used for testing.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This fixture returns a Celery app you can use for testing.

Example:

.. code-block:: python

    def test_create_task(celery_app, celery_worker):
        @celery_app.task
        def mul(x, y):
            return x * y

        assert mul.delay(4, 4).get(timeout=10) == 16

``celery_worker`` - Embed live worker.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This fixture starts a Celery worker instance that you can use for integration tests. The worker will be started in a *separate thread* and will be shutdown as soon as the test returns.

Example:

.. code-block:: python

    # Put this in your conftest.py
    @pytest.fixture(scope='session')
    def celery_config():
        return {
            'broker_url': 'amqp://',
            'result_backend': 'redis://'
        }

    def test_add(celery_worker):
        mytask.delay()

    # If you wish to override some setting in one test case
    # only - you can use the ``celery`` mark:
    @pytest.mark.celery(result_backend='rpc')
    def test_other(celery_worker):
        ...
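If a test only needs to exercise the task's logic, and not message passing, you can also skip the worker fixture entirely and run the task eagerly with ``Task.apply()``. A minimal sketch (the task and values are illustrative):

.. code-block:: python

    def test_mul_eagerly(celery_app):
        @celery_app.task
        def mul(x, y):
            return x * y

        # apply() runs the task inline in the current process and
        # returns an EagerResult; no broker or worker is involved.
        assert mul.apply(args=(4, 4)).get() == 16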
Session scope
^^^^^^^^^^^^^

``celery_config`` - Override to setup Celery test app configuration.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can redefine this fixture to configure the test Celery app.

The config returned by your fixture will then be used to configure the :func:`celery_app`, and :func:`celery_session_app` fixtures.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def celery_config():
        return {
            'broker_url': 'amqp://',
            'result_backend': 'rpc',
        }

``celery_parameters`` - Override to setup Celery test app parameters.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can redefine this fixture to change the ``__init__`` parameters of the test Celery app. In contrast to :func:`celery_config`, these are passed directly when instantiating :class:`~celery.Celery`.

The parameters returned by your fixture will then be used to configure the :func:`celery_app`, and :func:`celery_session_app` fixtures.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def celery_parameters():
        return {
            'task_cls': my.package.MyCustomTaskClass,
            'strict_typing': False,
        }

``celery_worker_parameters`` - Override to setup Celery worker parameters.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can redefine this fixture to change the ``__init__`` parameters of the test Celery workers. These are directly passed to :class:`~celery.worker.WorkController` when it is instantiated.

The parameters returned by your fixture will then be used to configure the :func:`celery_worker`, and :func:`celery_session_worker` fixtures.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def celery_worker_parameters():
        return {
            'queues': ('high-prio', 'low-prio'),
            'exclude_queues': ('celery',),  # note the comma: this must be a tuple
        }

``celery_enable_logging`` - Override to enable logging in embedded workers.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is a fixture you can override to enable logging in embedded workers.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def celery_enable_logging():
        return True

``celery_includes`` - Add additional imports for embedded workers.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can override this fixture to include modules when an embedded worker starts.

You can have this return a list of module names to import, which can be task modules, modules registering signals, and so on.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def celery_includes():
        return [
            'proj.tests.tasks',
            'proj.tests.celery_signal_handlers',
        ]

``celery_worker_pool`` - Override the pool used for embedded workers.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can override this fixture to configure the execution pool used for embedded workers.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def celery_worker_pool():
        return 'prefork'

.. warning::

    You cannot use the gevent/eventlet pools unless your whole test suite is running with the monkey patches enabled.

``celery_session_worker`` - Embedded worker that lives throughout the session.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This fixture starts a worker that lives throughout the testing session (it won't be started/stopped for every test).

Example:
.. code-block:: python

    # Add this to your conftest.py
    @pytest.fixture(scope='session')
    def celery_config():
        return {
            'broker_url': 'amqp://',
            'result_backend': 'rpc',
        }

    # Do this in your tests.
    def test_add_task(celery_session_worker):
        assert add.delay(2, 2).get(timeout=10) == 4

.. warning::

    It's probably a bad idea to mix session and ephemeral workers...

``celery_session_app`` - Celery app used for testing (session scope).
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This can be used by other session scoped fixtures when they need to refer to a Celery app instance.

``use_celery_app_trap`` - Raise exception on falling back to default app.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is a fixture you can override in your ``conftest.py``, to enable the "app trap": if something tries to access the default or current_app, an exception is raised.

Example:

.. code-block:: python

    @pytest.fixture(scope='session')
    def use_celery_app_trap():
        return True

If a test wants to access the default app, you would have to mark it using the ``depends_on_current_app`` fixture:

.. code-block:: python

    @pytest.mark.usefixtures('depends_on_current_app')
    def test_something():
        something()

celery-4.1.0/docs/userguide/monitoring.rst0000644000175000017500000005235313130607475020602 0ustar omeromer00000000000000.. _guide-monitoring:

=================================
 Monitoring and Management Guide
=================================

.. contents::
    :local:

Introduction
============

There are several tools available to monitor and inspect Celery clusters.

This document describes some of these, as well as features related to monitoring, like events and broadcast commands.

.. _monitoring-workers:

Workers
=======

.. _monitoring-control:

Management Command-line Utilities (``inspect``/``control``)
-----------------------------------------------------------

:program:`celery` can also be used to inspect and manage worker nodes (and to some degree tasks).

To list all the commands available do:

.. code-block:: console

    $ celery help

or to get help for a specific command do:

.. code-block:: console

    $ celery <command> --help

Commands
~~~~~~~~

* **shell**: Drop into a Python shell.

  The locals will include the ``celery`` variable: this is the current app.

  Also all known tasks will be automatically added to locals (unless the :option:`--without-tasks <celery shell --without-tasks>` flag is set).

  Uses :pypi:`IPython`, :pypi:`bpython`, or regular :program:`python` in that order if installed. You can force an implementation using :option:`--ipython <celery shell --ipython>`, :option:`--bpython <celery shell --bpython>`, or :option:`--python <celery shell --python>`.

* **status**: List active nodes in this cluster

  .. code-block:: console

        $ celery -A proj status

* **result**: Show the result of a task

  .. code-block:: console

        $ celery -A proj result -t tasks.add 4e196aa4-0141-4601-8138-7aa33db0f577

  Note that you can omit the name of the task as long as the task doesn't use a custom result backend.

* **purge**: Purge messages from all configured task queues.

  This command will remove all messages from queues configured in the :setting:`task_queues` setting:

  .. warning::

        There's no undo for this operation, and messages will be permanently deleted!

  .. code-block:: console

        $ celery -A proj purge

  You can also specify the queues to purge using the `-Q` option:

  .. code-block:: console

        $ celery -A proj purge -Q celery,foo,bar

  and exclude queues from being purged using the `-X` option:

  .. code-block:: console

        $ celery -A proj purge -X celery
* **inspect active**: List active tasks

  .. code-block:: console

        $ celery -A proj inspect active

  These are all the tasks that are currently being executed.

* **inspect scheduled**: List scheduled ETA tasks

  .. code-block:: console

        $ celery -A proj inspect scheduled

  These are tasks reserved by the worker when they have an `eta` or `countdown` argument set.

* **inspect reserved**: List reserved tasks

  .. code-block:: console

        $ celery -A proj inspect reserved

  This will list all tasks that have been prefetched by the worker, and are currently waiting to be executed (doesn't include tasks with an ETA value set).

* **inspect revoked**: List history of revoked tasks

  .. code-block:: console

        $ celery -A proj inspect revoked

* **inspect registered**: List registered tasks

  .. code-block:: console

        $ celery -A proj inspect registered

* **inspect stats**: Show worker statistics (see :ref:`worker-statistics`)

  .. code-block:: console

        $ celery -A proj inspect stats

* **inspect query_task**: Show information about task(s) by id.

  Any worker having a task in this set of ids reserved/active will respond with status and information.

  .. code-block:: console

        $ celery -A proj inspect query_task e9f6c8f0-fec9-4ae8-a8c6-cf8c8451d4f8

  You can also query for information about multiple tasks:

  .. code-block:: console

        $ celery -A proj inspect query_task id1 id2 ... idN

* **control enable_events**: Enable events

  .. code-block:: console

        $ celery -A proj control enable_events

* **control disable_events**: Disable events

  .. code-block:: console

        $ celery -A proj control disable_events

* **migrate**: Migrate tasks from one broker to another (**EXPERIMENTAL**).

  .. code-block:: console

        $ celery -A proj migrate redis://localhost amqp://localhost

  This command will migrate all the tasks on one broker to another. As this command is new and experimental you should be sure to have a backup of the data before proceeding.

.. note::

    All ``inspect`` and ``control`` commands support a :option:`--timeout <celery inspect --timeout>` argument; this is the number of seconds to wait for responses. You may have to increase this timeout if you're not getting a response due to latency.

.. _inspect-destination:

Specifying destination nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default the inspect and control commands operate on all workers. You can specify a single worker, or a list of workers, by using the :option:`--destination <celery inspect --destination>` argument:

.. code-block:: console

    $ celery -A proj inspect -d w1@e.com,w2@e.com reserved

    $ celery -A proj control -d w1@e.com,w2@e.com enable_events
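The ``inspect`` and ``control`` commands are also available programmatically, through the app's control interface, which can be handy in scripts and tests. A minimal sketch (the app name and broker URL are illustrative):

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='amqp://')

    # inspect() broadcasts a request to the workers and gathers replies.
    insp = app.control.inspect()
    print(insp.active())    # currently executing tasks, per worker
    print(insp.reserved())  # prefetched tasks, not yet executing
    print(insp.stats())     # worker statistics

    # Like on the command-line, you can restrict the destination:
    insp = app.control.inspect(destination=['w1@e.com'])

    # Control commands use the same broadcast mechanism:
    app.control.enable_events()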
.. _monitoring-flower:

Flower: Real-time Celery web-monitor
------------------------------------

Flower is a real-time web based monitor and administration tool for Celery. It's under active development, but is already an essential tool. Being the recommended monitor for Celery, it obsoletes the Django-Admin monitor, ``celerymon`` and the ``ncurses`` based monitor.

Flower is pronounced like "flow", but you can also use the botanical version if you prefer.

Features
~~~~~~~~

- Real-time monitoring using Celery Events

    - Task progress and history
    - Ability to show task details (arguments, start time, run-time, and more)
    - Graphs and statistics

- Remote Control

    - View worker status and statistics
    - Shutdown and restart worker instances
    - Control worker pool size and autoscale settings
    - View and modify the queues a worker instance consumes from
    - View currently running tasks
    - View scheduled tasks (ETA/countdown)
    - View reserved and revoked tasks
    - Apply time and rate limits
    - Configuration viewer
    - Revoke or terminate tasks

- HTTP API

    - List workers
    - Shut down a worker
    - Restart worker’s pool
    - Grow worker’s pool
    - Shrink worker’s pool
    - Autoscale worker pool
    - Start consuming from a queue
    - Stop consuming from a queue
    - List tasks
    - List (seen) task types
    - Get a task info
    - Execute a task
    - Execute a task by name
    - Get a task result
    - Change soft and hard time limits for a task
    - Change rate limit for a task
    - Revoke a task

- OpenID authentication

**Screenshots**

.. figure:: ../images/dashboard.png
   :width: 700px

.. figure:: ../images/monitor.png
   :width: 700px

More screenshots_:

.. _screenshots: https://github.com/mher/flower/tree/master/docs/screenshots

Usage
~~~~~

You can use pip to install Flower:

.. code-block:: console

    $ pip install flower

Running the flower command will start a web-server that you can visit:

.. code-block:: console

    $ celery -A proj flower

The default port is 5555, so the web-server is reachable at http://localhost:5555; you can change the port using the ``--port`` argument:

.. code-block:: console

    $ celery -A proj flower --port=5555

The broker URL can also be passed through the ``--broker`` argument:

.. code-block:: console

    $ celery flower --broker=amqp://guest:guest@localhost:5672//
    or
    $ celery flower --broker=redis://guest:guest@localhost:6379/0

Then, you can visit flower in your web browser:

.. code-block:: console

    $ open http://localhost:5555

Flower has many more features than are detailed here, including authorization options. Check out the `official documentation`_ for more information.

.. _official documentation: https://flower.readthedocs.io/en/latest/

.. _monitoring-celeryev:

celery events: Curses Monitor
-----------------------------

.. versionadded:: 2.0

`celery events` is a simple curses monitor displaying task and worker history. You can inspect the result and traceback of tasks, and it also supports some management commands like rate limiting and shutting down workers. This monitor was started as a proof of concept, and you probably want to use Flower instead.

Starting:

.. code-block:: console

    $ celery -A proj events

You should see a screen like:

.. figure:: ../images/celeryevshotsm.jpg

`celery events` is also used to start snapshot cameras (see :ref:`monitoring-snapshots`):

.. code-block:: console

    $ celery -A proj events --camera=<camera-class> --frequency=1.0

and it includes a tool to dump events to :file:`stdout`:

.. code-block:: console

    $ celery -A proj events --dump

For a complete list of options use ``--help``:

.. code-block:: console

    $ celery events --help

.. _`celerymon`: https://github.com/celery/celerymon/

.. _monitoring-rabbitmq:

RabbitMQ
========

To manage a Celery cluster it is important to know how RabbitMQ can be monitored.

RabbitMQ ships with the `rabbitmqctl(1)`_ command; with this you can list queues, exchanges, bindings, queue lengths, and the memory usage of each queue, as well as manage users, virtual hosts and their permissions.

..
note:: The default virtual host (``"/"``) is used in these examples, if you use a custom virtual host you have to add the ``-p`` argument to the command, for example: ``rabbitmqctl list_queues -p my_vhost …`` .. _`rabbitmqctl(1)`: http://www.rabbitmq.com/man/rabbitmqctl.1.man.html .. _monitoring-rmq-queues: Inspecting queues ----------------- Finding the number of tasks in a queue: .. code-block:: console $ rabbitmqctl list_queues name messages messages_ready \ messages_unacknowledged Here `messages_ready` is the number of messages ready for delivery (sent but not received), `messages_unacknowledged` is the number of messages that's been received by a worker but not acknowledged yet (meaning it is in progress, or has been reserved). `messages` is the sum of ready and unacknowledged messages. Finding the number of workers currently consuming from a queue: .. code-block:: console $ rabbitmqctl list_queues name consumers Finding the amount of memory allocated to a queue: .. code-block:: console $ rabbitmqctl list_queues name memory :Tip: Adding the ``-q`` option to `rabbitmqctl(1)`_ makes the output easier to parse. .. _monitoring-redis: Redis ===== If you're using Redis as the broker, you can monitor the Celery cluster using the `redis-cli(1)` command to list lengths of queues. .. _monitoring-redis-queues: Inspecting queues ----------------- Finding the number of tasks in a queue: .. code-block:: console $ redis-cli -h HOST -p PORT -n DATABASE_NUMBER llen QUEUE_NAME The default queue is named `celery`. To get all available queues, invoke: .. code-block:: console $ redis-cli -h HOST -p PORT -n DATABASE_NUMBER keys \* .. note:: Queue keys only exists when there are tasks in them, so if a key doesn't exist it simply means there are no messages in that queue. This is because in Redis a list with no elements in it is automatically removed, and hence it won't show up in the `keys` command output, and `llen` for that list returns 0. Also, if you're using Redis for other purposes, the output of the `keys` command will include unrelated values stored in the database. The recommended way around this is to use a dedicated `DATABASE_NUMBER` for Celery, you can also use database numbers to separate Celery applications from each other (virtual hosts), but this won't affect the monitoring events used by for example Flower as Redis pub/sub commands are global rather than database based. .. _monitoring-munin: Munin ===== This is a list of known Munin plug-ins that can be useful when maintaining a Celery cluster. * ``rabbitmq-munin``: Munin plug-ins for RabbitMQ. https://github.com/ask/rabbitmq-munin * ``celery_tasks``: Monitors the number of times each task type has been executed (requires `celerymon`). http://exchange.munin-monitoring.org/plugins/celery_tasks-2/details * ``celery_task_states``: Monitors the number of tasks in each state (requires `celerymon`). http://exchange.munin-monitoring.org/plugins/celery_tasks/details .. _monitoring-events: Events ====== The worker has the ability to send a message whenever some event happens. These events are then captured by tools like Flower, and :program:`celery events` to monitor the cluster. .. _monitoring-snapshots: Snapshots --------- .. versionadded:: 2.1 Even a single worker can produce a huge amount of events, so storing the history of all events on disk may be very expensive. 
A sequence of events describes the cluster state in that time period, by taking periodic snapshots of this state you can keep all history, but still only periodically write it to disk. To take snapshots you need a Camera class, with this you can define what should happen every time the state is captured; You can write it to a database, send it by email or something else entirely. :program:`celery events` is then used to take snapshots with the camera, for example if you want to capture state every 2 seconds using the camera ``myapp.Camera`` you run :program:`celery events` with the following arguments: .. code-block:: console $ celery -A proj events -c myapp.Camera --frequency=2.0 .. _monitoring-camera: Custom Camera ~~~~~~~~~~~~~ Cameras can be useful if you need to capture events and do something with those events at an interval. For real-time event processing you should use :class:`@events.Receiver` directly, like in :ref:`event-real-time-example`. Here is an example camera, dumping the snapshot to screen: .. code-block:: python from pprint import pformat from celery.events.snapshot import Polaroid class DumpCam(Polaroid): clear_after = True # clear after flush (incl, state.event_count). def on_shutter(self, state): if not state.event_count: # No new events since last snapshot. return print('Workers: {0}'.format(pformat(state.workers, indent=4))) print('Tasks: {0}'.format(pformat(state.tasks, indent=4))) print('Total: {0.event_count} events, {0.task_count} tasks'.format( state)) See the API reference for :mod:`celery.events.state` to read more about state objects. Now you can use this cam with :program:`celery events` by specifying it with the :option:`-c ` option: .. code-block:: console $ celery -A proj events -c myapp.DumpCam --frequency=2.0 Or you can use it programmatically like this: .. code-block:: python from celery import Celery from myapp import DumpCam def main(app, freq=1.0): state = app.events.State() with app.connection() as connection: recv = app.events.Receiver(connection, handlers={'*': state.event}) with DumpCam(state, freq=freq): recv.capture(limit=None, timeout=None) if __name__ == '__main__': app = Celery(broker='amqp://guest@localhost//') main(app) .. _event-real-time-example: Real-time processing -------------------- To process events in real-time you need the following - An event consumer (this is the ``Receiver``) - A set of handlers called when events come in. You can have different handlers for each event type, or a catch-all handler can be used ('*') - State (optional) :class:`@events.State` is a convenient in-memory representation of tasks and workers in the cluster that's updated as events come in. It encapsulates solutions for many common things, like checking if a worker is still alive (by verifying heartbeats), merging event fields together as events come in, making sure time-stamps are in sync, and so on. Combining these you can easily process events in real-time: .. code-block:: python from celery import Celery def my_monitor(app): state = app.events.State() def announce_failed_tasks(event): state.event(event) # task name is sent only with -received event, and state # will keep track of this for us. 
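            # state.tasks is keyed by task id; each value is a Task object
            # that merges the fields from every event seen so far, which is
            # why .name and .info() are usable on the failed task below.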
task = state.tasks.get(event['uuid']) print('TASK FAILED: %s[%s] %s' % ( task.name, task.uuid, task.info(),)) with app.connection() as connection: recv = app.events.Receiver(connection, handlers={ 'task-failed': announce_failed_tasks, '*': state.event, }) recv.capture(limit=None, timeout=None, wakeup=True) if __name__ == '__main__': app = Celery(broker='amqp://guest@localhost//') my_monitor(app) .. note:: The ``wakeup`` argument to ``capture`` sends a signal to all workers to force them to send a heartbeat. This way you can immediately see workers when the monitor starts. You can listen to specific events by specifying the handlers: .. code-block:: python from celery import Celery def my_monitor(app): state = app.events.State() def announce_failed_tasks(event): state.event(event) # task name is sent only with -received event, and state # will keep track of this for us. task = state.tasks.get(event['uuid']) print('TASK FAILED: %s[%s] %s' % ( task.name, task.uuid, task.info(),)) with app.connection() as connection: recv = app.events.Receiver(connection, handlers={ 'task-failed': announce_failed_tasks, }) recv.capture(limit=None, timeout=None, wakeup=True) if __name__ == '__main__': app = Celery(broker='amqp://guest@localhost//') my_monitor(app) .. _event-reference: Event Reference =============== This list contains the events sent by the worker, and their arguments. .. _event-reference-task: Task Events ----------- .. event:: task-sent task-sent ~~~~~~~~~ :signature: ``task-sent(uuid, name, args, kwargs, retries, eta, expires, queue, exchange, routing_key, root_id, parent_id)`` Sent when a task message is published and the :setting:`task_send_sent_event` setting is enabled. .. event:: task-received task-received ~~~~~~~~~~~~~ :signature: ``task-received(uuid, name, args, kwargs, retries, eta, hostname, timestamp, root_id, parent_id)`` Sent when the worker receives a task. .. event:: task-started task-started ~~~~~~~~~~~~ :signature: ``task-started(uuid, hostname, timestamp, pid)`` Sent just before the worker executes the task. .. event:: task-succeeded task-succeeded ~~~~~~~~~~~~~~ :signature: ``task-succeeded(uuid, result, runtime, hostname, timestamp)`` Sent if the task executed successfully. Run-time is the time it took to execute the task using the pool. (Starting from the task is sent to the worker pool, and ending when the pool result handler callback is called). .. event:: task-failed task-failed ~~~~~~~~~~~ :signature: ``task-failed(uuid, exception, traceback, hostname, timestamp)`` Sent if the execution of the task failed. .. event:: task-rejected task-rejected ~~~~~~~~~~~~~ :signature: ``task-rejected(uuid, requeued)`` The task was rejected by the worker, possibly to be re-queued or moved to a dead letter queue. .. event:: task-revoked task-revoked ~~~~~~~~~~~~ :signature: ``task-revoked(uuid, terminated, signum, expired)`` Sent if the task has been revoked (Note that this is likely to be sent by more than one worker). - ``terminated`` is set to true if the task process was terminated, and the ``signum`` field set to the signal used. - ``expired`` is set to true if the task expired. .. event:: task-retried task-retried ~~~~~~~~~~~~ :signature: ``task-retried(uuid, exception, traceback, hostname, timestamp)`` Sent if the task failed, but will be retried in the future. .. _event-reference-worker: Worker Events ------------- .. 
event:: worker-online worker-online ~~~~~~~~~~~~~ :signature: ``worker-online(hostname, timestamp, freq, sw_ident, sw_ver, sw_sys)`` The worker has connected to the broker and is online. - `hostname`: Nodename of the worker. - `timestamp`: Event time-stamp. - `freq`: Heartbeat frequency in seconds (float). - `sw_ident`: Name of worker software (e.g., ``py-celery``). - `sw_ver`: Software version (e.g., 2.2.0). - `sw_sys`: Operating System (e.g., Linux/Darwin). .. event:: worker-heartbeat worker-heartbeat ~~~~~~~~~~~~~~~~ :signature: ``worker-heartbeat(hostname, timestamp, freq, sw_ident, sw_ver, sw_sys, active, processed)`` Sent every minute, if the worker hasn't sent a heartbeat in 2 minutes, it is considered to be offline. - `hostname`: Nodename of the worker. - `timestamp`: Event time-stamp. - `freq`: Heartbeat frequency in seconds (float). - `sw_ident`: Name of worker software (e.g., ``py-celery``). - `sw_ver`: Software version (e.g., 2.2.0). - `sw_sys`: Operating System (e.g., Linux/Darwin). - `active`: Number of currently executing tasks. - `processed`: Total number of tasks processed by this worker. .. event:: worker-offline worker-offline ~~~~~~~~~~~~~~ :signature: ``worker-offline(hostname, timestamp, freq, sw_ident, sw_ver, sw_sys)`` The worker has disconnected from the broker. celery-4.1.0/docs/userguide/signals.rst0000644000175000017500000004013213135426300020040 0ustar omeromer00000000000000.. _signals: ======= Signals ======= .. contents:: :local: Signals allows decoupled applications to receive notifications when certain actions occur elsewhere in the application. Celery ships with many signals that your application can hook into to augment behavior of certain actions. .. _signal-basics: Basics ====== Several kinds of events trigger signals, you can connect to these signals to perform actions as they trigger. Example connecting to the :signal:`after_task_publish` signal: .. code-block:: python from celery.signals import after_task_publish @after_task_publish.connect def task_sent_handler(sender=None, headers=None, body=None, **kwargs): # information about task are located in headers for task messages # using the task protocol version 2. info = headers if 'task' in headers else body print('after_task_publish for task id {info[id]}'.format( info=info, )) Some signals also have a sender you can filter by. For example the :signal:`after_task_publish` signal uses the task name as a sender, so by providing the ``sender`` argument to :class:`~celery.utils.dispatch.signal.Signal.connect` you can connect your handler to be called every time a task with name `"proj.tasks.add"` is published: .. code-block:: python @after_task_publish.connect(sender='proj.tasks.add') def task_sent_handler(sender=None, headers=None, body=None, **kwargs): # information about task are located in headers for task messages # using the task protocol version 2. info = headers if 'task' in headers else body print('after_task_publish for task id {info[id]}'.format( info=info, )) Signals use the same implementation as :mod:`django.core.dispatch`. As a result other keyword parameters (e.g., signal) are passed to all signal handlers by default. The best practice for signal handlers is to accept arbitrary keyword arguments (i.e., ``**kwargs``). That way new Celery versions can add additional arguments without breaking user code. .. _signal-ref: Signals ======= Task Signals ------------ .. signal:: before_task_publish ``before_task_publish`` ~~~~~~~~~~~~~~~~~~~~~~~ .. 
versionadded:: 3.1 Dispatched before a task is published. Note that this is executed in the process sending the task. Sender is the name of the task being sent. Provides arguments: * ``body`` Task message body. This is a mapping containing the task message fields, see :ref:`message-protocol-task-v2` and :ref:`message-protocol-task-v1` for a reference of possible fields that can be defined. * ``exchange`` Name of the exchange to send to or a :class:`~kombu.Exchange` object. * ``routing_key`` Routing key to use when sending the message. * ``headers`` Application headers mapping (can be modified). * ``properties`` Message properties (can be modified) * ``declare`` List of entities (:class:`~kombu.Exchange`, :class:`~kombu.Queue`, or :class:`~kombu.binding` to declare before publishing the message. Can be modified. * ``retry_policy`` Mapping of retry options. Can be any argument to :meth:`kombu.Connection.ensure` and can be modified. .. signal:: after_task_publish ``after_task_publish`` ~~~~~~~~~~~~~~~~~~~~~~ Dispatched when a task has been sent to the broker. Note that this is executed in the process that sent the task. Sender is the name of the task being sent. Provides arguments: * ``headers`` The task message headers, see :ref:`message-protocol-task-v2` and :ref:`message-protocol-task-v1` for a reference of possible fields that can be defined. * ``body`` The task message body, see :ref:`message-protocol-task-v2` and :ref:`message-protocol-task-v1` for a reference of possible fields that can be defined. * ``exchange`` Name of the exchange or :class:`~kombu.Exchange` object used. * ``routing_key`` Routing key used. .. signal:: task_prerun ``task_prerun`` ~~~~~~~~~~~~~~~ Dispatched before a task is executed. Sender is the task object being executed. Provides arguments: * ``task_id`` Id of the task to be executed. * ``task`` The task being executed. * ``args`` The tasks positional arguments. * ``kwargs`` The tasks keyword arguments. .. signal:: task_postrun ``task_postrun`` ~~~~~~~~~~~~~~~~ Dispatched after a task has been executed. Sender is the task object executed. Provides arguments: * ``task_id`` Id of the task to be executed. * ``task`` The task being executed. * ``args`` The tasks positional arguments. * ``kwargs`` The tasks keyword arguments. * ``retval`` The return value of the task. * ``state`` Name of the resulting state. .. signal:: task_retry ``task_retry`` ~~~~~~~~~~~~~~ Dispatched when a task will be retried. Sender is the task object. Provides arguments: * ``request`` The current task request. * ``reason`` Reason for retry (usually an exception instance, but can always be coerced to :class:`str`). * ``einfo`` Detailed exception information, including traceback (a :class:`billiard.einfo.ExceptionInfo` object). .. signal:: task_success ``task_success`` ~~~~~~~~~~~~~~~~ Dispatched when a task succeeds. Sender is the task object executed. Provides arguments * ``result`` Return value of the task. .. signal:: task_failure ``task_failure`` ~~~~~~~~~~~~~~~~ Dispatched when a task fails. Sender is the task object executed. Provides arguments: * ``task_id`` Id of the task. * ``exception`` Exception instance raised. * ``args`` Positional arguments the task was called with. * ``kwargs`` Keyword arguments the task was called with. * ``traceback`` Stack trace object. * ``einfo`` The :class:`billiard.einfo.ExceptionInfo` instance. .. signal:: task_revoked ``task_revoked`` ~~~~~~~~~~~~~~~~ Dispatched when a task is revoked/terminated by the worker. Sender is the task object revoked/terminated. 
Provides arguments: * ``request`` This is a :class:`~celery.worker.request.Request` instance, and not ``task.request``. When using the prefork pool this signal is dispatched in the parent process, so ``task.request`` isn't available and shouldn't be used. Use this object instead, as they share many of the same fields. * ``terminated`` Set to :const:`True` if the task was terminated. * ``signum`` Signal number used to terminate the task. If this is :const:`None` and terminated is :const:`True` then :sig:`TERM` should be assumed. * ``expired`` Set to :const:`True` if the task expired. .. signal:: task_unknown ``task_unknown`` ~~~~~~~~~~~~~~~~ Dispatched when a worker receives a message for a task that's not registered. Sender is the worker :class:`~celery.worker.consumer.Consumer`. Provides arguments: * ``name`` Name of task not found in registry. * ``id`` The task id found in the message. * ``message`` Raw message object. * ``exc`` The error that occurred. .. signal:: task_rejected ``task_rejected`` ~~~~~~~~~~~~~~~~~ Dispatched when a worker receives an unknown type of message to one of its task queues. Sender is the worker :class:`~celery.worker.consumer.Consumer`. Provides arguments: * ``message`` Raw message object. * ``exc`` The error that occurred (if any). App Signals ----------- .. signal:: import_modules ``import_modules`` ~~~~~~~~~~~~~~~~~~ This signal is sent when a program (worker, beat, shell) etc, asks for modules in the :setting:`include` and :setting:`imports` settings to be imported. Sender is the app instance. Worker Signals -------------- .. signal:: celeryd_after_setup ``celeryd_after_setup`` ~~~~~~~~~~~~~~~~~~~~~~~ This signal is sent after the worker instance is set up, but before it calls run. This means that any queues from the :option:`celery worker -Q` option is enabled, logging has been set up and so on. It can be used to add custom queues that should always be consumed from, disregarding the :option:`celery worker -Q` option. Here's an example that sets up a direct queue for each worker, these queues can then be used to route a task to any specific worker: .. code-block:: python from celery.signals import celeryd_after_setup @celeryd_after_setup.connect def setup_direct_queue(sender, instance, **kwargs): queue_name = '{0}.dq'.format(sender) # sender is the nodename of the worker instance.app.amqp.queues.select_add(queue_name) Provides arguments: * ``sender`` Node name of the worker. * ``instance`` This is the :class:`celery.apps.worker.Worker` instance to be initialized. Note that only the :attr:`app` and :attr:`hostname` (nodename) attributes have been set so far, and the rest of ``__init__`` hasn't been executed. * ``conf`` The configuration of the current app. .. signal:: celeryd_init ``celeryd_init`` ~~~~~~~~~~~~~~~~ This is the first signal sent when :program:`celery worker` starts up. The ``sender`` is the host name of the worker, so this signal can be used to setup worker specific configuration: .. code-block:: python from celery.signals import celeryd_init @celeryd_init.connect(sender='worker12@example.com') def configure_worker12(conf=None, **kwargs): conf.task_default_rate_limit = '10/m' or to set up configuration for multiple workers you can omit specifying a sender when you connect: .. 
code-block:: python from celery.signals import celeryd_init @celeryd_init.connect def configure_workers(sender=None, conf=None, **kwargs): if sender in ('worker1@example.com', 'worker2@example.com'): conf.task_default_rate_limit = '10/m' if sender == 'worker3@example.com': conf.worker_prefetch_multiplier = 0 Provides arguments: * ``sender`` Nodename of the worker. * ``instance`` This is the :class:`celery.apps.worker.Worker` instance to be initialized. Note that only the :attr:`app` and :attr:`hostname` (nodename) attributes have been set so far, and the rest of ``__init__`` hasn't been executed. * ``conf`` The configuration of the current app. * ``options`` Options passed to the worker from command-line arguments (including defaults). .. signal:: worker_init ``worker_init`` ~~~~~~~~~~~~~~~ Dispatched before the worker is started. .. signal:: worker_ready ``worker_ready`` ~~~~~~~~~~~~~~~~ Dispatched when the worker is ready to accept work. .. signal:: heartbeat_sent ``heartbeat_sent`` ~~~~~~~~~~~~~~~~~~ Dispatched when Celery sends a worker heartbeat. Sender is the :class:`celery.worker.heartbeat.Heart` instance. .. signal:: worker_shutting_down ``worker_shutting_down`` ~~~~~~~~~~~~~~~~~~~~~~~~ Dispatched when the worker begins the shutdown process. Provides arguments: * ``sig`` The POSIX signal that was received. * ``how`` The shutdown method, warm or cold. * ``exitcode`` The exitcode that will be used when the main process exits. .. signal:: worker_process_init ``worker_process_init`` ~~~~~~~~~~~~~~~~~~~~~~~ Dispatched in all pool child processes when they start. Note that handlers attached to this signal mustn't be blocking for more than 4 seconds, or the process will be killed assuming it failed to start. .. signal:: worker_process_shutdown ``worker_process_shutdown`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dispatched in all pool child processes just before they exit. Note: There's no guarantee that this signal will be dispatched, similarly to :keyword:`finally` blocks it's impossible to guarantee that handlers will be called at shutdown, and if called it may be interrupted during. Provides arguments: * ``pid`` The pid of the child process that's about to shutdown. * ``exitcode`` The exitcode that'll be used when the child process exits. .. signal:: worker_shutdown ``worker_shutdown`` ~~~~~~~~~~~~~~~~~~~ Dispatched when the worker is about to shut down. Beat Signals ------------ .. signal:: beat_init ``beat_init`` ~~~~~~~~~~~~~ Dispatched when :program:`celery beat` starts (either standalone or embedded). Sender is the :class:`celery.beat.Service` instance. .. signal:: beat_embedded_init ``beat_embedded_init`` ~~~~~~~~~~~~~~~~~~~~~~ Dispatched in addition to the :signal:`beat_init` signal when :program:`celery beat` is started as an embedded process. Sender is the :class:`celery.beat.Service` instance. Eventlet Signals ---------------- .. signal:: eventlet_pool_started ``eventlet_pool_started`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Sent when the eventlet pool has been started. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. .. signal:: eventlet_pool_preshutdown ``eventlet_pool_preshutdown`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sent when the worker shutdown, just before the eventlet pool is requested to wait for remaining workers. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. .. signal:: eventlet_pool_postshutdown ``eventlet_pool_postshutdown`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sent when the pool has been joined and the worker is ready to shutdown. 
Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. .. signal:: eventlet_pool_apply ``eventlet_pool_apply`` ~~~~~~~~~~~~~~~~~~~~~~~ Sent whenever a task is applied to the pool. Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance. Provides arguments: * ``target`` The target function. * ``args`` Positional arguments. * ``kwargs`` Keyword arguments. Logging Signals --------------- .. signal:: setup_logging ``setup_logging`` ~~~~~~~~~~~~~~~~~ Celery won't configure the loggers if this signal is connected, so you can use this to completely override the logging configuration with your own. If you'd like to augment the logging configuration setup by Celery then you can use the :signal:`after_setup_logger` and :signal:`after_setup_task_logger` signals. Provides arguments: * ``loglevel`` The level of the logging object. * ``logfile`` The name of the logfile. * ``format`` The log format string. * ``colorize`` Specify if log messages are colored or not. .. signal:: after_setup_logger ``after_setup_logger`` ~~~~~~~~~~~~~~~~~~~~~~ Sent after the setup of every global logger (not task loggers). Used to augment logging configuration. Provides arguments: * ``logger`` The logger object. * ``loglevel`` The level of the logging object. * ``logfile`` The name of the logfile. * ``format`` The log format string. * ``colorize`` Specify if log messages are colored or not. .. signal:: after_setup_task_logger ``after_setup_task_logger`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sent after the setup of every single task logger. Used to augment logging configuration. Provides arguments: * ``logger`` The logger object. * ``loglevel`` The level of the logging object. * ``logfile`` The name of the logfile. * ``format`` The log format string. * ``colorize`` Specify if log messages are colored or not. Command signals --------------- .. signal:: user_preload_options ``user_preload_options`` ~~~~~~~~~~~~~~~~~~~~~~~~ This signal is sent after any of the Celery command line programs are finished parsing the user preload options. It can be used to add additional command-line arguments to the :program:`celery` umbrella command: .. code-block:: python from celery import Celery from celery import signals from celery.bin.base import Option app = Celery() app.user_options['preload'].add(Option( '--monitoring', action='store_true', help='Enable our external monitoring utility, blahblah', )) @signals.user_preload_options.connect def handle_preload_options(options, **kwargs): if options['monitoring']: enable_monitoring() Sender is the :class:`~celery.bin.base.Command` instance, and the value depends on the program that was called (e.g., for the umbrella command it'll be a :class:`~celery.bin.celery.CeleryCommand`) object). Provides arguments: * ``app`` The app instance. * ``options`` Mapping of the parsed user preload options (with default values). Deprecated Signals ------------------ .. signal:: task_sent ``task_sent`` ~~~~~~~~~~~~~ This signal is deprecated, please use :signal:`after_task_publish` instead. celery-4.1.0/docs/userguide/daemonizing.rst0000644000175000017500000003503213130607475020720 0ustar omeromer00000000000000.. _daemonizing: ====================================================================== Daemonization ====================================================================== .. contents:: :local: .. _daemon-generic: Generic init-scripts ====================================================================== See the `extra/generic-init.d/`_ directory Celery distribution. 
This directory contains generic bash init-scripts for the :program:`celery worker` program; these should run on Linux, FreeBSD, OpenBSD, and other Unix-like platforms.

.. _`extra/generic-init.d/`: https://github.com/celery/celery/tree/3.1/extra/generic-init.d/

.. _generic-initd-celeryd:

Init-script: ``celeryd``
----------------------------------------------------------------------

:Usage: `/etc/init.d/celeryd {start|stop|restart|status}`
:Configuration file: :file:`/etc/default/celeryd`

To configure this script to run the worker properly you probably need to at least tell it where to change directory to when it starts (to find the module containing your app, or your configuration module).

The daemonization script is configured by the file :file:`/etc/default/celeryd`. This is a shell (:command:`sh`) script where you can add environment variables like the configuration options below. To add real environment variables affecting the worker you must also export them (e.g., :command:`export DISPLAY=":0"`)

.. Admonition:: Superuser privileges required

    The init-scripts can only be used by root, and the shell configuration file must also be owned by root.

    Unprivileged users don't need to use the init-script, instead they can use the :program:`celery multi` utility (or :program:`celery worker --detach`):

    .. code-block:: console

        $ celery multi start worker1 \
            -A proj \
            --pidfile="$HOME/run/celery/%n.pid" \
            --logfile="$HOME/log/celery/%n%I.log"

        $ celery multi restart worker1 \
            -A proj \
            --logfile="$HOME/log/celery/%n%I.log" \
            --pidfile="$HOME/run/celery/%n.pid"

        $ celery multi stopwait worker1 --pidfile="$HOME/run/celery/%n.pid"

.. _generic-initd-celeryd-example:

Example configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is an example configuration for a Python project.

:file:`/etc/default/celeryd`:

.. code-block:: bash

    # Names of nodes to start
    #   most people will only start one node:
    CELERYD_NODES="worker1"
    #   but you can also start multiple and configure settings
    #   for each in CELERYD_OPTS
    #CELERYD_NODES="worker1 worker2 worker3"
    #   alternatively, you can specify the number of nodes to start:
    #CELERYD_NODES=10

    # Absolute or relative path to the 'celery' command:
    CELERY_BIN="/usr/local/bin/celery"
    #CELERY_BIN="/virtualenvs/def/bin/celery"

    # App instance to use
    # comment out this line if you don't use an app
    CELERY_APP="proj"
    # or fully qualified:
    #CELERY_APP="proj.tasks:app"

    # Where to chdir at start.
    CELERYD_CHDIR="/opt/Myproject/"

    # Extra command-line arguments to the worker
    CELERYD_OPTS="--time-limit=300 --concurrency=8"
    # Configure node-specific settings by appending node name to arguments:
    #CELERYD_OPTS="--time-limit=300 -c 8 -c:worker2 4 -c:worker3 2 -Ofair:worker1"

    # Set logging level to DEBUG
    #CELERYD_LOG_LEVEL="DEBUG"

    # %n will be replaced with the first part of the nodename.
    CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
    CELERYD_PID_FILE="/var/run/celery/%n.pid"

    # Workers should run as an unprivileged user.
    #   You need to create this user manually (or you can choose
    #   a user/group combination that already exists, e.g., nobody).
    CELERYD_USER="celery"
    CELERYD_GROUP="celery"

    # If enabled pid and log directories will be created if missing,
    # and owned by the userid/group configured.
    CELERY_CREATE_DIRS=1

Using a login shell
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can inherit the environment of the ``CELERYD_USER`` by using a login shell:

..
code-block:: bash CELERYD_SU_ARGS="-l" Note that this isn't recommended, and that you should only use this option when absolutely necessary. .. _generic-initd-celeryd-django-example: Example Django configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Django users now uses the exact same template as above, but make sure that the module that defines your Celery app instance also sets a default value for :envvar:`DJANGO_SETTINGS_MODULE` as shown in the example Django project in :ref:`django-first-steps`. .. _generic-initd-celeryd-options: Available options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``CELERY_APP`` App instance to use (value for :option:`--app ` argument). * ``CELERY_BIN`` Absolute or relative path to the :program:`celery` program. Examples: * :file:`celery` * :file:`/usr/local/bin/celery` * :file:`/virtualenvs/proj/bin/celery` * :file:`/virtualenvs/proj/bin/python -m celery` * ``CELERYD_NODES`` List of node names to start (separated by space). * ``CELERYD_OPTS`` Additional command-line arguments for the worker, see `celery worker --help` for a list. This also supports the extended syntax used by `multi` to configure settings for individual nodes. See `celery multi --help` for some multi-node configuration examples. * ``CELERYD_CHDIR`` Path to change directory to at start. Default is to stay in the current directory. * ``CELERYD_PID_FILE`` Full path to the PID file. Default is /var/run/celery/%n.pid * ``CELERYD_LOG_FILE`` Full path to the worker log file. Default is /var/log/celery/%n%I.log **Note**: Using `%I` is important when using the prefork pool as having multiple processes share the same log file will lead to race conditions. * ``CELERYD_LOG_LEVEL`` Worker log level. Default is INFO. * ``CELERYD_USER`` User to run the worker as. Default is current user. * ``CELERYD_GROUP`` Group to run worker as. Default is current user. * ``CELERY_CREATE_DIRS`` Always create directories (log directory and pid file directory). Default is to only create directories when no custom logfile/pidfile set. * ``CELERY_CREATE_RUNDIR`` Always create pidfile directory. By default only enabled when no custom pidfile location set. * ``CELERY_CREATE_LOGDIR`` Always create logfile directory. By default only enable when no custom logfile location set. .. _generic-initd-celerybeat: Init-script: ``celerybeat`` ---------------------------------------------------------------------- :Usage: `/etc/init.d/celerybeat {start|stop|restart}` :Configuration file: :file:`/etc/default/celerybeat` or :file:`/etc/default/celeryd`. .. _generic-initd-celerybeat-example: Example configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is an example configuration for a Python project: `/etc/default/celerybeat`: .. code-block:: bash # Absolute or relative path to the 'celery' command: CELERY_BIN="/usr/local/bin/celery" #CELERY_BIN="/virtualenvs/def/bin/celery" # App instance to use # comment out this line if you don't use an app CELERY_APP="proj" # or fully qualified: #CELERY_APP="proj.tasks:app" # Where to chdir at start. CELERYBEAT_CHDIR="/opt/Myproject/" # Extra arguments to celerybeat CELERYBEAT_OPTS="--schedule=/var/run/celery/celerybeat-schedule" .. 
_generic-initd-celerybeat-django-example: Example Django configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You should use the same template as above, but make sure the ``DJANGO_SETTINGS_MODULE`` variable is set (and exported), and that ``CELERYD_CHDIR`` is set to the projects directory: .. code-block:: bash export DJANGO_SETTINGS_MODULE="settings" CELERYD_CHDIR="/opt/MyProject" .. _generic-initd-celerybeat-options: Available options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``CELERY_APP`` App instance to use (value for :option:`--app ` argument). * ``CELERYBEAT_OPTS`` Additional arguments to :program:`celery beat`, see :command:`celery beat --help` for a list of available options. * ``CELERYBEAT_PID_FILE`` Full path to the PID file. Default is :file:`/var/run/celeryd.pid`. * ``CELERYBEAT_LOG_FILE`` Full path to the log file. Default is :file:`/var/log/celeryd.log`. * ``CELERYBEAT_LOG_LEVEL`` Log level to use. Default is ``INFO``. * ``CELERYBEAT_USER`` User to run beat as. Default is the current user. * ``CELERYBEAT_GROUP`` Group to run beat as. Default is the current user. * ``CELERY_CREATE_DIRS`` Always create directories (log directory and pid file directory). Default is to only create directories when no custom logfile/pidfile set. * ``CELERY_CREATE_RUNDIR`` Always create pidfile directory. By default only enabled when no custom pidfile location set. * ``CELERY_CREATE_LOGDIR`` Always create logfile directory. By default only enable when no custom logfile location set. .. _generic-initd-troubleshooting: Troubleshooting ---------------------------------------------------------------------- If you can't get the init-scripts to work, you should try running them in *verbose mode*: .. code-block:: console # sh -x /etc/init.d/celeryd start This can reveal hints as to why the service won't start. If the worker starts with *"OK"* but exits almost immediately afterwards and there's no evidence in the log file, then there's probably an error but as the daemons standard outputs are already closed you'll not be able to see them anywhere. For this situation you can use the :envvar:`C_FAKEFORK` environment variable to skip the daemonization step: .. code-block:: console # C_FAKEFORK=1 sh -x /etc/init.d/celeryd start and now you should be able to see the errors. Commonly such errors are caused by insufficient permissions to read from, or write to a file, and also by syntax errors in configuration modules, user modules, third-party libraries, or even from Celery itself (if you've found a bug you should :ref:`report it `). .. _daemon-systemd-generic: Usage ``systemd`` ====================================================================== * `extra/systemd/`_ .. _`extra/systemd/`: https://github.com/celery/celery/tree/3.1/extra/systemd/ .. _generic-systemd-celery: :Usage: `systemctl {start|stop|restart|status} celery.service` :Configuration file: /etc/conf.d/celery Service file: celery.service ---------------------------------------------------------------------- This is an example systemd file: :file:`/etc/systemd/system/celery.service`: .. 
code-block:: bash [Unit] Description=Celery Service After=network.target [Service] Type=forking User=celery Group=celery EnvironmentFile=-/etc/conf.d/celery WorkingDirectory=/opt/celery ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} \ -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \ --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}' ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \ --pidfile=${CELERYD_PID_FILE}' ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} \ -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \ --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}' [Install] WantedBy=multi-user.target Once you've put that file in :file:`/etc/systemd/system`, you should run :command:`systemctl daemon-reload` in order that Systemd acknowledges that file. You should also run that command each time you modify it. To configure user, group, :command:`chdir` change settings: ``User``, ``Group``, and ``WorkingDirectory`` defined in :file:`/etc/systemd/system/celery.service`. You can also use systemd-tmpfiles in order to create working directories (for logs and pid). :file: `/etc/tmpfiles.d/celery.conf` .. code-block:: bash d /var/run/celery 0755 celery celery - d /var/log/celery 0755 celery celery - .. _generic-systemd-celery-example: Example configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is an example configuration for a Python project: :file:`/etc/conf.d/celery`: .. code-block:: bash # Name of nodes to start # here we have a single node CELERYD_NODES="w1" # or we could have three nodes: #CELERYD_NODES="w1 w2 w3" # Absolute or relative path to the 'celery' command: CELERY_BIN="/usr/local/bin/celery" #CELERY_BIN="/virtualenvs/def/bin/celery" # App instance to use # comment out this line if you don't use an app CELERY_APP="proj" # or fully qualified: #CELERY_APP="proj.tasks:app" # How to call manage.py CELERYD_MULTI="multi" # Extra command-line arguments to the worker CELERYD_OPTS="--time-limit=300 --concurrency=8" # - %n will be replaced with the first part of the nodename. # - %I will be replaced with the current child process index # and is important when using the prefork pool to avoid race conditions. CELERYD_PID_FILE="/var/run/celery/%n.pid" CELERYD_LOG_FILE="/var/log/celery/%n%I.log" CELERYD_LOG_LEVEL="INFO" Running the worker with superuser privileges (root) ====================================================================== Running the worker with superuser privileges is a very dangerous practice. There should always be a workaround to avoid running as root. Celery may run arbitrary code in messages serialized with pickle - this is dangerous, especially when run as root. By default Celery won't run workers as root. The associated error message may not be visible in the logs but may be seen if :envvar:`C_FAKEFORK` is used. To force Celery to run workers as root use :envvar:`C_FORCE_ROOT`. When running as root without :envvar:`C_FORCE_ROOT` the worker will appear to start with *"OK"* but exit immediately after with no apparent errors. This problem may appear when running the project in a new development or production environment (inadvertently) as root. .. _daemon-supervisord: :pypi:`supervisor` ====================================================================== * `extra/supervisord/`_ .. _`extra/supervisord/`: https://github.com/celery/celery/tree/master/extra/supervisord/ .. 
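The guide doesn't reproduce the supervisor configuration inline, but a typical program section for running the worker under :pypi:`supervisor` looks roughly like the following. This is a sketch only: the command path, project name, directory, and user are assumptions, and the maintained example lives in the `extra/supervisord/`_ directory referenced above:

.. code-block:: ini

    [program:celery]
    ; Command, directory and user are illustrative; adjust to your project.
    command=/usr/local/bin/celery -A proj worker --loglevel=INFO
    directory=/opt/MyProject
    user=celery
    numprocs=1
    autostart=true
    autorestart=true
    ; Give running tasks time to finish at shutdown before a hard kill.
    stopwaitsecs=600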
_daemon-launchd: ``launchd`` (macOS) ====================================================================== * `extra/macOS`_ .. _`extra/macOS`: https://github.com/celery/celery/tree/master/extra/macOS/ celery-4.1.0/docs/userguide/tasks.rst0000644000175000017500000016015213130607475017543 0ustar omeromer00000000000000.. _guide-tasks: ===================================================================== Tasks ===================================================================== Tasks are the building blocks of Celery applications. A task is a class that can be created out of any callable. It performs dual roles in that it defines both what happens when a task is called (sends a message), and what happens when a worker receives that message. Every task class has a unique name, and this name is referenced in messages so the worker can find the right function to execute. A task message is not removed from the queue until that message has been :term:`acknowledged` by a worker. A worker can reserve many messages in advance and even if the worker is killed -- by power failure or some other reason -- the message will be redelivered to another worker. Ideally task functions should be :term:`idempotent`: meaning the function won't cause unintended effects even if called multiple times with the same arguments. Since the worker cannot detect if your tasks are idempotent, the default behavior is to acknowledge the message in advance, just before it's executed, so that a task invocation that already started is never executed again. If your task is idempotent you can set the :attr:`~Task.acks_late` option to have the worker acknowledge the message *after* the task returns instead. See also the FAQ entry :ref:`faq-acks_late-vs-retry`. Note that the worker will acknowledge the message if the child process executing the task is terminated (either by the task calling :func:`sys.exit`, or by signal) even when :attr:`~Task.acks_late` is enabled. This behavior is intentional, as... #. We don't want to rerun tasks that force the kernel to send a :sig:`SIGSEGV` (segmentation fault) or similar signals to the process. #. We assume that a system administrator deliberately killing the task does not want it to automatically restart. #. A task that allocates too much memory is in danger of triggering the kernel OOM killer; if redelivered, the same may happen again. #. A task that always fails when redelivered may cause a high-frequency message loop taking down the system. If you really want a task to be redelivered in these scenarios you should consider enabling the :setting:`task_reject_on_worker_lost` setting. .. warning:: A task that blocks indefinitely may eventually stop the worker instance from doing any other work. If your task does I/O then make sure you add timeouts to these operations, like adding a timeout to a web request using the :pypi:`requests` library: .. code-block:: python connect_timeout, read_timeout = 5.0, 30.0 response = requests.get(URL, timeout=(connect_timeout, read_timeout)) :ref:`Time limits <worker-time-limits>` are convenient for making sure all tasks return in a timely manner, but a time limit event will actually kill the process by force, so only use them to detect cases where you haven't used manual timeouts yet. The default prefork pool scheduler is not friendly to long-running tasks, so if you have tasks that run for minutes/hours make sure you enable the :option:`-Ofair <celery worker -O>` command-line argument to the :program:`celery worker`.
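For example, the flag can be passed when starting the worker; a minimal sketch, assuming an app module named ``proj``:

.. code-block:: console

    $ celery -A proj worker -l info -Ofair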
See :ref:`prefork-pool-prefetch` for more information, and for the best performance route long-running and short-running tasks to dedicated workers (:ref:`routing-automatic`). If your worker hangs then please investigate what tasks are running before submitting an issue, as most likely the hanging is caused by one or more tasks hanging on a network operation. -- In this chapter you'll learn all about defining tasks, and this is the **table of contents**: .. contents:: :local: :depth: 1 .. _task-basics: Basics ====== You can easily create a task from any callable by using the :meth:`~@task` decorator: .. code-block:: python from .models import User @app.task def create_user(username, password): User.objects.create(username=username, password=password) There are also many :ref:`options ` that can be set for the task, these can be specified as arguments to the decorator: .. code-block:: python @app.task(serializer='json') def create_user(username, password): User.objects.create(username=username, password=password) .. sidebar:: How do I import the task decorator? And what's "app"? The task decorator is available on your :class:`@Celery` application instance, if you don't know what this is then please read :ref:`first-steps`. If you're using Django (see :ref:`django-first-steps`), or you're the author of a library then you probably want to use the :func:`@shared_task` decorator: .. code-block:: python from celery import shared_task @shared_task def add(x, y): return x + y .. sidebar:: Multiple decorators When using multiple decorators in combination with the task decorator you must make sure that the `task` decorator is applied last (oddly, in Python this means it must be first in the list): .. code-block:: python @app.task @decorator2 @decorator1 def add(x, y): return x + y Bound tasks ----------- A task being bound means the first argument to the task will always be the task instance (``self``), just like Python bound methods: .. code-block:: python logger = get_task_logger(__name__) @task(bind=True) def add(self, x, y): logger.info(self.request.id) Bound tasks are needed for retries (using :meth:`Task.retry() <@Task.retry>`), for accessing information about the current task request, and for any additional functionality you add to custom task base classes. Task inheritance ---------------- The ``base`` argument to the task decorator specifies the base class of the task: .. code-block:: python import celery class MyTask(celery.Task): def on_failure(self, exc, task_id, args, kwargs, einfo): print('{0!r} failed: {1!r}'.format(task_id, exc)) @task(base=MyTask) def add(x, y): raise KeyError() .. _task-names: Names ===== Every task must have a unique name. If no explicit name is provided the task decorator will generate one for you, and this name will be based on 1) the module the task is defined in, and 2) the name of the task function. Example setting explicit name: .. code-block:: pycon >>> @app.task(name='sum-of-two-numbers') >>> def add(x, y): ... return x + y >>> add.name 'sum-of-two-numbers' A best practice is to use the module name as a name-space, this way names won't collide if there's already a task with that name defined in another module. .. code-block:: pycon >>> @app.task(name='tasks.add') >>> def add(x, y): ... return x + y You can tell the name of the task by investigating its ``.name`` attribute: .. 
code-block:: pycon >>> add.name 'tasks.add' The name we specified here (``tasks.add``) is exactly the name that would've been automatically generated for us if the task was defined in a module named :file:`tasks.py`: :file:`tasks.py`: .. code-block:: python @app.task def add(x, y): return x + y .. code-block:: pycon >>> from tasks import add >>> add.name 'tasks.add' .. _task-naming-relative-imports: Automatic naming and relative imports ------------------------------------- .. sidebar:: Absolute Imports The best practice for developers targetting Python 2 is to add the following to the top of **every module**: .. code-block:: python from __future__ import absolute_import This will force you to always use absolute imports so you will never have any problems with tasks using relative names. Absolute imports are the default in Python 3 so you don't need this if you target that version. Relative imports and automatic name generation don't go well together, so if you're using relative imports you should set the name explicitly. For example if the client imports the module ``"myapp.tasks"`` as ``".tasks"``, and the worker imports the module as ``"myapp.tasks"``, the generated names won't match and an :exc:`~@NotRegistered` error will be raised by the worker. This is also the case when using Django and using ``project.myapp``-style naming in ``INSTALLED_APPS``: .. code-block:: python INSTALLED_APPS = ['project.myapp'] If you install the app under the name ``project.myapp`` then the tasks module will be imported as ``project.myapp.tasks``, so you must make sure you always import the tasks using the same name: .. code-block:: pycon >>> from project.myapp.tasks import mytask # << GOOD >>> from myapp.tasks import mytask # << BAD!!! The second example will cause the task to be named differently since the worker and the client imports the modules under different names: .. code-block:: pycon >>> from project.myapp.tasks import mytask >>> mytask.name 'project.myapp.tasks.mytask' >>> from myapp.tasks import mytask >>> mytask.name 'myapp.tasks.mytask' For this reason you must be consistent in how you import modules, and that is also a Python best practice. Similarly, you shouldn't use old-style relative imports: .. code-block:: python from module import foo # BAD! from proj.module import foo # GOOD! New-style relative imports are fine and can be used: .. code-block:: python from .module import foo # GOOD! If you want to use Celery with a project already using these patterns extensively and you don't have the time to refactor the existing code then you can consider specifying the names explicitly instead of relying on the automatic naming: .. code-block:: python @task(name='proj.tasks.add') def add(x, y): return x + y .. _task-name-generator-info: Changing the automatic naming behavior -------------------------------------- .. versionadded:: 4.0 There are some cases when the default automatic naming isn't suitable. Consider you have many tasks within many different modules:: project/ /__init__.py /celery.py /moduleA/ /__init__.py /tasks.py /moduleB/ /__init__.py /tasks.py Using the default automatic naming, each task will have a generated name like `moduleA.tasks.taskA`, `moduleA.tasks.taskB`, `moduleB.tasks.test`, and so on. You may want to get rid of having `tasks` in all task names. As pointed above, you can explicitly give names for all tasks, or you can change the automatic naming behavior by overriding :meth:`@gen_task_name`. Continuing with the example, `celery.py` may contain: .. 
code-block:: python from celery import Celery class MyCelery(Celery): def gen_task_name(self, name, module): if module.endswith('.tasks'): module = module[:-6] return super(MyCelery, self).gen_task_name(name, module) app = MyCelery('main') So each task will have a name like `moduleA.taskA`, `moduleA.taskB` and `moduleB.test`. .. warning:: Make sure that your :meth:`@gen_task_name` is a pure function: meaning that for the same input it must always return the same output. .. _task-request-info: Task Request ============ :attr:`Task.request <@Task.request>` contains information and state related to the currently executing task. The request defines the following attributes: :id: The unique id of the executing task. :group: The unique id of the task's :ref:`group `, if this task is a member. :chord: The unique id of the chord this task belongs to (if the task is part of the header). :correlation_id: Custom ID used for things like de-duplication. :args: Positional arguments. :kwargs: Keyword arguments. :origin: Name of host that sent this task. :retries: How many times the current task has been retried. An integer starting at `0`. :is_eager: Set to :const:`True` if the task is executed locally in the client, not by a worker. :eta: The original ETA of the task (if any). This is in UTC time (depending on the :setting:`enable_utc` setting). :expires: The original expiry time of the task (if any). This is in UTC time (depending on the :setting:`enable_utc` setting). :hostname: Node name of the worker instance executing the task. :delivery_info: Additional message delivery information. This is a mapping containing the exchange and routing key used to deliver this task. Used by for example :meth:`Task.retry() <@Task.retry>` to resend the task to the same destination queue. Availability of keys in this dict depends on the message broker used. :reply-to: Name of queue to send replies back to (used with RPC result backend for example). :called_directly: This flag is set to true if the task wasn't executed by the worker. :timelimit: A tuple of the current ``(soft, hard)`` time limits active for this task (if any). :callbacks: A list of signatures to be called if this task returns successfully. :errback: A list of signatures to be called if this task fails. :utc: Set to true the caller has UTC enabled (:setting:`enable_utc`). .. versionadded:: 3.1 :headers: Mapping of message headers sent with this task message (may be :const:`None`). :reply_to: Where to send reply to (queue name). :correlation_id: Usually the same as the task id, often used in amqp to keep track of what a reply is for. .. versionadded:: 4.0 :root_id: The unique id of the first task in the workflow this task is part of (if any). :parent_id: The unique id of the task that called this task (if any). :chain: Reversed list of tasks that form a chain (if any). The last item in this list will be the next task to succeed the current task. If using version one of the task protocol the chain tasks will be in ``request.callbacks`` instead. Example ------- An example task accessing information in the context is: .. code-block:: python @app.task(bind=True) def dump_context(self, x, y): print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format( self.request)) The ``bind`` argument means that the function will be a "bound method" so that you can access attributes and methods on the task type instance. .. _task-logging: Logging ======= The worker will automatically set up logging for you, or you can configure logging manually. 
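If you do configure it manually, one approach is to attach extra handlers once the worker has set up its logging, using the ``after_setup_logger`` signal. This is a minimal sketch, assuming you want an additional file handler (the log path is an assumption):

.. code-block:: python

    import logging

    from celery.signals import after_setup_logger

    @after_setup_logger.connect
    def add_extra_handler(logger, *args, **kwargs):
        # attach another handler to the logger Celery just configured
        # (the path below is only an example)
        logger.addHandler(logging.FileHandler('/var/log/celery/extra.log'))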
A special logger is available named "celery.task"; you can inherit from this logger to automatically get the task name and unique id as part of the logs. The best practice is to create a common logger for all of your tasks at the top of your module: .. code-block:: python from celery.utils.log import get_task_logger logger = get_task_logger(__name__) @app.task def add(x, y): logger.info('Adding {0} + {1}'.format(x, y)) return x + y Celery uses the standard Python logging library, and the documentation can be found :mod:`here <logging>`. You can also use :func:`print`, as anything written to standard out/-err will be redirected to the logging system (you can disable this, see :setting:`worker_redirect_stdouts`). .. note:: The worker won't update the redirection if you create a logger instance somewhere in your task or task module. If you want to redirect ``sys.stdout`` and ``sys.stderr`` to a custom logger you have to enable this manually, for example: .. code-block:: python import sys logger = get_task_logger(__name__) @app.task(bind=True) def add(self, x, y): old_outs = sys.stdout, sys.stderr rlevel = self.app.conf.worker_redirect_stdouts_level try: self.app.log.redirect_stdouts_to_logger(logger, rlevel) print('Adding {0} + {1}'.format(x, y)) return x + y finally: sys.stdout, sys.stderr = old_outs .. _task-argument-checking: Argument checking ----------------- .. versionadded:: 4.0 Celery will verify the arguments passed when you call the task, just like Python does when calling a normal function: .. code-block:: pycon >>> @app.task ... def add(x, y): ... return x + y # Calling the task with two arguments works: >>> add.delay(8, 8) # Calling the task with only one argument fails: >>> add.delay(8) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "celery/app/task.py", line 376, in delay return self.apply_async(args, kwargs) File "celery/app/task.py", line 485, in apply_async check_arguments(*(args or ()), **(kwargs or {})) TypeError: add() takes exactly 2 arguments (1 given) You can disable the argument checking for any task by setting its :attr:`~@Task.typing` attribute to :const:`False`: .. code-block:: pycon >>> @app.task(typing=False) ... def add(x, y): ... return x + y # Works locally, but the worker receiving the task will raise an error. >>> add.delay(8) Hiding sensitive information in arguments ----------------------------------------- .. versionadded:: 4.0 When using :setting:`task_protocol` 2 or higher (default since 4.0), you can override how positional arguments and keyword arguments are represented in logs and monitoring events using the ``argsrepr`` and ``kwargsrepr`` calling arguments: .. code-block:: pycon >>> add.apply_async((2, 3), argsrepr='(<secret-one>, <secret-two>)') >>> charge.s(account, card='1234 5678 1234 5678').set( ... kwargsrepr=repr({'card': '**** **** **** 5678'}) ... ).delay() .. warning:: Sensitive information will still be accessible to anyone able to read your task message from the broker, or otherwise able to intercept it. For this reason you should probably encrypt your message if it contains sensitive information, or in this example with a credit card number the actual number could be stored encrypted in a secure store that you retrieve and decrypt in the task itself. .. _task-retry: Retrying ======== :meth:`Task.retry() <@Task.retry>` can be used to re-execute the task, for example in the event of recoverable errors.
When you call ``retry`` it'll send a new message, using the same task-id, and it'll take care to make sure the message is delivered to the same queue as the originating task. When a task is retried this is also recorded as a task state, so that you can track the progress of the task using the result instance (see :ref:`task-states`). Here's an example using ``retry``: .. code-block:: python @app.task(bind=True) def send_twitter_status(self, oauth, tweet): try: twitter = Twitter(oauth) twitter.update_status(tweet) except (Twitter.FailWhaleError, Twitter.LoginError) as exc: raise self.retry(exc=exc) .. note:: The :meth:`Task.retry() <@Task.retry>` call will raise an exception so any code after the retry won't be reached. This is the :exc:`~@Retry` exception, it isn't handled as an error but rather as a semi-predicate to signify to the worker that the task is to be retried, so that it can store the correct state when a result backend is enabled. This is normal operation and always happens unless the ``throw`` argument to retry is set to :const:`False`. The bind argument to the task decorator will give access to ``self`` (the task type instance). The ``exc`` method is used to pass exception information that's used in logs, and when storing task results. Both the exception and the traceback will be available in the task state (if a result backend is enabled). If the task has a ``max_retries`` value the current exception will be re-raised if the max number of retries has been exceeded, but this won't happen if: - An ``exc`` argument wasn't given. In this case the :exc:`~@MaxRetriesExceededError` exception will be raised. - There's no current exception If there's no original exception to re-raise the ``exc`` argument will be used instead, so: .. code-block:: python self.retry(exc=Twitter.LoginError()) will raise the ``exc`` argument given. .. _task-retry-custom-delay: Using a custom retry delay -------------------------- When a task is to be retried, it can wait for a given amount of time before doing so, and the default delay is defined by the :attr:`~@Task.default_retry_delay` attribute. By default this is set to 3 minutes. Note that the unit for setting the delay is in seconds (int or float). You can also provide the `countdown` argument to :meth:`~@Task.retry` to override this default. .. code-block:: python @app.task(bind=True, default_retry_delay=30 * 60) # retry in 30 minutes. def add(self, x, y): try: something_raising() except Exception as exc: # overrides the default delay to retry after 1 minute raise self.retry(exc=exc, countdown=60) .. _task-autoretry: Automatic retry for known exceptions ------------------------------------ .. versionadded:: 4.0 Sometimes you just want to retry a task whenever a particular exception is raised. Fortunately, you can tell Celery to automatically retry a task using `autoretry_for` argument in `~@Celery.task` decorator: .. code-block:: python from twitter.exceptions import FailWhaleError @app.task(autoretry_for=(FailWhaleError,)) def refresh_timeline(user): return twitter.refresh_timeline(user) If you want to specify custom arguments for internal `~@Task.retry` call, pass `retry_kwargs` argument to `~@Celery.task` decorator: .. code-block:: python @app.task(autoretry_for=(FailWhaleError,), retry_kwargs={'max_retries': 5}) def refresh_timeline(user): return twitter.refresh_timeline(user) This is provided as an alternative to manually handling the exceptions, and the example above will do the same as wrapping the task body in a :keyword:`try` ... 
:keyword:`except` statement: .. code-block:: python @app.task(bind=True) def refresh_timeline(self, user): try: twitter.refresh_timeline(user) except FailWhaleError as exc: raise self.retry(exc=exc, max_retries=5) If you want to automatically retry on any error, simply use: .. code-block:: python @app.task(autoretry_for=(Exception,)) def x(): ... .. _task-options: List of Options =============== The task decorator can take a number of options that change the way the task behaves, for example you can set the rate limit for a task using the :attr:`rate_limit` option. Any keyword argument passed to the task decorator will actually be set as an attribute of the resulting task class, and this is a list of the built-in attributes. General ------- .. _task-general-options: .. attribute:: Task.name The name the task is registered as. You can set this name manually, or a name will be automatically generated using the module and class name. See also :ref:`task-names`. .. attribute:: Task.request If the task is being executed this will contain information about the current request. Thread local storage is used. See :ref:`task-request-info`. .. attribute:: Task.max_retries Only applies if the task calls ``self.retry`` or if the task is decorated with the :ref:`autoretry_for <task-autoretry>` argument. The maximum number of attempted retries before giving up. If the number of retries exceeds this value a :exc:`~@MaxRetriesExceededError` exception will be raised. .. note:: You have to call :meth:`~@Task.retry` manually, as it won't automatically retry on exception. The default is ``3``. A value of :const:`None` will disable the retry limit and the task will retry forever until it succeeds. .. attribute:: Task.throws Optional tuple of expected error classes that shouldn't be regarded as an actual error. Errors in this list will be reported as a failure to the result backend, but the worker won't log the event as an error, and no traceback will be included. Example: .. code-block:: python @task(throws=(KeyError, HttpNotFound)) def get_foo(): something() Error types: - Expected errors (in ``Task.throws``) Logged with severity ``INFO``, traceback excluded. - Unexpected errors Logged with severity ``ERROR``, with traceback included. .. attribute:: Task.default_retry_delay Default time in seconds before a retry of the task should be executed. Can be either :class:`int` or :class:`float`. Default is a three minute delay. .. attribute:: Task.rate_limit Set the rate limit for this task type (limits the number of tasks that can be run in a given time frame). Tasks will still complete when a rate limit is in effect, but it may take some time before it's allowed to start. If this is :const:`None` no rate limit is in effect. If it is an integer or float, it is interpreted as "tasks per second". The rate limits can be specified in seconds, minutes or hours by appending `"/s"`, `"/m"` or `"/h"` to the value. Tasks will be evenly distributed over the specified time frame. Example: `"100/m"` (hundred tasks a minute). This will enforce a minimum delay of 600ms between starting two tasks on the same worker instance. Default is the :setting:`task_default_rate_limit` setting; if not specified, rate limiting for tasks is disabled by default. Note that this is a *per worker instance* rate limit, and not a global rate limit. To enforce a global rate limit (e.g., for an API with a maximum number of requests per second), you must restrict to a given queue. .. note:: This attribute is ignored if the task is requested with an ETA. ..
attribute:: Task.time_limit The hard time limit, in seconds, for this task. When not set the worker's default is used. .. attribute:: Task.soft_time_limit The soft time limit for this task. When not set the worker's default is used. .. attribute:: Task.ignore_result Don't store task state. Note that this means you can't use :class:`~celery.result.AsyncResult` to check if the task is ready, or get its return value. .. attribute:: Task.store_errors_even_if_ignored If :const:`True`, errors will be stored even if the task is configured to ignore results. .. attribute:: Task.serializer A string identifying the default serialization method to use. Defaults to the :setting:`task_serializer` setting. Can be `pickle`, `json`, `yaml`, or any custom serialization methods that have been registered with :mod:`kombu.serialization.registry`. Please see :ref:`calling-serializers` for more information. .. attribute:: Task.compression A string identifying the default compression scheme to use. Defaults to the :setting:`task_compression` setting. Can be `gzip`, `bzip2`, or any custom compression scheme that has been registered with the :mod:`kombu.compression` registry. Please see :ref:`calling-compression` for more information. .. attribute:: Task.backend The result store backend to use for this task. An instance of one of the backend classes in `celery.backends`. Defaults to `app.backend`, defined by the :setting:`result_backend` setting. .. attribute:: Task.acks_late If set to :const:`True` messages for this task will be acknowledged **after** the task has been executed, not *just before* (the default behavior). Note: This means the task may be executed multiple times should the worker crash in the middle of execution. Make sure your tasks are :term:`idempotent`. The global default can be overridden by the :setting:`task_acks_late` setting. .. _task-track-started: .. attribute:: Task.track_started If :const:`True` the task will report its status as "started" when the task is executed by a worker. The default value is :const:`False` as the normal behavior is to not report that level of granularity. Tasks are either pending, finished, or waiting to be retried. Having a "started" status can be useful when there are long-running tasks and there's a need to report what task is currently running. The host name and process id of the worker executing the task will be available in the state meta-data (e.g., `result.info['pid']`). The global default can be overridden by the :setting:`task_track_started` setting. .. seealso:: The API reference for :class:`~@Task`. .. _task-states: States ====== Celery can keep track of the task's current state. The state also contains the result of a successful task, or the exception and traceback information of a failed task. There are several *result backends* to choose from, and they all have different strengths and weaknesses (see :ref:`task-result-backends`). During its lifetime a task will transition through several possible states, and each state may have arbitrary meta-data attached to it. When a task moves into a new state the previous state is forgotten about, but some transitions can be deduced (e.g., a task now in the :state:`FAILURE` state is implied to have been in the :state:`STARTED` state at some point). There are also sets of states, like the set of :state:`FAILURE_STATES`, and the set of :state:`READY_STATES`.
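These sets support ordinary membership tests; a quick sketch, assuming ``res`` is an :class:`~celery.result.AsyncResult` instance:

.. code-block:: python

    from celery import states

    if res.state in states.READY_STATES:
        # the task has finished, successfully or not
        ...
    if res.state in states.PROPAGATE_STATES:
        # res.get() would re-raise the task's exception
        ...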
The client uses the membership of these sets to decide whether the exception should be re-raised (:state:`PROPAGATE_STATES`), or whether the state can be cached (it can if the task is ready). You can also define :ref:`custom-states`. .. _task-result-backends: Result Backends --------------- If you want to keep track of tasks or need the return values, then Celery must store or send the states somewhere so that they can be retrieved later. There are several built-in result backends to choose from: SQLAlchemy/Django ORM, Memcached, RabbitMQ/QPid (``rpc``), and Redis -- or you can define your own. No backend works well for every use case. You should read about the strengths and weaknesses of each backend, and choose the most appropriate for your needs. .. seealso:: :ref:`conf-result-backend` RPC Result Backend (RabbitMQ/QPid) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The RPC result backend (`rpc://`) is special as it doesn't actually *store* the states, but rather sends them as messages. This is an important difference as it means that a result *can only be retrieved once*, and *only by the client that initiated the task*. Two different processes can't wait for the same result. Even with that limitation, it is an excellent choice if you need to receive state changes in real-time. Using messaging means the client doesn't have to poll for new states. The messages are transient (non-persistent) by default, so the results will disappear if the broker restarts. You can configure the result backend to send persistent messages using the :setting:`result_persistent` setting. Database Result Backend ~~~~~~~~~~~~~~~~~~~~~~~ Keeping state in the database can be convenient for many, especially for web applications with a database already in place, but it also comes with limitations. * Polling the database for new states is expensive, and so you should increase the polling intervals of operations, such as `result.get()`. * Some databases use a default transaction isolation level that isn't suitable for polling tables for changes. In MySQL the default transaction isolation level is `REPEATABLE-READ`: meaning the transaction won't see changes made by other transactions until the current transaction is committed. Changing that to the `READ-COMMITTED` isolation level is recommended. .. _task-builtin-states: Built-in States --------------- .. state:: PENDING PENDING ~~~~~~~ Task is waiting for execution or unknown. Any task id that's not known is implied to be in the pending state. .. state:: STARTED STARTED ~~~~~~~ Task has been started. Not reported by default, to enable please see :attr:`@Task.track_started`. :meta-data: `pid` and `hostname` of the worker process executing the task. .. state:: SUCCESS SUCCESS ~~~~~~~ Task has been successfully executed. :meta-data: `result` contains the return value of the task. :propagates: Yes :ready: Yes .. state:: FAILURE FAILURE ~~~~~~~ Task execution resulted in failure. :meta-data: `result` contains the exception occurred, and `traceback` contains the backtrace of the stack at the point when the exception was raised. :propagates: Yes .. state:: RETRY RETRY ~~~~~ Task is being retried. :meta-data: `result` contains the exception that caused the retry, and `traceback` contains the backtrace of the stack at the point when the exceptions was raised. :propagates: No .. state:: REVOKED REVOKED ~~~~~~~ Task has been revoked. :propagates: Yes .. _custom-states: Custom states ------------- You can easily define your own states, all you need is a unique name. 
The name of the state is usually an uppercase string. As an example you could have a look at the :mod:`abortable tasks <~celery.contrib.abortable>` which defines a custom :state:`ABORTED` state. Use :meth:`~@Task.update_state` to update a task's state:. .. code-block:: python @app.task(bind=True) def upload_files(self, filenames): for i, file in enumerate(filenames): if not self.request.called_directly: self.update_state(state='PROGRESS', meta={'current': i, 'total': len(filenames)}) Here I created the state `"PROGRESS"`, telling any application aware of this state that the task is currently in progress, and also where it is in the process by having `current` and `total` counts as part of the state meta-data. This can then be used to create progress bars for example. .. _pickling_exceptions: Creating pickleable exceptions ------------------------------ A rarely known Python fact is that exceptions must conform to some simple rules to support being serialized by the pickle module. Tasks that raise exceptions that aren't pickleable won't work properly when Pickle is used as the serializer. To make sure that your exceptions are pickleable the exception *MUST* provide the original arguments it was instantiated with in its ``.args`` attribute. The simplest way to ensure this is to have the exception call ``Exception.__init__``. Let's look at some examples that work, and one that doesn't: .. code-block:: python # OK: class HttpError(Exception): pass # BAD: class HttpError(Exception): def __init__(self, status_code): self.status_code = status_code # OK: class HttpError(Exception): def __init__(self, status_code): self.status_code = status_code Exception.__init__(self, status_code) # <-- REQUIRED So the rule is: For any exception that supports custom arguments ``*args``, ``Exception.__init__(self, *args)`` must be used. There's no special support for *keyword arguments*, so if you want to preserve keyword arguments when the exception is unpickled you have to pass them as regular args: .. code-block:: python class HttpError(Exception): def __init__(self, status_code, headers=None, body=None): self.status_code = status_code self.headers = headers self.body = body super(HttpError, self).__init__(status_code, headers, body) .. _task-semipredicates: Semipredicates ============== The worker wraps the task in a tracing function that records the final state of the task. There are a number of exceptions that can be used to signal this function to change how it treats the return of the task. .. _task-semipred-ignore: Ignore ------ The task may raise :exc:`~@Ignore` to force the worker to ignore the task. This means that no state will be recorded for the task, but the message is still acknowledged (removed from queue). This can be used if you want to implement custom revoke-like functionality, or manually store the result of a task. Example keeping revoked tasks in a Redis set: .. code-block:: python from celery.exceptions import Ignore @app.task(bind=True) def some_task(self): if redis.ismember('tasks.revoked', self.request.id): raise Ignore() Example that stores results manually: .. code-block:: python from celery import states from celery.exceptions import Ignore @app.task(bind=True) def get_tweets(self, user): timeline = twitter.get_timeline(user) if not self.request.called_directly: self.update_state(state=states.SUCCESS, meta=timeline) raise Ignore() .. _task-semipred-reject: Reject ------ The task may raise :exc:`~@Reject` to reject the task message using AMQPs ``basic_reject`` method. 
This won't have any effect unless :attr:`Task.acks_late` is enabled. Rejecting a message has the same effect as acking it, but some brokers may implement additional functionality that can be used. For example, RabbitMQ supports the concept of `Dead Letter Exchanges`_, where a queue can be configured to use a dead letter exchange to which rejected messages are redelivered. .. _`Dead Letter Exchanges`: http://www.rabbitmq.com/dlx.html Reject can also be used to re-queue messages, but please be very careful when using this as it can easily result in an infinite message loop. Example using reject when a task causes an out-of-memory condition: .. code-block:: python import errno from celery.exceptions import Reject @app.task(bind=True, acks_late=True) def render_scene(self, path): file = get_file(path) try: renderer.render_scene(file) # if the file is too big to fit in memory # we reject it so that it's redelivered to the dead letter exchange # and we can manually inspect the situation. except MemoryError as exc: raise Reject(exc, requeue=False) except OSError as exc: if exc.errno == errno.ENOMEM: raise Reject(exc, requeue=False) # For any other error we retry after 10 seconds. except Exception as exc: raise self.retry(exc=exc, countdown=10) Example re-queuing the message: .. code-block:: python from celery.exceptions import Reject @app.task(bind=True, acks_late=True) def requeues(self): if not self.request.delivery_info['redelivered']: raise Reject('no reason', requeue=True) print('received two times') Consult your broker documentation for more details about the ``basic_reject`` method. .. _task-semipred-retry: Retry ----- The :exc:`~@Retry` exception is raised by the ``Task.retry`` method to tell the worker that the task is being retried. .. _task-custom-classes: Custom task classes =================== All tasks inherit from the :class:`@Task` class. The :meth:`~@Task.run` method becomes the task body. As an example, the following code, .. code-block:: python @app.task def add(x, y): return x + y will do roughly this behind the scenes: .. code-block:: python class _AddTask(app.Task): def run(self, x, y): return x + y add = app.tasks[_AddTask.name] Instantiation ------------- A task is **not** instantiated for every request, but is registered in the task registry as a global instance. This means that the ``__init__`` constructor will only be called once per process, and that the task class is semantically closer to an Actor. If you have a task, .. code-block:: python from celery import Task class NaiveAuthenticateServer(Task): def __init__(self): self.users = {'george': 'password'} def run(self, username, password): try: return self.users[username] == password except KeyError: return False And you route every request to the same process, then it will keep state between requests. This can also be useful to cache resources. For example, a base Task class that caches a database connection: .. code-block:: python from celery import Task class DatabaseTask(Task): _db = None @property def db(self): if self._db is None: self._db = Database.connect() return self._db that can be added to tasks like this: .. code-block:: python @app.task(base=DatabaseTask) def process_rows(): for row in process_rows.db.table.all(): process_row(row) The ``db`` attribute of the ``process_rows`` task will then always stay the same in each process. Handlers -------- .. method:: after_return(self, status, retval, task_id, args, kwargs, einfo) Handler called after the task returns. :param status: Current task state.
:param retval: Task return value/exception. :param task_id: Unique id of the task. :param args: Original arguments for the task that returned. :param kwargs: Original keyword arguments for the task that returned. :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback (if any). The return value of this handler is ignored. .. method:: on_failure(self, exc, task_id, args, kwargs, einfo) This is run by the worker when the task fails. :param exc: The exception raised by the task. :param task_id: Unique id of the failed task. :param args: Original arguments for the task that failed. :param kwargs: Original keyword arguments for the task that failed. :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback. The return value of this handler is ignored. .. method:: on_retry(self, exc, task_id, args, kwargs, einfo) This is run by the worker when the task is to be retried. :param exc: The exception sent to :meth:`~@Task.retry`. :param task_id: Unique id of the retried task. :param args: Original arguments for the retried task. :param kwargs: Original keyword arguments for the retried task. :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback. The return value of this handler is ignored. .. method:: on_success(self, retval, task_id, args, kwargs) Run by the worker if the task executes successfully. :param retval: The return value of the task. :param task_id: Unique id of the executed task. :param args: Original arguments for the executed task. :param kwargs: Original keyword arguments for the executed task. The return value of this handler is ignored. .. _task-how-they-work: How it works ============ Here come the technical details. This part isn't something you need to know, but you may be interested. All defined tasks are listed in a registry. The registry contains a list of task names and their task classes. You can investigate this registry yourself: .. code-block:: pycon >>> from proj.celery import app >>> app.tasks {'celery.chord_unlock': <@task: celery.chord_unlock>, 'celery.backend_cleanup': <@task: celery.backend_cleanup>, 'celery.chord': <@task: celery.chord>} This is the list of tasks built-in to Celery. Note that tasks will only be registered when the module they're defined in is imported. The default loader imports any modules listed in the :setting:`imports` setting. The :meth:`@task` decorator is responsible for registering your task in the applications task registry. When tasks are sent, no actual function code is sent with it, just the name of the task to execute. When the worker then receives the message it can look up the name in its task registry to find the execution code. This means that your workers should always be updated with the same software as the client. This is a drawback, but the alternative is a technical challenge that's yet to be solved. .. _task-best-practices: Tips and Best Practices ======================= .. _task-ignore_results: Ignore results you don't want ----------------------------- If you don't care about the results of a task, be sure to set the :attr:`~@Task.ignore_result` option, as storing results wastes time and resources. .. code-block:: python @app.task(ignore_result=True) def mytask(): something() Results can even be disabled globally using the :setting:`task_ignore_result` setting. More optimization tips ---------------------- You find additional optimization tips in the :ref:`Optimizing Guide `. .. 
_task-synchronous-subtasks: Avoid launching synchronous subtasks ------------------------------------ Having a task wait for the result of another task is really inefficient, and may even cause a deadlock if the worker pool is exhausted. Make your design asynchronous instead, for example by using *callbacks*. **Bad**: .. code-block:: python @app.task def update_page_info(url): page = fetch_page.delay(url).get() info = parse_page.delay(url, page).get() store_page_info.delay(url, info) @app.task def fetch_page(url): return myhttplib.get(url) @app.task def parse_page(url, page): return myparser.parse_document(page) @app.task def store_page_info(url, info): return PageInfo.objects.create(url, info) **Good**: .. code-block:: python def update_page_info(url): # fetch_page -> parse_page -> store_page chain = fetch_page.s(url) | parse_page.s() | store_page_info.s(url) chain() @app.task() def fetch_page(url): return myhttplib.get(url) @app.task() def parse_page(page): return myparser.parse_document(page) @app.task(ignore_result=True) def store_page_info(info, url): PageInfo.objects.create(url=url, info=info) Here I instead created a chain of tasks by linking together different :func:`~celery.signature`'s. You can read about chains and other powerful constructs at :ref:`designing-workflows`. By default Celery won't let you run subtasks synchronously within a task, but in rare or extreme cases you might have to do so. **Warning**: running subtasks synchronously is not recommended! .. code-block:: python @app.task def update_page_info(url): page = fetch_page.delay(url).get(disable_sync_subtasks=False) info = parse_page.delay(url, page).get(disable_sync_subtasks=False) store_page_info.delay(url, info) @app.task def fetch_page(url): return myhttplib.get(url) @app.task def parse_page(url, page): return myparser.parse_document(page) @app.task def store_page_info(url, info): return PageInfo.objects.create(url, info) .. _task-performance-and-strategies: Performance and Strategies ========================== .. _task-granularity: Granularity ----------- The task granularity is the amount of computation needed by each subtask. In general it is better to split the problem up into many small tasks rather than have a few long-running tasks. With smaller tasks you can process more tasks in parallel and the tasks won't run long enough to block the worker from processing other waiting tasks. However, executing a task does have overhead. A message needs to be sent, data may not be local, etc. So if the tasks are too fine-grained the overhead added probably removes any benefit. .. seealso:: The book `Art of Concurrency`_ has a section dedicated to the topic of task granularity [AOC1]_. .. _`Art of Concurrency`: http://oreilly.com/catalog/9780596521547 .. [AOC1] Breshears, Clay. Section 2.2.1, "The Art of Concurrency". O'Reilly Media, Inc. May 15, 2009. ISBN-13 978-0-596-52153-0. .. _task-data-locality: Data locality ------------- The worker processing the task should be as close to the data as possible. The best would be to have a copy in memory, the worst would be a full transfer from another continent. If the data is far away, you could try to run another worker at that location, or if that's not possible, cache often-used data, or preload data you know is going to be used. The easiest way to share data between workers is to use a distributed cache system, like `memcached`_. .. seealso:: The paper `Distributed Computing Economics`_ by Jim Gray is an excellent introduction to the topic of data locality. ..
_`Distributed Computing Economics`: http://research.microsoft.com/pubs/70001/tr-2003-24.pdf .. _`memcached`: http://memcached.org/ .. _task-state: State ----- Since Celery is a distributed system, you can't know which process, or on what machine, the task will be executed. You can't even know if the task will run in a timely manner. The ancient async sayings tell us that "asserting the world is the responsibility of the task". What this means is that the world view may have changed since the task was requested, so the task is responsible for making sure the world is how it should be; If you have a task that re-indexes a search engine, and the search engine should only be re-indexed at maximum every 5 minutes, then it must be the task's responsibility to assert that, not the caller's. Another gotcha is Django model objects. They shouldn't be passed on as arguments to tasks. It's almost always better to re-fetch the object from the database when the task is running instead, as using old data may lead to race conditions. Imagine the following scenario where you have an article and a task that automatically expands some abbreviations in it: .. code-block:: python class Article(models.Model): title = models.CharField() body = models.TextField() @app.task def expand_abbreviations(article): article.body = article.body.replace('MyCorp', 'My Corporation') article.save() First, an author creates an article and saves it, then the author clicks on a button that initiates the abbreviation task: .. code-block:: pycon >>> article = Article.objects.get(id=102) >>> expand_abbreviations.delay(article) Now, the queue is very busy, so the task won't be run for another 2 minutes. In the meantime another author makes changes to the article, so when the task is finally run, the body of the article is reverted to the old version because the task had the old body in its argument. Fixing the race condition is easy: just use the article id instead, and re-fetch the article in the task body: .. code-block:: python @app.task def expand_abbreviations(article_id): article = Article.objects.get(id=article_id) article.body = article.body.replace('MyCorp', 'My Corporation') article.save() .. code-block:: pycon >>> expand_abbreviations.delay(article_id) There might even be performance benefits to this approach, as sending large messages may be expensive. .. _task-database-transactions: Database transactions --------------------- Let's have a look at another example: .. code-block:: python from django.db import transaction @transaction.commit_on_success def create_article(request): article = Article.objects.create() expand_abbreviations.delay(article.pk) This is a Django view creating an article object in the database, then passing the primary key to a task. It uses the `commit_on_success` decorator, which will commit the transaction when the view returns, or roll back if the view raises an exception. There's a race condition if the task starts executing before the transaction has been committed; the database object doesn't exist yet! The solution is to use the ``on_commit`` callback to launch your Celery task once all transactions have been committed successfully. .. code-block:: python from django.db.transaction import on_commit def create_article(request): article = Article.objects.create() on_commit(lambda: expand_abbreviations.delay(article.pk)) .. note:: ``on_commit`` is available in Django 1.9 and above, if you are using a version prior to that then the `django-transaction-hooks`_ library adds support for this. ..
_`django-transaction-hooks`: https://github.com/carljm/django-transaction-hooks .. _task-example: Example ======= Let's take a real-world example: a blog where comments posted need to be filtered for spam. When the comment is created, the spam filter runs in the background, so the user doesn't have to wait for it to finish. I have a Django blog application allowing comments on blog posts. I'll describe parts of the models/views and tasks for this application. ``blog/models.py`` ------------------ The comment model looks like this: .. code-block:: python from django.db import models from django.utils.translation import ugettext_lazy as _ class Comment(models.Model): name = models.CharField(_('name'), max_length=64) email_address = models.EmailField(_('email address')) homepage = models.URLField(_('home page'), blank=True, verify_exists=False) comment = models.TextField(_('comment')) pub_date = models.DateTimeField(_('Published date'), editable=False, auto_now_add=True) is_spam = models.BooleanField(_('spam?'), default=False, editable=False) class Meta: verbose_name = _('comment') verbose_name_plural = _('comments') In the view where the comment is posted, I first write the comment to the database, then I launch the spam filter task in the background. .. _task-example-blog-views: ``blog/views.py`` ----------------- .. code-block:: python from django import forms from django.http import HttpResponseRedirect from django.template.context import RequestContext from django.shortcuts import get_object_or_404, render_to_response from blog import tasks from blog.models import Comment, Entry class CommentForm(forms.ModelForm): class Meta: model = Comment def add_comment(request, slug, template_name='comments/create.html'): post = get_object_or_404(Entry, slug=slug) remote_addr = request.META.get('REMOTE_ADDR') if request.method == 'POST': form = CommentForm(request.POST, request.FILES) if form.is_valid(): comment = form.save() # Check spam asynchronously. tasks.spam_filter.delay(comment_id=comment.id, remote_addr=remote_addr) return HttpResponseRedirect(post.get_absolute_url()) else: form = CommentForm() context = RequestContext(request, {'form': form}) return render_to_response(template_name, context_instance=context) To filter spam in comments I use `Akismet`_, the service used to filter spam in comments posted to the free blog platform `WordPress`. `Akismet`_ is free for personal use, but for commercial use you need to pay. You have to sign up to their service to get an API key. To make API calls to `Akismet`_ I use the `akismet.py`_ library written by `Michael Foord`_. .. _task-example-blog-tasks: ``blog/tasks.py`` ----------------- .. code-block:: python from celery import Celery from akismet import Akismet from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.contrib.sites.models import Site from blog.models import Comment app = Celery(broker='amqp://') @app.task def spam_filter(comment_id, remote_addr=None): logger = spam_filter.get_logger() logger.info('Running spam filter for comment %s', comment_id) comment = Comment.objects.get(pk=comment_id) current_domain = Site.objects.get_current().domain akismet = Akismet(settings.AKISMET_KEY, 'http://{0}'.format(current_domain)) if not akismet.verify_key(): raise ImproperlyConfigured('Invalid AKISMET_KEY') is_spam = akismet.comment_check(user_ip=remote_addr, comment_content=comment.comment, comment_author=comment.name, comment_author_email=comment.email_address) if is_spam: comment.is_spam = True comment.save() return is_spam ..
_`Akismet`: http://akismet.com/faq/ .. _`akismet.py`: http://www.voidspace.org.uk/downloads/akismet.py .. _`Michael Foord`: http://www.voidspace.org.uk/ celery-4.1.0/docs/userguide/configuration.rst0000644000175000017500000020150313135426300021250 0ustar omeromer00000000000000.. _configuration: ============================ Configuration and defaults ============================ This document describes the configuration options available. If you're using the default loader, you must create the :file:`celeryconfig.py` module and make sure it's available on the Python path. .. contents:: :local: :depth: 2 .. _conf-example: Example configuration file ========================== This is an example configuration file to get you started. It should contain all you need to run a basic Celery set-up. .. code-block:: python ## Broker settings. broker_url = 'amqp://guest:guest@localhost:5672//' # List of modules to import when the Celery worker starts. imports = ('myapp.tasks',) ## Using the database to store task state and results. result_backend = 'db+sqlite:///results.db' task_annotations = {'tasks.add': {'rate_limit': '10/s'}} .. _conf-old-settings-map: New lowercase settings ====================== Version 4.0 introduced new lower case settings and setting organization. The major difference between previous versions, apart from the lower case names, are the renaming of some prefixes, like ``celerybeat_`` to ``beat_``, ``celeryd_`` to ``worker_``, and most of the top level ``celery_`` settings have been moved into a new ``task_`` prefix. Celery will still be able to read old configuration files, so there's no rush in moving to the new settings format. ===================================== ============================================== **Setting name** **Replace with** ===================================== ============================================== ``CELERY_ACCEPT_CONTENT`` :setting:`accept_content` ``CELERY_ENABLE_UTC`` :setting:`enable_utc` ``CELERY_IMPORTS`` :setting:`imports` ``CELERY_INCLUDE`` :setting:`include` ``CELERY_TIMEZONE`` :setting:`timezone` ``CELERYBEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval` ``CELERYBEAT_SCHEDULE`` :setting:`beat_schedule` ``CELERYBEAT_SCHEDULER`` :setting:`beat_scheduler` ``CELERYBEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename` ``CELERYBEAT_SYNC_EVERY`` :setting:`beat_sync_every` ``BROKER_URL`` :setting:`broker_url` ``BROKER_TRANSPORT`` :setting:`broker_transport` ``BROKER_TRANSPORT_OPTIONS`` :setting:`broker_transport_options` ``BROKER_CONNECTION_TIMEOUT`` :setting:`broker_connection_timeout` ``BROKER_CONNECTION_RETRY`` :setting:`broker_connection_retry` ``BROKER_CONNECTION_MAX_RETRIES`` :setting:`broker_connection_max_retries` ``BROKER_FAILOVER_STRATEGY`` :setting:`broker_failover_strategy` ``BROKER_HEARTBEAT`` :setting:`broker_heartbeat` ``BROKER_LOGIN_METHOD`` :setting:`broker_login_method` ``BROKER_POOL_LIMIT`` :setting:`broker_pool_limit` ``BROKER_USE_SSL`` :setting:`broker_use_ssl` ``CELERY_CACHE_BACKEND`` :setting:`cache_backend` ``CELERY_CACHE_BACKEND_OPTIONS`` :setting:`cache_backend_options` ``CASSANDRA_COLUMN_FAMILY`` :setting:`cassandra_table` ``CASSANDRA_ENTRY_TTL`` :setting:`cassandra_entry_ttl` ``CASSANDRA_KEYSPACE`` :setting:`cassandra_keyspace` ``CASSANDRA_PORT`` :setting:`cassandra_port` ``CASSANDRA_READ_CONSISTENCY`` :setting:`cassandra_read_consistency` ``CASSANDRA_SERVERS`` :setting:`cassandra_servers` ``CASSANDRA_WRITE_CONSISTENCY`` :setting:`cassandra_write_consistency` ``CELERY_COUCHBASE_BACKEND_SETTINGS`` 
:setting:`couchbase_backend_settings` ``CELERY_MONGODB_BACKEND_SETTINGS`` :setting:`mongodb_backend_settings` ``CELERY_EVENT_QUEUE_EXPIRES`` :setting:`event_queue_expires` ``CELERY_EVENT_QUEUE_TTL`` :setting:`event_queue_ttl` ``CELERY_EVENT_QUEUE_PREFIX`` :setting:`event_queue_prefix` ``CELERY_EVENT_SERIALIZER`` :setting:`event_serializer` ``CELERY_REDIS_DB`` :setting:`redis_db` ``CELERY_REDIS_HOST`` :setting:`redis_host` ``CELERY_REDIS_MAX_CONNECTIONS`` :setting:`redis_max_connections` ``CELERY_REDIS_PASSWORD`` :setting:`redis_password` ``CELERY_REDIS_PORT`` :setting:`redis_port` ``CELERY_RESULT_BACKEND`` :setting:`result_backend` ``CELERY_MAX_CACHED_RESULTS`` :setting:`result_cache_max` ``CELERY_MESSAGE_COMPRESSION`` :setting:`result_compression` ``CELERY_RESULT_EXCHANGE`` :setting:`result_exchange` ``CELERY_RESULT_EXCHANGE_TYPE`` :setting:`result_exchange_type` ``CELERY_TASK_RESULT_EXPIRES`` :setting:`result_expires` ``CELERY_RESULT_PERSISTENT`` :setting:`result_persistent` ``CELERY_RESULT_SERIALIZER`` :setting:`result_serializer` ``CELERY_RESULT_DBURI`` Use :setting:`result_backend` instead. ``CELERY_RESULT_ENGINE_OPTIONS`` :setting:`database_engine_options` ``[...]_DB_SHORT_LIVED_SESSIONS`` :setting:`database_short_lived_sessions` ``CELERY_RESULT_DB_TABLE_NAMES`` :setting:`database_db_names` ``CELERY_SECURITY_CERTIFICATE`` :setting:`security_certificate` ``CELERY_SECURITY_CERT_STORE`` :setting:`security_cert_store` ``CELERY_SECURITY_KEY`` :setting:`security_key` ``CELERY_TASK_ACKS_LATE`` :setting:`task_acks_late` ``CELERY_TASK_ALWAYS_EAGER`` :setting:`task_always_eager` ``CELERY_TASK_ANNOTATIONS`` :setting:`task_annotations` ``CELERY_TASK_COMPRESSION`` :setting:`task_compression` ``CELERY_TASK_CREATE_MISSING_QUEUES`` :setting:`task_create_missing_queues` ``CELERY_TASK_DEFAULT_DELIVERY_MODE`` :setting:`task_default_delivery_mode` ``CELERY_TASK_DEFAULT_EXCHANGE`` :setting:`task_default_exchange` ``CELERY_TASK_DEFAULT_EXCHANGE_TYPE`` :setting:`task_default_exchange_type` ``CELERY_TASK_DEFAULT_QUEUE`` :setting:`task_default_queue` ``CELERY_TASK_DEFAULT_RATE_LIMIT`` :setting:`task_default_rate_limit` ``CELERY_TASK_DEFAULT_ROUTING_KEY`` :setting:`task_default_routing_key` ``CELERY_TASK_EAGER_PROPAGATES`` :setting:`task_eager_propagates` ``CELERY_TASK_IGNORE_RESULT`` :setting:`task_ignore_result` ``CELERY_TASK_PUBLISH_RETRY`` :setting:`task_publish_retry` ``CELERY_TASK_PUBLISH_RETRY_POLICY`` :setting:`task_publish_retry_policy` ``CELERY_TASK_QUEUES`` :setting:`task_queues` ``CELERY_TASK_ROUTES`` :setting:`task_routes` ``CELERY_TASK_SEND_SENT_EVENT`` :setting:`task_send_sent_event` ``CELERY_TASK_SERIALIZER`` :setting:`task_serializer` ``CELERYD_TASK_SOFT_TIME_LIMIT`` :setting:`task_soft_time_limit` ``CELERYD_TASK_TIME_LIMIT`` :setting:`task_time_limit` ``CELERY_TRACK_STARTED`` :setting:`task_track_started` ``CELERYD_AGENT`` :setting:`worker_agent` ``CELERYD_AUTOSCALER`` :setting:`worker_autoscaler` ``CELERYD_CONCURRENCY`` :setting:`worker_concurrency` ``CELERYD_CONSUMER`` :setting:`worker_consumer` ``CELERY_WORKER_DIRECT`` :setting:`worker_direct` ``CELERY_DISABLE_RATE_LIMITS`` :setting:`worker_disable_rate_limits` ``CELERY_ENABLE_REMOTE_CONTROL`` :setting:`worker_enable_remote_control` ``CELERYD_HIJACK_ROOT_LOGGER`` :setting:`worker_hijack_root_logger` ``CELERYD_LOG_COLOR`` :setting:`worker_log_color` ``CELERYD_LOG_FORMAT`` :setting:`worker_log_format` ``CELERYD_WORKER_LOST_WAIT`` :setting:`worker_lost_wait` ``CELERYD_MAX_TASKS_PER_CHILD`` :setting:`worker_max_tasks_per_child` 
``CELERYD_POOL``                       :setting:`worker_pool`
``CELERYD_POOL_PUTLOCKS``              :setting:`worker_pool_putlocks`
``CELERYD_POOL_RESTARTS``              :setting:`worker_pool_restarts`
``CELERYD_PREFETCH_MULTIPLIER``        :setting:`worker_prefetch_multiplier`
``CELERYD_REDIRECT_STDOUTS``           :setting:`worker_redirect_stdouts`
``CELERYD_REDIRECT_STDOUTS_LEVEL``     :setting:`worker_redirect_stdouts_level`
``CELERYD_SEND_EVENTS``                :setting:`worker_send_task_events`
``CELERYD_STATE_DB``                   :setting:`worker_state_db`
``CELERYD_TASK_LOG_FORMAT``            :setting:`worker_task_log_format`
``CELERYD_TIMER``                      :setting:`worker_timer`
``CELERYD_TIMER_PRECISION``            :setting:`worker_timer_precision`
===================================== ==============================================

Configuration Directives
========================

.. _conf-datetime:

General settings
----------------

.. setting:: accept_content

``accept_content``
~~~~~~~~~~~~~~~~~~

Default: ``{'json'}`` (set, list, or tuple).

A white-list of content-types/serializers to allow.

If a message is received that's not in this list then
the message will be discarded with an error.

Since version 4.0 only ``json`` is enabled by default, but any content
type can be added, including pickle and yaml; if you do, make sure
untrusted parties don't have access to your broker.
See :ref:`guide-security` for more.

Example::

    # using serializer name
    accept_content = ['json']

    # or the actual content-type (MIME)
    accept_content = ['application/json']

Time and date settings
----------------------

.. setting:: enable_utc

``enable_utc``
~~~~~~~~~~~~~~

.. versionadded:: 2.5

Default: Enabled by default since version 3.0.

If enabled dates and times in messages will be converted to use
the UTC timezone.

Note that workers running Celery versions below 2.5 will assume a local
timezone for all messages, so only enable if all workers have been
upgraded.

.. setting:: timezone

``timezone``
~~~~~~~~~~~~

.. versionadded:: 2.5

Default: ``"UTC"``.

Configure Celery to use a custom time zone.
The timezone value can be any time zone supported by the :pypi:`pytz`
library.

If not set the UTC timezone is used. For backwards compatibility
there's also a :setting:`enable_utc` setting, and if this is set to
false the system local timezone is used instead.

.. _conf-tasks:

Task settings
-------------

.. setting:: task_annotations

``task_annotations``
~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.5

Default: :const:`None`.

This setting can be used to rewrite any task attribute from the
configuration. The setting can be a dict, or a list of annotation
objects that filter for tasks and return a map of attributes
to change.

This will change the ``rate_limit`` attribute for the ``tasks.add``
task:

.. code-block:: python

    task_annotations = {'tasks.add': {'rate_limit': '10/s'}}

or change the same for all tasks:

.. code-block:: python

    task_annotations = {'*': {'rate_limit': '10/s'}}

You can change methods too, for example the ``on_failure`` handler:

.. code-block:: python

    def my_on_failure(self, exc, task_id, args, kwargs, einfo):
        print('Oh no! Task failed: {0!r}'.format(exc))

    task_annotations = {'*': {'on_failure': my_on_failure}}

If you need more flexibility then you can use objects
instead of a dict to choose the tasks to annotate:

.. code-block:: python

    class MyAnnotate(object):

        def annotate(self, task):
            if task.name.startswith('tasks.'):
                return {'rate_limit': '10/s'}

    task_annotations = (MyAnnotate(), {'*': {'rate_limit': '10/s'}})

.. setting:: task_compression

``task_compression``
~~~~~~~~~~~~~~~~~~~~

Default: :const:`None`.

Default compression used for task messages.
Can be ``gzip``, ``bzip2`` (if available), or any custom
compression schemes registered in the Kombu compression registry.

The default is to send uncompressed messages.
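For example, a minimal sketch compressing all task messages with gzip:

.. code-block:: python

    # compress every task message; workers decompress transparently
    task_compression = 'gzip'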
.. setting:: task_protocol

``task_protocol``
~~~~~~~~~~~~~~~~~

.. versionadded:: 4.0

Default: 2 (since 4.0).

Set the default task message protocol version used to send tasks.
Supports protocols: 1 and 2.

Protocol 2 is supported by 3.1.24 and 4.x+.

.. setting:: task_serializer

``task_serializer``
~~~~~~~~~~~~~~~~~~~

Default: ``"json"`` (since 4.0, earlier: pickle).

A string identifying the default serialization method to use. Can be
`json` (default), `pickle`, `yaml`, `msgpack`, or any custom
serialization methods that have been registered with
:mod:`kombu.serialization.registry`.

.. seealso::

    :ref:`calling-serializers`.

.. setting:: task_publish_retry

``task_publish_retry``
~~~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.2

Default: Enabled.

Decides if publishing task messages will be retried in the case
of connection loss or other connection errors.
See also :setting:`task_publish_retry_policy`.

.. setting:: task_publish_retry_policy

``task_publish_retry_policy``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.2

Default: See :ref:`calling-retry`.

Defines the default policy when retrying publishing a task message in
the case of connection loss or other connection errors.

.. _conf-task-execution:

Task execution settings
-----------------------

.. setting:: task_always_eager

``task_always_eager``
~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

If this is :const:`True`, all tasks will be executed locally by blocking
until the task returns. ``apply_async()`` and ``Task.delay()`` will return
an :class:`~celery.result.EagerResult` instance, that emulates the API
and behavior of :class:`~celery.result.AsyncResult`, except the result
is already evaluated.

That is, tasks will be executed locally instead of being sent to
the queue.

.. setting:: task_eager_propagates

``task_eager_propagates``
~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

If this is :const:`True`, eagerly executed tasks (applied by `task.apply()`,
or when the :setting:`task_always_eager` setting is enabled), will
propagate exceptions.

It's the same as always running ``apply()`` with ``throw=True``.

.. setting:: task_remote_tracebacks

``task_remote_tracebacks``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

If enabled task results will include the worker's stack when re-raising
task errors.

This requires the :pypi:`tblib` library, that can be installed using
:command:`pip`:

.. code-block:: console

    $ pip install celery[tblib]

See :ref:`bundles` for information on combining multiple extension
requirements.

.. setting:: task_ignore_result

``task_ignore_result``
~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

Whether to store the task return values or not (tombstones).
If you still want to store errors, just not successful return values,
you can set :setting:`task_store_errors_even_if_ignored`.

.. setting:: task_store_errors_even_if_ignored

``task_store_errors_even_if_ignored``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

If set, the worker stores all task errors in the result store even if
:attr:`Task.ignore_result <celery.app.task.Task.ignore_result>` is on.

.. setting:: task_track_started

``task_track_started``
~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

If :const:`True` the task will report its status as 'started' when the task
is executed by a worker. The default value is :const:`False` as
the normal behavior is to not report that level of granularity. Tasks
are either pending, finished, or waiting to be retried. Having a 'started'
state can be useful when there are long-running tasks and there's a need
to report what task is currently running.
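A minimal sketch enabling the extra 'started' state:

.. code-block:: python

    # report 'started' in addition to the default states
    task_track_started = True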
.. setting:: task_time_limit

``task_time_limit``
~~~~~~~~~~~~~~~~~~~

Default: No time limit.

Task hard time limit in seconds. The worker processing the task will
be killed and replaced with a new one when this is exceeded.

.. setting:: task_soft_time_limit

``task_soft_time_limit``
~~~~~~~~~~~~~~~~~~~~~~~~

Default: No soft time limit.

Task soft time limit in seconds.

The :exc:`~@SoftTimeLimitExceeded` exception will be raised when this
is exceeded. For example, the task can catch this to clean up before the
hard time limit comes:

.. code-block:: python

    from celery.exceptions import SoftTimeLimitExceeded

    @app.task
    def mytask():
        try:
            return do_work()
        except SoftTimeLimitExceeded:
            cleanup_in_a_hurry()

.. setting:: task_acks_late

``task_acks_late``
~~~~~~~~~~~~~~~~~~

Default: Disabled.

Late ack means the task messages will be acknowledged **after** the task
has been executed, not *just before* (the default behavior).

.. seealso::

    FAQ: :ref:`faq-acks_late-vs-retry`.

.. setting:: task_reject_on_worker_lost

``task_reject_on_worker_lost``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

Even if :setting:`task_acks_late` is enabled, the worker will
acknowledge tasks when the worker process executing them abruptly
exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc).

Setting this to true allows the message to be re-queued instead,
so that the task will be executed again by the same worker, or another
worker.

.. warning::

    Enabling this can cause message loops; make sure you know
    what you're doing.

.. setting:: task_default_rate_limit

``task_default_rate_limit``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: No rate limit.

The global default rate limit for tasks.

This value is used for tasks that don't have a custom rate limit.

.. seealso::

    The :setting:`worker_disable_rate_limits` setting can
    disable all rate limits.

.. _conf-result-backend:

Task result backend settings
----------------------------

.. setting:: result_backend

``result_backend``
~~~~~~~~~~~~~~~~~~

Default: No result backend enabled by default.

The backend used to store task results (tombstones).
Can be one of the following:

* ``rpc``
    Send results back as AMQP messages.
    See :ref:`conf-rpc-result-backend`.

* ``database``
    Use a relational database supported by `SQLAlchemy`_.
    See :ref:`conf-database-result-backend`.

* ``redis``
    Use `Redis`_ to store the results.
    See :ref:`conf-redis-result-backend`.

* ``cache``
    Use `Memcached`_ to store the results.
    See :ref:`conf-cache-result-backend`.

* ``cassandra``
    Use `Cassandra`_ to store the results.
    See :ref:`conf-cassandra-result-backend`.

* ``elasticsearch``
    Use `Elasticsearch`_ to store the results.
    See :ref:`conf-elasticsearch-result-backend`.

* ``ironcache``
    Use `IronCache`_ to store the results.
    See :ref:`conf-ironcache-result-backend`.

* ``couchbase``
    Use `Couchbase`_ to store the results.
    See :ref:`conf-couchbase-result-backend`.

* ``couchdb``
    Use `CouchDB`_ to store the results.
    See :ref:`conf-couchdb-result-backend`.

* ``filesystem``
    Use a shared directory to store the results.
    See :ref:`conf-filesystem-result-backend`.

* ``consul``
    Use the `Consul`_ K/V store to store the results.
    See :ref:`conf-consul-result-backend`.

.. warning::

    While the AMQP result backend is very efficient, you must make sure
    you only receive the same result once. See :doc:`userguide/calling`.

.. _`SQLAlchemy`: http://sqlalchemy.org
.. _`Memcached`: http://memcached.org
.. _`Redis`: https://redis.io
.. _`Cassandra`: http://cassandra.apache.org/
.. _`Elasticsearch`: https://aws.amazon.com/elasticsearch-service/
.. _`IronCache`: http://www.iron.io/cache
.. _`CouchDB`: http://www.couchdb.com/
.. _`Couchbase`: https://www.couchbase.com/
.. _`Consul`: https://consul.io/
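For example, a minimal sketch selecting the Redis backend (assuming a
Redis server on localhost, using database ``1``):

.. code-block:: python

    result_backend = 'redis://localhost:6379/1'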
.. setting:: result_serializer

``result_serializer``
~~~~~~~~~~~~~~~~~~~~~

Default: ``json`` since 4.0 (earlier: pickle).

Result serialization format.

See :ref:`calling-serializers` for information about supported
serialization formats.

.. setting:: result_compression

``result_compression``
~~~~~~~~~~~~~~~~~~~~~~

Default: No compression.

Optional compression method used for task results.
Supports the same options as the :setting:`task_compression` setting.

.. setting:: result_expires

``result_expires``
~~~~~~~~~~~~~~~~~~

Default: Expire after 1 day.

Time (in seconds, or a :class:`~datetime.timedelta` object) after which
stored task tombstones will be deleted.

A built-in periodic task will delete the results after this time
(``celery.backend_cleanup``), assuming that ``celery beat`` is
enabled. The task runs daily at 4am.

A value of :const:`None` or 0 means results will never expire (depending
on backend specifications).

.. note::

    For the moment this only works with the AMQP, database, cache,
    and Redis backends.

    When using the database backend, ``celery beat`` must be
    running for the results to be expired.

.. setting:: result_cache_max

``result_cache_max``
~~~~~~~~~~~~~~~~~~~~

Default: Disabled by default.

Enables client caching of results.

This can be useful for the old deprecated 'amqp' backend where the result
is unavailable as soon as one result instance consumes it.

This is the total number of results to cache before older results are
evicted. A value of 0 or None means no limit, and a value of
:const:`-1` will disable the cache.

.. _conf-database-result-backend:

Database backend settings
-------------------------

Database URL Examples
~~~~~~~~~~~~~~~~~~~~~

To use the database backend you have to configure the
:setting:`result_backend` setting with a connection URL and the ``db+``
prefix:

.. code-block:: python

    result_backend = 'db+scheme://user:password@host:port/dbname'

Examples::

    # sqlite (filename)
    result_backend = 'db+sqlite:///results.sqlite'

    # mysql
    result_backend = 'db+mysql://scott:tiger@localhost/foo'

    # postgresql
    result_backend = 'db+postgresql://scott:tiger@localhost/mydatabase'

    # oracle
    result_backend = 'db+oracle://scott:tiger@127.0.0.1:1521/sidname'

Please see `Supported Databases`_ for a table of supported databases,
and `Connection String`_ for more information about connection
strings (this is the part of the URI that comes after the ``db+``
prefix).

.. _`Supported Databases`:
    http://www.sqlalchemy.org/docs/core/engines.html#supported-databases

.. _`Connection String`:
    http://www.sqlalchemy.org/docs/core/engines.html#database-urls

.. setting:: database_engine_options

``database_engine_options``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``{}`` (empty mapping).

To specify additional SQLAlchemy database engine options you can use
the :setting:`database_engine_options` setting::

    # echo enables verbose logging from SQLAlchemy.
    app.conf.database_engine_options = {'echo': True}

.. setting:: database_short_lived_sessions

``database_short_lived_sessions``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled.

Short lived sessions are disabled by default.
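A minimal sketch enabling the option:

.. code-block:: python

    # open and close a database session per operation
    database_short_lived_sessions = True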
If enabled they can drastically reduce performance, especially on systems processing lots of tasks. This option is useful on low-traffic workers that experience errors as a result of cached database connections going stale through inactivity. For example, intermittent errors like `(OperationalError) (2006, 'MySQL server has gone away')` can be fixed by enabling short lived sessions. This option only affects the database backend. .. setting:: database_table_names ``database_table_names`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). When SQLAlchemy is configured as the result backend, Celery automatically creates two tables to store result meta-data for tasks. This setting allows you to customize the table names: .. code-block:: python # use custom table names for the database result backend. database_table_names = { 'task': 'myapp_taskmeta', 'group': 'myapp_groupmeta', } .. _conf-rpc-result-backend: RPC backend settings -------------------- .. setting:: result_persistent ``result_persistent`` ~~~~~~~~~~~~~~~~~~~~~ Default: Disabled by default (transient messages). If set to :const:`True`, result messages will be persistent. This means the messages won't be lost after a broker restart. Example configuration ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python result_backend = 'rpc://' result_persistent = False .. _conf-cache-result-backend: Cache backend settings ---------------------- .. note:: The cache backend supports the :pypi:`pylibmc` and :pypi:`python-memcached` libraries. The latter is used only if :pypi:`pylibmc` isn't installed. Using a single Memcached server: .. code-block:: python result_backend = 'cache+memcached://127.0.0.1:11211/' Using multiple Memcached servers: .. code-block:: python result_backend = """ cache+memcached://172.19.26.240:11211;172.19.26.242:11211/ """.strip() The "memory" backend stores the cache in memory only: .. code-block:: python result_backend = 'cache' cache_backend = 'memory' .. setting:: cache_backend_options ``cache_backend_options`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: ``{}`` (empty mapping). You can set :pypi:`pylibmc` options using the :setting:`cache_backend_options` setting: .. code-block:: python cache_backend_options = { 'binary': True, 'behaviors': {'tcp_nodelay': True}, } .. setting:: cache_backend ``cache_backend`` ~~~~~~~~~~~~~~~~~ This setting is no longer used as it's now possible to specify the cache backend directly in the :setting:`result_backend` setting. .. _conf-redis-result-backend: Redis backend settings ---------------------- Configuring the backend URL ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: The Redis backend requires the :pypi:`redis` library. To install this package use :command:`pip`: .. code-block:: console $ pip install celery[redis] See :ref:`bundles` for information on combining multiple extension requirements. This backend requires the :setting:`result_backend` setting to be set to a Redis URL:: result_backend = 'redis://:password@host:port/db' For example:: result_backend = 'redis://localhost/0' is the same as:: result_backend = 'redis://' The fields of the URL are defined as follows: #. ``password`` Password used to connect to the database. #. ``host`` Host name or IP address of the Redis server (e.g., `localhost`). #. ``port`` Port to the Redis server. Default is 6379. #. ``db`` Database number to use. Default is 0. The db can include an optional leading slash. .. setting:: redis_backend_use_ssl ``redis_backend_use_ssl`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Disabled. The Redis backend supports SSL. 
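A minimal sketch, assuming hypothetical certificate paths and using the
same keys documented for the ``redis`` transport under
:setting:`broker_use_ssl`:

.. code-block:: python

    import ssl

    redis_backend_use_ssl = {
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/var/ssl/myca.pem',              # hypothetical CA bundle
        'ssl_certfile': '/var/ssl/redis-client-cert.pem',  # hypothetical client cert
        'ssl_keyfile': '/var/ssl/private/redis-key.pem',   # hypothetical client key
    }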
The valid values of this option are the same as those of
:setting:`broker_use_ssl`.

.. setting:: redis_max_connections

``redis_max_connections``
~~~~~~~~~~~~~~~~~~~~~~~~~

Default: No limit.

Maximum number of connections available in the Redis connection
pool used for sending and retrieving results.

.. setting:: redis_socket_connect_timeout

``redis_socket_connect_timeout``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 5.0.1

Default: :const:`None`

Socket timeout for connections to Redis from the result backend
in seconds (int/float).

.. setting:: redis_socket_timeout

``redis_socket_timeout``
~~~~~~~~~~~~~~~~~~~~~~~~

Default: 120.0 seconds.

Socket timeout for reading/writing operations to the Redis server
in seconds (int/float), used by the redis result backend.

.. _conf-cassandra-result-backend:

Cassandra backend settings
--------------------------

.. note::

    This Cassandra backend driver requires :pypi:`cassandra-driver`.

    To install, use :command:`pip`:

    .. code-block:: console

        $ pip install celery[cassandra]

    See :ref:`bundles` for information on combining multiple extension
    requirements.

This backend requires the following configuration directives to be set.

.. setting:: cassandra_servers

``cassandra_servers``
~~~~~~~~~~~~~~~~~~~~~

Default: ``[]`` (empty list).

List of ``host`` Cassandra servers. For example::

    cassandra_servers = ['localhost']

.. setting:: cassandra_port

``cassandra_port``
~~~~~~~~~~~~~~~~~~

Default: 9042.

Port to contact the Cassandra servers on.

.. setting:: cassandra_keyspace

``cassandra_keyspace``
~~~~~~~~~~~~~~~~~~~~~~

Default: None.

The key-space in which to store the results. For example::

    cassandra_keyspace = 'tasks_keyspace'

.. setting:: cassandra_table

``cassandra_table``
~~~~~~~~~~~~~~~~~~~

Default: None.

The table (column family) in which to store the results. For example::

    cassandra_table = 'tasks'

.. setting:: cassandra_read_consistency

``cassandra_read_consistency``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: None.

The read consistency used. Values can be ``ONE``, ``TWO``, ``THREE``,
``QUORUM``, ``ALL``, ``LOCAL_QUORUM``, ``EACH_QUORUM``, ``LOCAL_ONE``.

.. setting:: cassandra_write_consistency

``cassandra_write_consistency``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: None.

The write consistency used. Values can be ``ONE``, ``TWO``, ``THREE``,
``QUORUM``, ``ALL``, ``LOCAL_QUORUM``, ``EACH_QUORUM``, ``LOCAL_ONE``.

.. setting:: cassandra_entry_ttl

``cassandra_entry_ttl``
~~~~~~~~~~~~~~~~~~~~~~~

Default: None.

Time-to-live for status entries. They will expire and be removed that many
seconds after being added. A value of :const:`None` (default) means they will
never expire.

.. setting:: cassandra_auth_provider

``cassandra_auth_provider``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: :const:`None`.

AuthProvider class within ``cassandra.auth`` module to use. Values can
be ``PlainTextAuthProvider`` or ``SaslAuthProvider``.

.. setting:: cassandra_auth_kwargs

``cassandra_auth_kwargs``
~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``{}`` (empty mapping).

Named arguments to pass into the authentication provider. For example:

.. code-block:: python

    cassandra_auth_kwargs = {
        'username': 'cassandra',
        'password': 'cassandra',
    }

Example configuration
~~~~~~~~~~~~~~~~~~~~~

.. code-block:: python

    cassandra_servers = ['localhost']
    cassandra_keyspace = 'celery'
    cassandra_table = 'tasks'
    cassandra_read_consistency = 'ONE'
    cassandra_write_consistency = 'ONE'
    cassandra_entry_ttl = 86400
.. _conf-elasticsearch-result-backend:

Elasticsearch backend settings
------------------------------

To use `Elasticsearch`_ as the result backend you simply need to
configure the :setting:`result_backend` setting with the correct URL.

Example configuration
~~~~~~~~~~~~~~~~~~~~~

.. code-block:: python

    result_backend = 'elasticsearch://example.com:9200/index_name/doc_type'

.. setting:: elasticsearch_retry_on_timeout

``elasticsearch_retry_on_timeout``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: :const:`False`

Should timeout trigger a retry on a different node?

.. setting:: elasticsearch_max_retries

``elasticsearch_max_retries``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 3.

Maximum number of retries before an exception is propagated.

.. setting:: elasticsearch_timeout

``elasticsearch_timeout``
~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 10.0 seconds.

Global timeout, used by the Elasticsearch result backend.
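A minimal sketch combining these settings (the URL is the example above;
the retry values are illustrative, not recommendations):

.. code-block:: python

    result_backend = 'elasticsearch://example.com:9200/index_name/doc_type'
    elasticsearch_retry_on_timeout = True  # retry on a different node
    elasticsearch_max_retries = 5          # illustrative; the default is 3
    elasticsearch_timeout = 10.0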
.. _conf-riak-result-backend:

Riak backend settings
---------------------

.. note::

    The Riak backend requires the :pypi:`riak` library.

    To install this package use :command:`pip`:

    .. code-block:: console

        $ pip install celery[riak]

    See :ref:`bundles` for information on combining multiple extension
    requirements.

This backend requires the :setting:`result_backend`
setting to be set to a Riak URL::

    result_backend = 'riak://host:port/bucket'

For example::

    result_backend = 'riak://localhost/celery'

is the same as::

    result_backend = 'riak://'

The fields of the URL are defined as follows:

#. ``host``

    Host name or IP address of the Riak server (e.g., `'localhost'`).

#. ``port``

    Port to the Riak server using the protobuf protocol. Default is 8087.

#. ``bucket``

    Bucket name to use. Default is `celery`.
    The bucket needs to be a string with ASCII characters only.

Alternatively, this backend can be configured with the following
configuration directives.

.. setting:: riak_backend_settings

``riak_backend_settings``
~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``{}`` (empty mapping).

This is a dict supporting the following keys:

* ``host``

    The host name of the Riak server. Defaults to ``"localhost"``.

* ``port``

    The port the Riak server is listening to. Defaults to 8087.

* ``bucket``

    The bucket name to connect to. Defaults to "celery".

* ``protocol``

    The protocol to use to connect to the Riak server. This isn't
    configurable via :setting:`result_backend`.

.. _conf-dynamodb-result-backend:

AWS DynamoDB backend settings
-----------------------------

.. note::

    The Dynamodb backend requires the :pypi:`boto3` library.

    To install this package use :command:`pip`:

    .. code-block:: console

        $ pip install celery[dynamodb]

    See :ref:`bundles` for information on combining multiple extension
    requirements.

This backend requires the :setting:`result_backend`
setting to be set to a DynamoDB URL::

    result_backend = 'dynamodb://aws_access_key_id:aws_secret_access_key@region:port/table?read=n&write=m'

For example, specifying the AWS region and the table name::

    result_backend = 'dynamodb://@us-east-1/celery_results'

or retrieving AWS configuration parameters from the environment, using
the default table name (``celery``) and specifying read and write
provisioned throughput::

    result_backend = 'dynamodb://@/?read=5&write=5'

or using the `downloadable version `_ of DynamoDB
`locally `_::

    result_backend = 'dynamodb://@localhost:8000'

The fields of the URL are defined as follows:

#. ``aws_access_key_id & aws_secret_access_key``

    The credentials for accessing AWS API resources. These can also be
    resolved by the :pypi:`boto3` library from various sources, as
    described `here `_.

#. ``region``

    The AWS region, e.g. ``us-east-1`` or ``localhost`` for the
    `Downloadable Version `_. See the :pypi:`boto3` library
    `documentation `_ for definition options.

#. ``port``

    The listening port of the local DynamoDB instance, if you are using
    the downloadable version. If you have not specified the ``region``
    parameter as ``localhost``, setting this parameter has **no effect**.

#. ``table``

    Table name to use. Default is ``celery``.
    See the `DynamoDB Naming Rules `_ for information on
    the allowed characters and length.

#. ``read & write``

    The Read & Write Capacity Units for the created DynamoDB table.
    Default is ``1`` for both read and write. More details can be found
    in the `Provisioned Throughput documentation `_.

.. _conf-ironcache-result-backend:

IronCache backend settings
--------------------------

.. note::

    The IronCache backend requires the :pypi:`iron_celery` library:

    To install this package use :command:`pip`:

    .. code-block:: console

        $ pip install iron_celery

IronCache is configured via the URL provided in :setting:`result_backend`,
for example::

    result_backend = 'ironcache://project_id:token@'

Or to change the cache name::

    ironcache://project_id:token@/awesomecache

For more information, see: https://github.com/iron-io/iron_celery

.. _conf-couchbase-result-backend:

Couchbase backend settings
--------------------------

.. note::

    The Couchbase backend requires the :pypi:`couchbase` library.

    To install this package use :command:`pip`:

    .. code-block:: console

        $ pip install celery[couchbase]

    See :ref:`bundles` for information on combining multiple extension
    requirements.

This backend can be configured via the :setting:`result_backend`
set to a Couchbase URL:

.. code-block:: python

    result_backend = 'couchbase://username:password@host:port/bucket'

.. setting:: couchbase_backend_settings

``couchbase_backend_settings``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``{}`` (empty mapping).

This is a dict supporting the following keys:

* ``host``

    Host name of the Couchbase server. Defaults to ``localhost``.

* ``port``

    The port the Couchbase server is listening to. Defaults to ``8091``.

* ``bucket``

    The default bucket the Couchbase server is writing to.
    Defaults to ``default``.

* ``username``

    User name to authenticate to the Couchbase server as (optional).

* ``password``

    Password to authenticate to the Couchbase server (optional).

.. _conf-couchdb-result-backend:

CouchDB backend settings
------------------------

.. note::

    The CouchDB backend requires the :pypi:`pycouchdb` library:

    To install this package use :command:`pip`:

    .. code-block:: console

        $ pip install celery[couchdb]

    See :ref:`bundles` for information on combining multiple extension
    requirements.

This backend can be configured via the :setting:`result_backend`
set to a CouchDB URL::

    result_backend = 'couchdb://username:password@host:port/container'

The URL is formed out of the following parts:

* ``username``

    User name to authenticate to the CouchDB server as (optional).

* ``password``

    Password to authenticate to the CouchDB server (optional).

* ``host``

    Host name of the CouchDB server. Defaults to ``localhost``.

* ``port``

    The port the CouchDB server is listening to. Defaults to ``5984``.

* ``container``

    The default container the CouchDB server is writing to.
    Defaults to ``default``.
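For example, a minimal sketch with hypothetical credentials
(``admin``/``s3cr3t``) and container name:

.. code-block:: python

    result_backend = 'couchdb://admin:s3cr3t@localhost:5984/celery_results'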
.. _conf-filesystem-result-backend:

File-system backend settings
----------------------------

This backend can be configured using a file URL, for example::

    result_backend = 'file:///var/celery/results'

The configured directory needs to be shared and writable by all servers
using the backend.

If you're trying Celery on a single system you can simply use the
backend without any further configuration. For larger clusters you could
use NFS, `GlusterFS`_, CIFS, `HDFS`_ (using FUSE), or any other
file-system.

.. _`GlusterFS`: http://www.gluster.org/
.. _`HDFS`: http://hadoop.apache.org/

.. _conf-consul-result-backend:

Consul K/V store backend settings
---------------------------------

The Consul backend can be configured using a URL, for example::

    result_backend = 'consul://localhost:8500/'

The backend will store results in the K/V store of Consul
as individual keys.

The backend supports auto expire of results using TTLs in Consul.

.. _conf-messaging:

Message Routing
---------------

.. _conf-messaging-routing:

.. setting:: task_queues

``task_queues``
~~~~~~~~~~~~~~~

Default: :const:`None` (queue taken from default queue settings).

Most users will not want to specify this setting and should rather use
the :ref:`automatic routing facilities <routing-automatic>`.

If you really want to configure advanced routing, this setting should
be a list of :class:`kombu.Queue` objects the worker will consume from
(see the sketch below).

Note that this setting can be overridden per worker via the
:option:`-Q <celery worker -Q>` option, or individual queues from this
list (by name) can be excluded using the :option:`-X <celery worker -X>`
option.

Also see :ref:`routing-basics` for more information.

The default is a queue/exchange/binding key of ``celery``, with
exchange type ``direct``.

See also :setting:`task_routes`.
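For example, a minimal sketch of an advanced setup using
:class:`kombu.Queue` objects (the queue names, exchanges, and routing
keys are hypothetical):

.. code-block:: python

    from kombu import Exchange, Queue

    task_queues = [
        Queue('default', Exchange('default', type='direct'),
              routing_key='default'),
        Queue('media', Exchange('media', type='direct'),
              routing_key='media.video'),
    ]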
.. setting:: task_routes

``task_routes``
~~~~~~~~~~~~~~~

Default: :const:`None`.

A list of routers, or a single router used to route tasks to queues.

When deciding the final destination of a task the routers are consulted
in order.

A router can be specified as either:

* A function with the signature ``(name, args, kwargs, options, task=None, **kwargs)``
* A string providing the path to a router function.
* A dict containing router specification:
    Will be converted to a :class:`celery.routes.MapRoute` instance.
* A list of ``(pattern, route)`` tuples:
    Will be converted to a :class:`celery.routes.MapRoute` instance.

Examples:

.. code-block:: python

    task_routes = {
        'celery.ping': 'default',
        'mytasks.add': 'cpu-bound',
        'feed.tasks.*': 'feeds',                           # <-- glob pattern
        re.compile(r'(image|video)\.tasks\..*'): 'media',  # <-- regex
        'video.encode': {
            'queue': 'video',
            'exchange': 'media',
            'routing_key': 'media.video.encode',
        },
    }

    task_routes = ('myapp.tasks.route_task', {'celery.ping': 'default'})

Where ``myapp.tasks.route_task`` could be:

.. code-block:: python

    def route_task(self, name, args, kwargs, options, task=None, **kw):
        if task == 'celery.ping':
            return {'queue': 'default'}

``route_task`` may return a string or a dict. A string then means
it's a queue name in :setting:`task_queues`, a dict means it's a custom
route.

When sending tasks, the routers are consulted in order. The first
router that doesn't return ``None`` is the route to use. The message
options are then merged with the found route settings, where the
router's settings have priority.

For example, if :func:`~celery.execute.apply_async` has these arguments:

.. code-block:: python

    Task.apply_async(immediate=False, exchange='video',
                     routing_key='video.compress')

and a router returns:

.. code-block:: python

    {'immediate': True, 'exchange': 'urgent'}

the final message options will be:

.. code-block:: python

    immediate=True, exchange='urgent', routing_key='video.compress'

(and any default message options defined in the
:class:`~celery.task.base.Task` class)

Values defined in :setting:`task_routes` have precedence over values
defined in :setting:`task_queues` when merging the two.

With the following settings:

.. code-block:: python

    task_queues = {
        'cpubound': {
            'exchange': 'cpubound',
            'routing_key': 'cpubound',
        },
    }

    task_routes = {
        'tasks.add': {
            'queue': 'cpubound',
            'routing_key': 'tasks.add',
            'serializer': 'json',
        },
    }

The final routing options for ``tasks.add`` will become:

.. code-block:: javascript

    {'exchange': 'cpubound',
     'routing_key': 'tasks.add',
     'serializer': 'json'}

See :ref:`routers` for more examples.

.. setting:: task_queue_ha_policy

``task_queue_ha_policy``
~~~~~~~~~~~~~~~~~~~~~~~~

:brokers: RabbitMQ

Default: :const:`None`.

This will set the default HA policy for a queue, and the value
can either be a string (usually ``all``):

.. code-block:: python

    task_queue_ha_policy = 'all'

Using 'all' will replicate the queue to all current nodes,
or you can give it a list of nodes to replicate to:

.. code-block:: python

    task_queue_ha_policy = ['rabbit@host1', 'rabbit@host2']

Using a list will implicitly set ``x-ha-policy`` to 'nodes' and
``x-ha-policy-params`` to the given list of nodes.

See http://www.rabbitmq.com/ha.html for more information.

.. setting:: task_queue_max_priority

``task_queue_max_priority``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

:brokers: RabbitMQ

Default: :const:`None`.

See :ref:`routing-options-rabbitmq-priorities`.

.. setting:: worker_direct

``worker_direct``
~~~~~~~~~~~~~~~~~

Default: Disabled.

This option enables a dedicated queue for every worker,
so that tasks can be routed to specific workers.

The queue name for each worker is automatically generated based on
the worker hostname and a ``.dq`` suffix, using the ``C.dq`` exchange.

For example the queue name for the worker with node name
``w1@example.com`` becomes::

    w1@example.com.dq

You can then route a task to a specific worker by specifying
the hostname as the routing key and the ``C.dq`` exchange::

    task_routes = {
        'tasks.add': {'exchange': 'C.dq', 'routing_key': 'w1@example.com'}
    }

.. setting:: task_create_missing_queues

``task_create_missing_queues``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Enabled.

If enabled (default), any queues specified that aren't defined in
:setting:`task_queues` will be automatically created.
See :ref:`routing-automatic`.

.. setting:: task_default_queue

``task_default_queue``
~~~~~~~~~~~~~~~~~~~~~~

Default: ``"celery"``.

The name of the default queue used by `.apply_async` if the message has
no route or no custom queue has been specified.

This queue must be listed in :setting:`task_queues`.
If :setting:`task_queues` isn't specified then it's automatically
created containing one queue entry, where this name is used as the name
of that queue.

.. seealso::

    :ref:`routing-changing-default-queue`

.. setting:: task_default_exchange

``task_default_exchange``
~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``"celery"``.

Name of the default exchange to use when no custom exchange is
specified for a key in the :setting:`task_queues` setting.

.. setting:: task_default_exchange_type

``task_default_exchange_type``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``"direct"``.

Default exchange type used when no custom exchange type is specified
for a key in the :setting:`task_queues` setting.
.. setting:: task_default_routing_key

``task_default_routing_key``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``"celery"``.

The default routing key used when no custom routing key
is specified for a key in the :setting:`task_queues` setting.

.. setting:: task_default_delivery_mode

``task_default_delivery_mode``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``"persistent"``.

Can be `transient` (messages not written to disk) or `persistent`
(written to disk).

.. _conf-broker-settings:

Broker Settings
---------------

.. setting:: broker_url

``broker_url``
~~~~~~~~~~~~~~

Default: ``"amqp://"``

Default broker URL. This must be a URL in the form of::

    transport://userid:password@hostname:port/virtual_host

Only the scheme part (``transport://``) is required, the rest
is optional, and defaults to the specific transport's default values.

The transport part is the broker implementation to use, and the
default is ``amqp`` (uses ``librabbitmq`` if installed or falls back to
``pyamqp``). There are also other choices available, including:
``redis://``, ``sqs://``, and ``qpid://``.

The scheme can also be a fully qualified path to your own transport
implementation::

    broker_url = 'proj.transports.MyTransport://localhost'

More than one broker URL, of the same transport, can also be specified.
The broker URLs can be passed in as a single string that's semicolon
delimited::

    broker_url = 'transport://userid:password@hostname:port//;transport://userid:password@hostname:port//'

Or as a list::

    broker_url = [
        'transport://userid:password@localhost:port//',
        'transport://userid:password@hostname:port//'
    ]

The brokers will then be used in the :setting:`broker_failover_strategy`.

See :ref:`kombu:connection-urls` in the Kombu documentation for more
information.

.. setting:: broker_read_url
.. setting:: broker_write_url

``broker_read_url`` / ``broker_write_url``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Taken from :setting:`broker_url`.

These settings can be configured instead of :setting:`broker_url`,
to specify different connection parameters for broker connections
used for consuming and producing.

Example::

    broker_read_url = 'amqp://user:pass@broker.example.com:56721'
    broker_write_url = 'amqp://user:pass@broker.example.com:56722'

Both options can also be specified as a list for failover alternates,
see :setting:`broker_url` for more information.

.. setting:: broker_failover_strategy

``broker_failover_strategy``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``"round-robin"``.

Default failover strategy for the broker Connection object. If
supplied, may map to a key in 'kombu.connection.failover_strategies',
or be a reference to any method that yields a single item from a
supplied list.

Example::

    # Random failover strategy
    def random_failover_strategy(servers):
        it = list(servers)  # don't modify callers list
        shuffle = random.shuffle
        for _ in repeat(None):
            shuffle(it)
            yield it[0]

    broker_failover_strategy = random_failover_strategy

.. setting:: broker_heartbeat

``broker_heartbeat``
~~~~~~~~~~~~~~~~~~~~

:transports supported: ``pyamqp``

Default: ``120.0`` (negotiated by server).

Note: This value is only used by the worker, clients do not use
a heartbeat at the moment.

It's not always possible to detect connection loss in a timely
manner using TCP/IP alone, so AMQP defines something called heartbeats
that's used both by the client and the broker to detect if
a connection was closed.
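For example, a minimal sketch requesting a 10 second heartbeat:

.. code-block:: python

    broker_heartbeat = 10  # heartbeat interval in seconds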
If the heartbeat value is 10 seconds, then
the heartbeat will be monitored at the interval specified
by the :setting:`broker_heartbeat_checkrate` setting (by default
this is set to double the rate of the heartbeat value,
so for a 10 second heartbeat, the heartbeat is checked every 5 seconds).

.. setting:: broker_heartbeat_checkrate

``broker_heartbeat_checkrate``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:transports supported: ``pyamqp``

Default: 2.0.

At intervals the worker will monitor that the broker hasn't missed
too many heartbeats. The rate at which this is checked is calculated
by dividing the :setting:`broker_heartbeat` value with this value,
so if the heartbeat is 10.0 and the rate is the default 2.0, the check
will be performed every 5 seconds (twice the heartbeat sending rate).

.. setting:: broker_use_ssl

``broker_use_ssl``
~~~~~~~~~~~~~~~~~~

:transports supported: ``pyamqp``, ``redis``

Default: Disabled.

Toggles SSL usage on broker connection and SSL settings.

The valid values for this option vary by transport.

``pyamqp``
__________

If ``True`` the connection will use SSL with default SSL settings.
If set to a dict, will configure SSL connection according to the
specified policy. The format used is Python's :func:`ssl.wrap_socket`
options.

Note that SSL socket is generally served on a separate port by the
broker.

Example providing a client cert and validating the server cert against
a custom certificate authority:

.. code-block:: python

    import ssl

    broker_use_ssl = {
        'keyfile': '/var/ssl/private/worker-key.pem',
        'certfile': '/var/ssl/amqp-server-cert.pem',
        'ca_certs': '/var/ssl/myca.pem',
        'cert_reqs': ssl.CERT_REQUIRED
    }

.. warning::

    Be careful using ``broker_use_ssl=True``. It's possible that your
    default configuration won't validate the server cert at all. Please
    read Python `ssl module security considerations `_.

``redis``
_________

The setting must be a dict with the following keys:

* ``ssl_cert_reqs`` (required): one of the ``SSLContext.verify_mode``
  values:
    * ``ssl.CERT_NONE``
    * ``ssl.CERT_OPTIONAL``
    * ``ssl.CERT_REQUIRED``
* ``ssl_ca_certs`` (optional): path to the CA certificate
* ``ssl_certfile`` (optional): path to the client certificate
* ``ssl_keyfile`` (optional): path to the client key

.. setting:: broker_pool_limit

``broker_pool_limit``
~~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.3

Default: 10.

The maximum number of connections that can be open in the connection
pool.

The pool is enabled by default since version 2.5, with a default limit
of ten connections. This number can be tweaked depending on the number
of threads/green-threads (eventlet/gevent) using a connection. For
example, if you're running eventlet with 1000 greenlets that use a
connection to the broker, contention can arise and you should consider
increasing the limit.

If set to :const:`None` or 0 the connection pool will be disabled and
connections will be established and closed for every use.

.. setting:: broker_connection_timeout

``broker_connection_timeout``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 4.0.

The default timeout in seconds before we give up establishing a
connection to the AMQP server. This setting is disabled when using
gevent.

.. setting:: broker_connection_retry

``broker_connection_retry``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Enabled.

Automatically try to re-establish the connection to the AMQP broker
if lost.

The time between retries is increased for each retry, and is
not exhausted before :setting:`broker_connection_max_retries` is
exceeded.

..
setting:: broker_connection_max_retries ``broker_connection_max_retries`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 100. Maximum number of retries before we give up re-establishing a connection to the AMQP broker. If this is set to :const:`0` or :const:`None`, we'll retry forever. .. setting:: broker_login_method ``broker_login_method`` ~~~~~~~~~~~~~~~~~~~~~~~ Default: ``"AMQPLAIN"``. Set custom amqp login method. .. setting:: broker_transport_options ``broker_transport_options`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: ``{}`` (empty mapping). A dict of additional options passed to the underlying transport. See your transport user manual for supported options (if any). Example setting the visibility timeout (supported by Redis and SQS transports): .. code-block:: python broker_transport_options = {'visibility_timeout': 18000} # 5 hours .. _conf-worker: Worker ------ .. setting:: imports ``imports`` ~~~~~~~~~~~ Default: ``[]`` (empty list). A sequence of modules to import when the worker starts. This is used to specify the task modules to import, but also to import signal handlers and additional remote control commands, etc. The modules will be imported in the original order. .. setting:: include ``include`` ~~~~~~~~~~~ Default: ``[]`` (empty list). Exact same semantics as :setting:`imports`, but can be used as a means to have different import categories. The modules in this setting are imported after the modules in :setting:`imports`. .. _conf-concurrency: .. setting:: worker_concurrency ``worker_concurrency`` ~~~~~~~~~~~~~~~~~~~~~~ Default: Number of CPU cores. The number of concurrent worker processes/threads/green threads executing tasks. If you're doing mostly I/O you can have more processes, but if mostly CPU-bound, try to keep it close to the number of CPUs on your machine. If not set, the number of CPUs/cores on the host will be used. .. setting:: worker_prefetch_multiplier ``worker_prefetch_multiplier`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 4. How many messages to prefetch at a time multiplied by the number of concurrent processes. The default is 4 (four messages for each process). The default setting is usually a good choice, however -- if you have very long running tasks waiting in the queue and you have to start the workers, note that the first worker to start will receive four times the number of messages initially. Thus the tasks may not be fairly distributed to the workers. To disable prefetching, set :setting:`worker_prefetch_multiplier` to 1. Changing that setting to 0 will allow the worker to keep consuming as many messages as it wants. For more on prefetching, read :ref:`optimizing-prefetch-limit` .. note:: Tasks with ETA/countdown aren't affected by prefetch limits. .. setting:: worker_lost_wait ``worker_lost_wait`` ~~~~~~~~~~~~~~~~~~~~ Default: 10.0 seconds. In some cases a worker may be killed without proper cleanup, and the worker may have published a result before terminating. This value specifies how long we wait for any missing results before raising a :exc:`@WorkerLostError` exception. .. setting:: worker_max_tasks_per_child ``worker_max_tasks_per_child`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Maximum number of tasks a pool worker process can execute before it's replaced with a new one. Default is no limit. .. setting:: worker_max_memory_per_child ``worker_max_memory_per_child`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: No limit. 
Type: int (kilobytes)

Maximum amount of resident memory, in kilobytes, that may be consumed
by a worker before it will be replaced by a new worker. If a single
task causes a worker to exceed this limit, the task will be
completed, and the worker will be replaced afterwards.

Example:

.. code-block:: python

    worker_max_memory_per_child = 12000  # 12MB

.. setting:: worker_disable_rate_limits

``worker_disable_rate_limits``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled (rate limits enabled).

Disable all rate limits, even if tasks have explicit rate limits set.

.. setting:: worker_state_db

``worker_state_db``
~~~~~~~~~~~~~~~~~~~

Default: :const:`None`.

Name of the file used to store persistent worker state (like revoked
tasks). Can be a relative or absolute path, but be aware that the
suffix `.db` may be appended to the file name (depending on Python
version).

Can also be set via the :option:`celery worker --statedb` argument.

.. setting:: worker_timer_precision

``worker_timer_precision``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 1.0 seconds.

Set the maximum time in seconds that the ETA scheduler can sleep
between rechecking the schedule.

Setting this value to 1 second means the scheduler's precision will
be 1 second. If you need near millisecond precision you can set this
to 0.1.

.. setting:: worker_enable_remote_control

``worker_enable_remote_control``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Enabled by default.

Specify if remote control of the workers is enabled.

.. _conf-events:

Events
------

.. setting:: worker_send_task_events

``worker_send_task_events``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled by default.

Send task-related events so that tasks can be monitored using tools
like `flower`. Sets the default value for the workers
:option:`-E <celery worker -E>` argument.

.. setting:: task_send_sent_event

``task_send_sent_event``
~~~~~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.2

Default: Disabled by default.

If enabled, a :event:`task-sent` event will be sent for every task so
tasks can be tracked before they're consumed by a worker.

.. setting:: event_queue_ttl

``event_queue_ttl``
~~~~~~~~~~~~~~~~~~~

:transports supported: ``amqp``

Default: 5.0 seconds.

Message expiry time in seconds (int/float) for messages sent to a
monitor client's event queue, after which they are deleted
(``x-message-ttl``).

For example, if this value is set to 10 then a message delivered to
this queue will be deleted after 10 seconds.

.. setting:: event_queue_expires

``event_queue_expires``
~~~~~~~~~~~~~~~~~~~~~~~

:transports supported: ``amqp``

Default: 60.0 seconds.

Expiry time in seconds (int/float) after which an unused monitor
client's event queue will be deleted (``x-expires``).

.. setting:: event_queue_prefix

``event_queue_prefix``
~~~~~~~~~~~~~~~~~~~~~~

Default: ``"celeryev"``.

The prefix to use for event receiver queue names.

.. setting:: event_serializer

``event_serializer``
~~~~~~~~~~~~~~~~~~~~

Default: ``"json"``.

Message serialization format used when sending event messages.

.. seealso::

    :ref:`calling-serializers`.

.. _conf-control:

Remote Control Commands
-----------------------

.. note::

    To disable remote control commands see
    the :setting:`worker_enable_remote_control` setting.

.. setting:: control_queue_ttl

``control_queue_ttl``
~~~~~~~~~~~~~~~~~~~~~

Default: 300.0

Time in seconds, before a message in a remote control command queue
will expire.

If using the default of 300 seconds, this means that if a remote
control command is sent and no worker picks it up within 300 seconds,
the command is discarded.
This setting also applies to remote control reply queues. .. setting:: control_queue_expires ``control_queue_expires`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Default: 10.0 Time in seconds, before an unused remote control command queue is deleted from the broker. This setting also applies to remote control reply queues. .. _conf-logging: Logging ------- .. setting:: worker_hijack_root_logger ``worker_hijack_root_logger`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2.2 Default: Enabled by default (hijack root logger). By default any previously configured handlers on the root logger will be removed. If you want to customize your own logging handlers, then you can disable this behavior by setting `worker_hijack_root_logger = False`. .. note:: Logging can also be customized by connecting to the :signal:`celery.signals.setup_logging` signal. .. setting:: worker_log_color ``worker_log_color`` ~~~~~~~~~~~~~~~~~~~~ Default: Enabled if app is logging to a terminal. Enables/disables colors in logging output by the Celery apps. .. setting:: worker_log_format ``worker_log_format`` ~~~~~~~~~~~~~~~~~~~~~ Default: .. code-block:: text "[%(asctime)s: %(levelname)s/%(processName)s] %(message)s" The format to use for log messages. See the Python :mod:`logging` module for more information about log formats. .. setting:: worker_task_log_format ``worker_task_log_format`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: .. code-block:: text "[%(asctime)s: %(levelname)s/%(processName)s] [%(task_name)s(%(task_id)s)] %(message)s" The format to use for log messages logged in tasks. See the Python :mod:`logging` module for more information about log formats. .. setting:: worker_redirect_stdouts ``worker_redirect_stdouts`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: Enabled by default. If enabled `stdout` and `stderr` will be redirected to the current logger. Used by :program:`celery worker` and :program:`celery beat`. .. setting:: worker_redirect_stdouts_level ``worker_redirect_stdouts_level`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`WARNING`. The log level output to `stdout` and `stderr` is logged as. Can be one of :const:`DEBUG`, :const:`INFO`, :const:`WARNING`, :const:`ERROR`, or :const:`CRITICAL`. .. _conf-security: Security -------- .. setting:: security_key ``security_key`` ~~~~~~~~~~~~~~~~ Default: :const:`None`. .. versionadded:: 2.5 The relative or absolute path to a file containing the private key used to sign messages when :ref:`message-signing` is used. .. setting:: security_certificate ``security_certificate`` ~~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`None`. .. versionadded:: 2.5 The relative or absolute path to an X.509 certificate file used to sign messages when :ref:`message-signing` is used. .. setting:: security_cert_store ``security_cert_store`` ~~~~~~~~~~~~~~~~~~~~~~~ Default: :const:`None`. .. versionadded:: 2.5 The directory containing X.509 certificates used for :ref:`message-signing`. Can be a glob with wild-cards, (for example :file:`/etc/certs/*.pem`). .. _conf-custom-components: Custom Component Classes (advanced) ----------------------------------- .. setting:: worker_pool ``worker_pool`` ~~~~~~~~~~~~~~~ Default: ``"prefork"`` (``celery.concurrency.prefork:TaskPool``). Name of the pool class used by the worker. .. admonition:: Eventlet/Gevent Never use this option to select the eventlet or gevent pool. You must use the :option:`-P ` option to :program:`celery worker` instead, to ensure the monkey patches aren't applied too late, causing things to break in strange ways. .. 
setting:: worker_pool_restarts

``worker_pool_restarts``
~~~~~~~~~~~~~~~~~~~~~~~~

Default: Disabled by default.

If enabled the worker pool can be restarted using the
:control:`pool_restart` remote control command.

.. setting:: worker_autoscaler

``worker_autoscaler``
~~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.2

Default: ``"celery.worker.autoscale:Autoscaler"``.

Name of the autoscaler class to use.

.. setting:: worker_consumer

``worker_consumer``
~~~~~~~~~~~~~~~~~~~

Default: ``"celery.worker.consumer:Consumer"``.

Name of the consumer class used by the worker.

.. setting:: worker_timer

``worker_timer``
~~~~~~~~~~~~~~~~

Default: ``"kombu.async.hub.timer:Timer"``.

Name of the ETA scheduler class used by the worker.
The default is set by the pool implementation.

.. _conf-celerybeat:

Beat Settings (:program:`celery beat`)
--------------------------------------

.. setting:: beat_schedule

``beat_schedule``
~~~~~~~~~~~~~~~~~

Default: ``{}`` (empty mapping).

The periodic task schedule used by :mod:`~celery.bin.beat`.
See :ref:`beat-entries`.

.. setting:: beat_scheduler

``beat_scheduler``
~~~~~~~~~~~~~~~~~~

Default: ``"celery.beat:PersistentScheduler"``.

The default scheduler class. May be set to
``"django_celery_beat.schedulers:DatabaseScheduler"`` for instance,
if used alongside the :pypi:`django-celery-beat` extension.

Can also be set via the :option:`celery beat -S` argument.

.. setting:: beat_schedule_filename

``beat_schedule_filename``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: ``"celerybeat-schedule"``.

Name of the file used by `PersistentScheduler` to store the last run
times of periodic tasks. Can be a relative or absolute path, but be
aware that the suffix `.db` may be appended to the file name (depending
on Python version).

Can also be set via the :option:`celery beat --schedule` argument.

.. setting:: beat_sync_every

``beat_sync_every``
~~~~~~~~~~~~~~~~~~~

Default: 0.

The number of periodic tasks that can be called before another database
sync is issued. A value of 0 (default) means sync based on timing --
with a default of 3 minutes as determined by ``scheduler.sync_every``.
If set to 1, beat will call sync after every task message sent.

.. setting:: beat_max_loop_interval

``beat_max_loop_interval``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Default: 0.

The maximum number of seconds :mod:`~celery.bin.beat` can sleep
between checking the schedule.

The default for this value is scheduler specific.
For the default Celery beat scheduler the value is 300 (5 minutes),
but for the :pypi:`django-celery-beat` database scheduler it's 5
seconds because the schedule may be changed externally, and so it must
take changes to the schedule into account.

Also when running Celery beat embedded (:option:`-B <celery worker -B>`)
on Jython as a thread the max interval is overridden and set to 1 so
that it's possible to shut down in a timely manner.
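For example, a minimal sketch tying the beat settings together (the
task name and schedule are hypothetical):

.. code-block:: python

    from celery.schedules import crontab

    beat_schedule = {
        'cleanup-every-morning': {
            'task': 'myapp.tasks.cleanup',           # hypothetical task
            'schedule': crontab(hour=4, minute=0),   # run daily at 4:00 a.m.
        },
    }
    beat_scheduler = 'celery.beat:PersistentScheduler'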
celery-4.1.0/docs/spelling_wordlist.txt0000644000175000017500000001140613130607475020172 0ustar omeromer00000000000000許邱翔 AMQP Adriaenssens Adrien Agris Ahmet Aitor Akira Alain Alcides Aleksandr Alexey Allard Alman Almeer Ameriks Andreas Andrey Andriy Aneil Areski Armin Artyom Atanasov Attias Attwood Autechre Axel Aziz Azovskov Babiy Bargen Baumgold Belaid Bence Berker Bevan Biel Bistuer Bolshakov Bouterse Bozorgkhan Brakhane Brendon Breshears Bridgen Briem Brodie Bryson Buckens Bujniewicz Buttu CPython Carvalho Cassandra Catalano Catalin Chamberlin Chiastic Chintomby Christoph Cipater Clowes Cobertura Codeb CouchDB Couchbase Cramer Cristian Cron Crontab Crontabs Czajka Danilo Daodao Dartiguelongue Davanum Davide Davidsson Deane Dees Dein Delalande Demir Django Dmitry Dubus Dudás Duggan Duryee Elasticsearch Engledew Eran Erway Esquivel Farrimond Farwell Fatih Feanil Fladischer Flavio Floering Fokau Frantisek Gao Garnero Gauvrit Gedminas Georgievsky Germán Gheem Gilles GitHub Gómez Goiri Gorbunov Grainger Greinhofer Grégoire Groner Grossi Guillaume Guinet Gunnlaugur Gylfason Haag Harnly Harrigan Haskins Helmers Helmig Henrik Heroku Hoch Hoeve Hogni Holop Homebrew Honza Hsad Hu Hynek IP Iacob Idan Ignas Illes Ilya Ionel IronCache Iurii Jaillet Jameel Janež Jelle Jellick Jerzy Jevnik Jiangmiao Jirka Johansson Julien Jython Kai Kalinowski Kamara Katz Khera KiB Kilgo Kirill Kiriukha Kirkham Kjartansson Klindukh Kombu Konstantin Konstantinos Kornelijus Korner Koshelev Kotlyarov Kouhei Koukopoulos Koval Kozera Kracekumar Kral Kriachko Krybus Krzysztof Kumar Kupershmidt Kuznetsov Lamport Langford Latitia Lavin Lawley Lebedev Ledesma Legrand Loic Luckie Maeda MaÅ›lanka Malinovsky Mallavarapu Manipon Marcio Maries Markey Markus Marlow Masiero Matsuzaki Maxime McGregor Melin Memcached Metzlar Mher Mickaël MikalajÅ«nas Milen Mitar Modrzejewski MongoDB Movsisyan MărieÈ™ Môshe Munin Nagurney Nextdoor Nik Nikolov Node.js Northway Nyby ORM O'Reilly Oblovatniy Omer Ordoquy Ori Parncutt Patrin Paulo Pavel Pavlovic Pearce Peksag Penhard Pepijn Permana Petersson Petrello Pika Piotr Podshumok Poissonnier Pomfrey Pär Pravec Pulec Pyro QoS Qpid Quarta RPC RSS Rabbaglietti RabbitMQ Rackspace Radek Raghuram Ramaraju Rao Raphaël Rattray Redis Remigiusz Remy Renberg Riak Ribeiro Rinat Rémy Robenolt Rodionoff Romuald Ronacher Rongze Rossi Rouberol Rudakou Rundstein SQLAlchemy SQS Sadaoui Savchenko Savvides Schlawack Schottdorf Schwarz Selivanov SemVer Seong Sergey Seungha Shigapov Slinckx Smirnov Solem Solt Sosnovskiy Srinivas Srinivasan Stas StateDB Steeve Sterre Streeter Sucu Sukrit Survila SysV Tadej Tallon Tamas Tantiras Taub Tewfik Theo Thrift Tikhonov Tobias Tochev Tocho Tsigularov Twomey URI Ullmann Unix Valentyn Vanderbauwhede Varona Vdb Veatch Vejrazka Verhagen Verstraaten Viamontes Viktor Vitaly Vixie Voronov Vos Vsevolod Webber Werkzeug Whitlock Widman Wieslander Wil Wiman Wun Yaroslav Younkins Yu Yurchuk Yury Yuval Zarowny Zatelepin Zaytsev Zhaorong Zhavoronkov Zhu Zoë Zoran abortable ack acked acking acks acyclic arg args arity async autocommit autodoc autoscale autoscaler autoscalers autoscaling backend backends backport backported backtrace bootstep bootsteps bufsize bugfix callbacks celerymon changelog chunking cipater committer committers compat conf config contrib coroutine coroutines cronjob cryptographic daemonization daemonize daemonizing dburi de deprecated deprecations der deserialization deserialize deserialized deserializes deserializing destructor distro Ãdám docstring docstrings embeddable 
encodable errbacks euid eventlet exc execv exitcode failover fanout filename gevent gid greenlet greenlets greenthreads hashable hostname http idempotence ident indices init initializer instantiation interoperability iterable js json kombu kwargs logfile login loglevel lookup memoization memoize memoized misconfiguration misconfigure misconfigured msgpack multi mutex mutexes natively nodename nullipotent optimizations persister pickleable pid pidbox pidfile pidfiles pluggable poller pre prefetch prefetched prefetching prefork preload preloading prepend prepended programmatically proj protobuf rdb reStructured rebased rebasing redelivered redelivery reentrancy reentrant refactor refactored refactoring referenceable regex regexes reloader resize resized resizing rtype runlevel runtime screenshot screenshots semipredicate semipredicates serializable serialized serializer serializers serializes serializing starmap stderr stdlib stdout subclasses subclassing submodule subtask subtasks supervisord symlink symlinked symlinks taskset timezones tracebacks tuple tuples uid Łukasz umask unacked undeliverable unencrypted unlink unlinked unlinks unmanaged unorderable unpickleable unpickled unregister unrepresentable unroutable untrusted username usernames utcoffset utils versa versioning wbits weakref weakrefs webhook webhooks writable yaml metavar const nargs dest questionark amongst requeue wildcard celery-4.1.0/docs/copyright.rst0000644000175000017500000000164313130607475016431 0ustar omeromer00000000000000Copyright ========= *Celery User Manual* by Ask Solem .. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN Copyright |copy| 2009-2016, Ask Solem. All rights reserved. This material may be copied or distributed only subject to the terms and conditions set forth in the `Creative Commons Attribution-ShareAlike 4.0 International <https://creativecommons.org/licenses/by-sa/4.0/legalcode>`_ license. You may share and adapt the material, even for commercial purposes, but you must give the original author credit. If you alter, transform, or build upon this work, you may distribute the resulting work only under the same license or a license compatible to this one. .. note:: While the *Celery* documentation is offered under the Creative Commons *Attribution-ShareAlike 4.0 International* license, the Celery *software* is offered under the `BSD License (3 Clause) <https://opensource.org/licenses/BSD-3-Clause>`_ celery-4.1.0/docs/sec/0000755000175000017500000000000013135426347014437 5ustar omeromer00000000000000celery-4.1.0/docs/sec/CELERYSA-0002.txt0000644000175000017500000000514613130607475016772 0ustar omeromer00000000000000========================================= CELERYSA-0002: Celery Security Advisory ========================================= :contact: security@celeryproject.org :CVE id: TBA :date: 2014-07-10 05:00:00 p.m. UTC Details ======= :package: celery :vulnerability: Environment error :problem type: local :risk: low :versions-affected: 2.5, 3.0, 3.1 Description =========== The built-in utility used to daemonize the Celery worker service sets an insecure umask by default (umask 0). This means that any files or directories created by the worker will end up having world-writable permissions. In practice this means that local users will be able to modify and possibly corrupt the files created by user tasks. This isn't immediately exploitable, but it can be if those files are later evaluated as a program, for example by a task that creates Python program files that are later executed. Patches are now available for all maintained versions (see below), and users are urged to upgrade, even if not directly affected.
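To illustrate the impact, you can compare the resulting file modes under the insecure default and under the conventional umask recommended in the workaround below -- a minimal sketch (the file paths are illustrative, not part of the advisory's official reproduction steps):

    import os

    os.umask(0)  # the insecure default set by the daemonization utility
    with open('/tmp/example-a.txt', 'w') as f:
        f.write('data')
    # prints 0o666 on Python 3 ('0666' on Python 2): world-writable
    print(oct(os.stat('/tmp/example-a.txt').st_mode & 0o777))

    os.umask(0o022)  # equivalent to the --umask=18 workaround below
    with open('/tmp/example-b.txt', 'w') as f:
        f.write('data')
    # prints 0o644: only the owner may write
    print(oct(os.stat('/tmp/example-b.txt').st_mode & 0o777))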
Acknowledgments =============== Special thanks to Red Hat for originally discovering and reporting the issue. Systems affected ================ Users of Celery versions 3.0 and 3.1 (except the recently released 3.1.13) are affected if daemonizing the Celery programs using the `--detach` argument or using the `celery multi` program to start workers in the background, without setting a custom `--umask` argument. Solution ======== NOTE: Not all users of Celery will use it to create files, but if you do then files may already have been created with insecure permissions. So after upgrading or applying the workaround, please make sure that files already created aren't world-writable. To work around the issue you can set a custom umask using the ``--umask`` argument: $ celery worker -l info --detach --umask=18 # (022) Or you can upgrade to a more recent version: - Users of the 3.1 series should upgrade to 3.1.13: * ``pip install -U celery``, or * ``easy_install -U celery``, or * https://pypi.python.org/pypi/celery/3.1.13 - Users of the 3.0 series should upgrade to 3.0.25: * ``pip install -U celery==3.0.25``, or * ``easy_install -U celery==3.0.25``, or * https://pypi.python.org/pypi/celery/3.0.25 Distribution package maintainers are urged to provide their users with updated packages. Please direct questions to the celery-users mailing-list: https://groups.google.com/group/celery-users/, or if you're planning to report a new security related issue we request that you keep the information confidential by contacting security@celeryproject.org instead. Thank you! celery-4.1.0/docs/sec/CELERYSA-0003.txt0000644000175000017500000000301713130607475016766 0ustar omeromer00000000000000========================================= CELERYSA-0003: Celery Security Advisory ========================================= :contact: security@celeryproject.org :CVE id: TBA :date: 2016-12-08 05:00:00 p.m. PST Details ======= :package: celery :vulnerability: Configuration Error :problem type: remote :risk: low :versions-affected: 4.0.0 Description =========== The default configuration in Celery 4.0.0 allowed for deserialization of pickled messages, even if the software is configured to send messages in the JSON format. The particular configuration in question is the `accept_content` setting, which by default was set to: app.conf.accept_content = ['json', 'pickle', 'msgpack', 'yaml'] The risk is still set to low considering that an attacker would require access to the message broker used to send messages to Celery workers. Systems affected ================ Users of Celery version 4.0.0 with no explicit `accept_content` setting. Solution ======== To work around the issue you can explicitly configure the accept_content setting: app.conf.accept_content = ['json'] Or you can upgrade to the Celery 4.0.1 version: $ pip install -U celery Distribution package maintainers are urged to provide their users with updated packages.
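After upgrading or applying the workaround, you can confirm the effective setting from a Python shell -- a quick sketch (``proj`` is a placeholder for your application module):

    >>> from proj.celery import app
    >>> app.conf.accept_content
    ['json']

A worker configured this way will refuse to deserialize messages sent with any other content type.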
Please direct questions to the celery-users mailing-list: https://groups.google.com/group/celery-users/, or if you're planning to report a new security related issue we request that you keep the information confidential by contacting security@celeryproject.org instead. Thank you! celery-4.1.0/docs/sec/CELERYSA-0001.txt0000644000175000017500000000556213130607475016773 0ustar omeromer00000000000000========================================= CELERYSA-0001: Celery Security Advisory ========================================= :contact: security@celeryproject.org :author: Ask Solem :CVE id: CVE-2011-4356 :date: 2011-11-25 04:35:00 p.m. GMT Details ======= :package: celery :vulnerability: privilege escalation :problem type: local :risk: medium :bug-no: Celery #544 :versions-affected: 2.1, 2.2, 2.3, 2.4 Description =========== The --uid and --gid arguments to the celeryd-multi, celeryd_detach, celerybeat, and celeryev programs shipped with Celery versions 2.1 and later weren't handled properly: only the effective user was changed, with the real id remaining unchanged. In practice, for affected users, the vulnerability means that malicious code loaded in the worker process would be allowed to escalate privileges. We take this issue seriously since the Pickle serializer used by default makes it possible to execute arbitrary code. We recommend that users take steps to secure their systems so that malicious users cannot abuse the message broker to send messages, or disable the pickle serializer used in Celery so that arbitrary code execution isn't possible. Patches are now available for all maintained versions (see below), and users are urged to upgrade, even if not directly affected. Systems affected ================ Users of Celery versions 2.1, 2.2, 2.3, and 2.4 (except the recently released 2.2.8, 2.3.4, and 2.4.4) who daemonize the Celery programs as the root user, using either: 1) the --uid or --gid arguments, or 2) the provided generic init-scripts with the environment variables CELERYD_USER or CELERYD_GROUP defined, are affected. Users using the Debian init-scripts, CentOS init-scripts, macOS launchctl scripts, Supervisor, or users not starting the programs as the root user are *not* affected. Solution ======== Users of the 2.4 series should upgrade to 2.4.4: * ``pip install -U celery``, or * ``easy_install -U celery``, or * https://pypi.python.org/pypi/celery/2.4.4 Users of the 2.3 series should upgrade to 2.3.4: * ``pip install -U celery==2.3.4``, or * ``easy_install -U celery==2.3.4``, or * https://pypi.python.org/pypi/celery/2.3.4 Users of the 2.2 series should upgrade to 2.2.8: * ``pip install -U celery==2.2.8``, or * ``easy_install -U celery==2.2.8``, or * https://pypi.python.org/pypi/celery/2.2.8 The 2.1 series is no longer being maintained, so we urge users of that series to upgrade to a more recent version. Distribution package maintainers are urged to provide their users with updated packages.
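After upgrading, you can verify that the real ids are now dropped along with the effective ids -- a quick sketch, run from within a task or the worker process (not part of the official fix):

    import os

    # On a patched version started with --uid/--gid, both values in each
    # pair report the unprivileged user; on affected versions the first
    # (real) id remains 0 when the worker was started as root.
    print(os.getuid(), os.geteuid())
    print(os.getgid(), os.getegid())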
Please direct questions to the celery-users mailing-list: https://groups.google.com/group/celery-users/, or if you're planning to report a security issue we request that you keep the information confidential by contacting security@celeryproject.org, so that a fix can be issued as quickly as possible. Thank you! celery-4.1.0/docs/tutorials/0000755000175000017500000000000013135426347015713 5ustar omeromer00000000000000celery-4.1.0/docs/tutorials/index.rst0000644000175000017500000000017113130607475017551 0ustar omeromer00000000000000=========== Tutorials =========== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 task-cookbook celery-4.1.0/docs/tutorials/debugging.html0000644000175000017500000000013313130607475020527 0ustar omeromer00000000000000Moved ===== This document has been moved into the userguide. See :ref:`guide-debugging`. celery-4.1.0/docs/tutorials/daemonizing.html0000644000175000017500000000012613130607475021102 0ustar omeromer00000000000000Moved ===== This document has been moved into the userguide. See :ref:`daemonizing`. celery-4.1.0/docs/tutorials/task-cookbook.rst0000644000175000017500000000521213130607475021211 0ustar omeromer00000000000000.. _cookbook-tasks: ================ Task Cookbook ================ .. contents:: :local: .. _cookbook-task-serial: Ensuring a task is only executed one at a time ============================================== You can accomplish this by using a lock. In this example we'll be using the cache framework to set a lock that's accessible for all workers. It's part of an imaginary RSS feed importer called `djangofeeds`. The task takes a feed URL as a single argument, and imports that feed into a Django model called `Feed`. We ensure that it's not possible for two or more workers to import the same feed at the same time by setting a cache key consisting of the MD5 check-sum of the feed URL. The cache key expires after some time in case something unexpected happens, and something always will... For this reason your task's run-time shouldn't exceed the timeout. .. note:: In order for this to work correctly you need to be using a cache backend where the ``.add`` operation is atomic. ``memcached`` is known to work well for this purpose. .. code-block:: python from celery import task from celery.five import monotonic from celery.utils.log import get_task_logger from contextlib import contextmanager from django.core.cache import cache from hashlib import md5 from djangofeeds.models import Feed logger = get_task_logger(__name__) LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes @contextmanager def memcache_lock(lock_id, oid): timeout_at = monotonic() + LOCK_EXPIRE - 3 # cache.add fails if the key already exists status = cache.add(lock_id, oid, LOCK_EXPIRE) try: yield status finally: # memcache delete is very slow, but we have to use it to take # advantage of using add() for atomic locking if monotonic() < timeout_at and status: # don't release the lock if we didn't acquire it, or if we # exceeded the timeout: that lessens the chance of releasing # an expired lock owned by someone else. cache.delete(lock_id) @task(bind=True) def import_feed(self, feed_url): # The cache key consists of the task name and the MD5 digest # of the feed URL (encoded, since md5 requires bytes on Python 3). feed_url_hexdigest = md5(feed_url.encode('utf-8')).hexdigest() lock_id = '{0}-lock-{1}'.format(self.name, feed_url_hexdigest) logger.debug('Importing feed: %s', feed_url) with memcache_lock(lock_id, self.app.oid) as acquired: if acquired: return Feed.objects.import_feed(feed_url).url logger.debug( 'Feed %s is already being imported by another worker', feed_url)
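The mutual exclusion provided by ``memcache_lock`` can also be demonstrated directly -- a small sketch, assuming the ``memcache_lock`` defined above and a configured cache backend with an atomic ``.add``:

.. code-block:: python

    with memcache_lock('my-feed-lock', 'owner-a') as acquired:
        assert acquired  # the first caller gets the lock
        with memcache_lock('my-feed-lock', 'owner-b') as acquired_again:
            assert not acquired_again  # cache.add() fails while it's held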
celery-4.1.0/docs/internals/0000755000175000017500000000000013135426347015664 5ustar omeromer00000000000000celery-4.1.0/docs/internals/index.rst0000644000175000017500000000031613130607475017523 0ustar omeromer00000000000000.. _internals: =========== Internals =========== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 guide deprecation worker protocol app-overview reference/index celery-4.1.0/docs/internals/protocol.rst0000644000175000017500000002322413130607475020260 0ustar omeromer00000000000000.. _message-protocol: =================== Message Protocol =================== .. contents:: :local: .. _message-protocol-task: .. _internals-task-message-protocol: Task messages ============= .. _message-protocol-task-v2: Version 2 --------- Definition ~~~~~~~~~~ .. code-block:: python properties = { 'correlation_id': uuid task_id, 'content_type': string mimetype, 'content_encoding': string encoding, # optional 'reply_to': string queue_or_url, } headers = { 'lang': string 'py' 'task': string task, 'id': uuid task_id, 'root_id': uuid root_id, 'parent_id': uuid parent_id, 'group': uuid group_id, # optional 'meth': string method_name, 'shadow': string alias_name, 'eta': iso8601 ETA, 'expires': iso8601 expires, 'retries': int retries, 'timelimit': (soft, hard), 'argsrepr': str repr(args), 'kwargsrepr': str repr(kwargs), 'origin': str nodename, } body = ( object[] args, Mapping kwargs, Mapping embed { 'callbacks': Signature[] callbacks, 'errbacks': Signature[] errbacks, 'chain': Signature[] chain, 'chord': Signature chord_callback, } ) Example ~~~~~~~ This example sends a task message using version 2 of the protocol: .. code-block:: python # chain: add(add(add(2, 2), 4), 8) == 2 + 2 + 4 + 8 import json import os import socket task_id = uuid() args = (2, 2) kwargs = {} basic_publish( message=json.dumps((args, kwargs, None)), application_headers={ 'lang': 'py', 'task': 'proj.tasks.add', 'argsrepr': repr(args), 'kwargsrepr': repr(kwargs), 'origin': '@'.join([str(os.getpid()), socket.gethostname()]), }, properties={ 'correlation_id': task_id, 'content_type': 'application/json', 'content_encoding': 'utf-8', }, ) Changes from version 1 ~~~~~~~~~~~~~~~~~~~~~~ - Protocol version detected by the presence of a ``task`` message header. - Support for multiple languages via the ``lang`` header. Worker may redirect the message to a worker that supports the language. - Meta-data moved to headers. This means that workers/intermediates can inspect the message and make decisions based on the headers without decoding the payload (that may be language specific, for example serialized by the Python specific pickle serializer). - Always UTC There's no ``utc`` flag anymore, so any time information missing a timezone will be expected to be in UTC time. - Body is only for language specific data. - Python stores args/kwargs and embedded signatures in body. - If a message uses raw encoding then the raw data will be passed as a single argument to the function. - Java/C, etc. can use a Thrift/protobuf document as the body - ``origin`` is the name of the node sending the task. - Dispatches to actor based on ``task``, ``meth`` headers ``meth`` is unused by Python, but may be used in the future to specify class+method pairs. - Chain gains a dedicated field. Reducing the chain into a recursive ``callbacks`` argument causes problems when the recursion limit is exceeded. This is fixed in the new message protocol by specifying a list of signatures; each task will then pop a task off the list when sending the next message: .. code-block:: python execute_task(message) chain = embed['chain'] if chain: sig = maybe_signature(chain.pop()) sig.apply_async(chain=chain) - ``correlation_id`` replaces ``task_id`` field. - ``root_id`` and ``parent_id`` fields help keep track of work-flows. - ``shadow`` lets you specify a different name for logs and monitors; this can be used for concepts like tasks that call a function specified as an argument:
.. code-block:: python from celery import Task from celery.utils.imports import qualname class PickleTask(Task): def unpack_args(self, fun, args=()): return fun, args def apply_async(self, args, kwargs, **options): fun, real_args = self.unpack_args(*args) return super(PickleTask, self).apply_async( (fun, real_args, kwargs), shadow=qualname(fun), **options ) @app.task(base=PickleTask) def call(fun, args, kwargs): return fun(*args, **kwargs) .. _message-protocol-task-v1: .. _task-message-protocol-v1: Version 1 --------- In version 1 of the protocol all fields are stored in the message body: meaning workers and intermediate consumers must deserialize the payload to read the fields. Message body ~~~~~~~~~~~~ * ``task`` :`string`: Name of the task. **required** * ``id`` :`string`: Unique id of the task (UUID). **required** * ``args`` :`list`: List of arguments. Will be an empty list if not provided. * ``kwargs`` :`dictionary`: Dictionary of keyword arguments. Will be an empty dictionary if not provided. * ``retries`` :`int`: Current number of times this task has been retried. Defaults to `0` if not specified. * ``eta`` :`string` (ISO 8601): Estimated time of arrival. This is the date and time in ISO 8601 format. If not provided the message isn't scheduled, but will be executed asap. * ``expires`` :`string` (ISO 8601): .. versionadded:: 2.0.2 Expiration date. This is the date and time in ISO 8601 format. If not provided the message will never expire. The message will be expired when the message is received and the expiration date has been exceeded. * ``taskset`` :`string`: The group this task is part of (if any). * ``chord`` :`Signature`: .. versionadded:: 2.3 Signifies that this task is one of the header parts of a chord. The value of this key is the body of the chord that should be executed when all of the tasks in the header have returned. * ``utc`` :`bool`: .. versionadded:: 2.5 If true, times use the UTC timezone; if not, the current local timezone should be used. * ``callbacks`` :`Signature`: .. versionadded:: 3.0 A list of signatures to call if the task exited successfully. * ``errbacks`` :`Signature`: .. versionadded:: 3.0 A list of signatures to call if an error occurs while executing the task. * ``timelimit`` :`(float, float)`: .. versionadded:: 3.1 Task execution time limit settings. This is a tuple of soft and hard time limit values (`int`/`float` or :const:`None` for no limit). Example value specifying a soft time limit of 3 seconds, and a hard time limit of 10 seconds:: {'timelimit': (3.0, 10.0)} Example message ~~~~~~~~~~~~~~~ This is an example invocation of a `celery.task.ping` task in json format: .. code-block:: javascript {"id": "4cc7438e-afd4-4f8f-a2f3-f46567e7ca77", "task": "celery.task.PingTask", "args": [], "kwargs": {}, "retries": 0, "eta": "2009-11-17T12:30:56.527191"} Task Serialization ------------------ Several types of serialization formats are supported using the `content_type` message header. The MIME-types supported by default are shown in the following table. =============== ================================= Scheme MIME Type =============== ================================= json application/json yaml application/x-yaml pickle application/x-python-serialize msgpack application/x-msgpack =============== =================================
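A client selects one of these serializers per message -- a quick sketch (``proj.tasks.add`` is a placeholder task name, and a configured ``app`` instance is assumed):

.. code-block:: python

    # the message body is encoded with msgpack instead of the default json
    app.send_task('proj.tasks.add', args=(2, 2), serializer='msgpack')

For the worker to accept such a message, the corresponding format must be listed in its :setting:`accept_content` setting.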
.. _message-protocol-event: Event Messages ============== Event messages are always JSON serialized and can contain arbitrary message body fields. Since version 4.0, the body can consist of either a single mapping (one event), or a list of mappings (multiple events). There are also standard fields that must always be present in an event message: Standard body fields -------------------- - *string* ``type`` The type of event. This is a string containing the *category* and *action* separated by a dash delimiter (e.g., ``task-succeeded``). - *string* ``hostname`` The fully qualified hostname of the host where the event occurred. - *unsigned long long* ``clock`` The logical clock value for this event (Lamport time-stamp). - *float* ``timestamp`` The UNIX time-stamp corresponding to the time of when the event occurred. - *signed short* ``utcoffset`` This field describes the timezone of the originating host, and is specified as the number of hours ahead of/behind UTC (e.g., -2 or +1). - *unsigned long long* ``pid`` The process id of the process the event originated in. Standard event types -------------------- For a list of standard event types and their fields see the :ref:`event-reference`. Example message --------------- These are the message fields for a ``task-succeeded`` event: .. code-block:: python properties = { 'routing_key': 'task.succeeded', 'exchange': 'celeryev', 'content_type': 'application/json', 'content_encoding': 'utf-8', 'delivery_mode': 1, } headers = { 'hostname': 'worker1@george.vandelay.com', } body = { 'type': 'task-succeeded', 'hostname': 'worker1@george.vandelay.com', 'pid': 6335, 'clock': 393912923921, 'timestamp': 1401717709.101747, 'utcoffset': -1, 'uuid': '9011d855-fdd1-4f8f-adb3-a413b499eafb', 'retval': '4', 'runtime': 0.0003212, } celery-4.1.0/docs/internals/reference/0000755000175000017500000000000013135426347017622 5ustar omeromer00000000000000celery-4.1.0/docs/internals/reference/celery.utils.saferepr.rst0000644000175000017500000000040013130607475024576 0ustar omeromer00000000000000=========================================== ``celery.utils.saferepr`` =========================================== .. contents:: :local: .. currentmodule:: celery.utils.saferepr .. automodule:: celery.utils.saferepr :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.base.rst0000644000175000017500000000036313130607475024321 0ustar omeromer00000000000000===================================== ``celery.backends.base`` ===================================== .. contents:: :local: .. currentmodule:: celery.backends.base .. automodule:: celery.backends.base :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.cache.rst0000644000175000017500000000040013130607475024446 0ustar omeromer00000000000000=========================================== ``celery.backends.cache`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.cache .. automodule:: celery.backends.cache :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.worker.heartbeat.rst0000644000175000017500000000041213130607475025100 0ustar omeromer00000000000000============================================= ``celery.worker.heartbeat`` ============================================= .. contents:: :local: .. currentmodule:: celery.worker.heartbeat .. automodule:: celery.worker.heartbeat :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.events.dumper.rst0000644000175000017500000000037313130607475024436 0ustar omeromer00000000000000========================================== ``celery.events.dumper`` ========================================== .. contents:: :local: ..
currentmodule:: celery.events.dumper .. automodule:: celery.events.dumper :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.text.rst0000644000175000017500000000041013130607475023752 0ustar omeromer00000000000000===================================================== ``celery.utils.text`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.text .. automodule:: celery.utils.text :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.security.certificate.rst0000644000175000017500000000042013130607475025760 0ustar omeromer00000000000000========================================== ``celery.security.certificate`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.certificate .. automodule:: celery.security.certificate :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.cassandra.rst0000644000175000017500000000042613130607475025346 0ustar omeromer00000000000000================================================ ``celery.backends.cassandra`` ================================================ .. contents:: :local: .. currentmodule:: celery.backends.cassandra .. automodule:: celery.backends.cassandra :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.mongodb.rst0000644000175000017500000000041013130607475025025 0ustar omeromer00000000000000============================================ ``celery.backends.mongodb`` ============================================ .. contents:: :local: .. currentmodule:: celery.backends.mongodb .. automodule:: celery.backends.mongodb :members: :undoc-members: celery-4.1.0/docs/internals/reference/index.rst0000644000175000017500000000355213130607475021466 0ustar omeromer00000000000000=========================== Internal Module Reference =========================== :Release: |version| :Date: |today| .. 
toctree:: :maxdepth: 1 celery.worker.components celery.worker.loops celery.worker.heartbeat celery.worker.control celery.worker.pidbox celery.worker.autoscale celery.concurrency celery.concurrency.solo celery.concurrency.prefork celery.concurrency.eventlet celery.concurrency.gevent celery.concurrency.base celery.backends celery.backends.base celery.backends.async celery.backends.rpc celery.backends.database celery.backends.amqp celery.backends.cache celery.backends.consul celery.backends.couchdb celery.backends.mongodb celery.backends.elasticsearch celery.backends.redis celery.backends.riak celery.backends.cassandra celery.backends.couchbase celery.backends.dynamodb celery.backends.filesystem celery.app.trace celery.app.annotations celery.app.routes celery.security.certificate celery.security.key celery.security.serialization celery.security.utils celery.events.snapshot celery.events.cursesmon celery.events.dumper celery.backends.database.models celery.backends.database.session celery.utils celery.utils.abstract celery.utils.collections celery.utils.nodenames celery.utils.deprecated celery.utils.functional celery.utils.graph celery.utils.objects celery.utils.term celery.utils.time celery.utils.iso8601 celery.utils.saferepr celery.utils.serialization celery.utils.sysinfo celery.utils.threads celery.utils.timer2 celery.utils.imports celery.utils.log celery.utils.text celery.utils.dispatch celery.utils.dispatch.signal celery.utils.dispatch.weakref_backports celery.platforms celery._state celery-4.1.0/docs/internals/reference/celery.utils.rst0000644000175000017500000000030313130607475022770 0ustar omeromer00000000000000========================== ``celery.utils`` ========================== .. contents:: :local: .. currentmodule:: celery.utils .. automodule:: celery.utils :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.term.rst0000644000175000017500000000041013130607475023735 0ustar omeromer00000000000000===================================================== ``celery.utils.term`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.term .. automodule:: celery.utils.term :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery._state.rst0000644000175000017500000000034213130607475023112 0ustar omeromer00000000000000======================================== ``celery._state`` ======================================== .. contents:: :local: .. currentmodule:: celery._state .. automodule:: celery._state :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.app.trace.rst0000644000175000017500000000035713130607475023516 0ustar omeromer00000000000000========================================== ``celery.app.trace`` ========================================== .. contents:: :local: .. currentmodule:: celery.app.trace .. automodule:: celery.app.trace :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.app.annotations.rst0000644000175000017500000000040113130607475024743 0ustar omeromer00000000000000========================================== ``celery.app.annotations`` ========================================== .. contents:: :local: .. currentmodule:: celery.app.annotations .. 
automodule:: celery.app.annotations :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.redis.rst0000644000175000017500000000037613130607475024521 0ustar omeromer00000000000000========================================== ``celery.backends.redis`` ========================================== .. contents:: :local: .. currentmodule:: celery.backends.redis .. automodule:: celery.backends.redis :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.abstract.rst0000644000175000017500000000040013130607475024570 0ustar omeromer00000000000000=========================================== ``celery.utils.abstract`` =========================================== .. contents:: :local: .. currentmodule:: celery.utils.abstract .. automodule:: celery.utils.abstract :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.database.session.rst0000644000175000017500000000043313130607475026633 0ustar omeromer00000000000000======================================== ``celery.backends.database.session`` ======================================== .. contents:: :local: .. currentmodule:: celery.backends.database.session .. automodule:: celery.backends.database.session :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.concurrency.gevent.rst0000644000175000017500000000046013130607475025455 0ustar omeromer00000000000000============================================================= ``celery.concurrency.gevent`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.gevent .. automodule:: celery.concurrency.gevent :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.functional.rst0000644000175000017500000000043213130607475025134 0ustar omeromer00000000000000===================================================== ``celery.utils.functional`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.functional .. automodule:: celery.utils.functional :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.rst0000644000175000017500000000031613130607475023406 0ustar omeromer00000000000000=========================== ``celery.backends`` =========================== .. contents:: :local: .. currentmodule:: celery.backends .. automodule:: celery.backends :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.elasticsearch.rst0000644000175000017500000000043013130607475026214 0ustar omeromer00000000000000=========================================== ``celery.backends.elasticsearch`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.elasticsearch .. automodule:: celery.backends.elasticsearch :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.worker.pidbox.rst0000644000175000017500000000035713130607475024436 0ustar omeromer00000000000000==================================== ``celery.worker.pidbox`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.pidbox .. automodule:: celery.worker.pidbox :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.deprecated.rst0000644000175000017500000000040413130607475025071 0ustar omeromer00000000000000========================================== ``celery.utils.deprecated`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.deprecated .. 
automodule:: celery.utils.deprecated :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.concurrency.eventlet.rst0000644000175000017500000000046613130607475026021 0ustar omeromer00000000000000============================================================= ``celery.concurrency.eventlet`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.eventlet .. automodule:: celery.concurrency.eventlet :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.database.rst0000644000175000017500000000044513130607475025154 0ustar omeromer00000000000000========================================================= ``celery.backends.database`` ========================================================= .. contents:: :local: .. currentmodule:: celery.backends.database .. automodule:: celery.backends.database :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.filesystem.rst0000644000175000017500000000041513130607475025571 0ustar omeromer00000000000000========================================== ``celery.backends.filesystem`` ========================================== .. contents:: :local: .. currentmodule:: celery.backends.filesystem .. automodule:: celery.backends.filesystem :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.time.rst0000644000175000017500000000040213130607475023725 0ustar omeromer00000000000000================================================== ``celery.utils.time`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.time .. automodule:: celery.utils.time :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.concurrency.base.rst0000644000175000017500000000041613130607475025100 0ustar omeromer00000000000000=============================================== ``celery.concurrency.base`` =============================================== .. contents:: :local: .. currentmodule:: celery.concurrency.base .. automodule:: celery.concurrency.base :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.events.cursesmon.rst0000644000175000017500000000040413130607475025153 0ustar omeromer00000000000000========================================== ``celery.events.cursesmon`` ========================================== .. contents:: :local: .. currentmodule:: celery.events.cursesmon .. automodule:: celery.events.cursesmon :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.worker.loops.rst0000644000175000017500000000035413130607475024302 0ustar omeromer00000000000000==================================== ``celery.worker.loops`` ==================================== .. contents:: :local: .. currentmodule:: celery.worker.loops .. automodule:: celery.worker.loops :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.worker.control.rst0000644000175000017500000000040413130607475024622 0ustar omeromer00000000000000============================================= ``celery.worker.control`` ============================================= .. contents:: :local: .. currentmodule:: celery.worker.control .. automodule:: celery.worker.control :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.graph.rst0000644000175000017500000000036513130607475024100 0ustar omeromer00000000000000========================================== ``celery.utils.graph`` ========================================== .. contents:: :local: .. 
currentmodule:: celery.utils.graph .. automodule:: celery.utils.graph :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.database.models.rst0000644000175000017500000000042413130607475026433 0ustar omeromer00000000000000====================================== ``celery.backends.database.models`` ====================================== .. contents:: :local: .. currentmodule:: celery.backends.database.models .. automodule:: celery.backends.database.models :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.worker.components.rst0000644000175000017500000000040313130607475025326 0ustar omeromer00000000000000======================================== ``celery.worker.components`` ======================================== .. contents:: :local: .. currentmodule:: celery.worker.components .. automodule:: celery.worker.components :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.threads.rst0000644000175000017500000000037313130607475024430 0ustar omeromer00000000000000========================================== ``celery.utils.threads`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.threads .. automodule:: celery.utils.threads :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.worker.autoscale.rst0000644000175000017500000000040013130607475025116 0ustar omeromer00000000000000======================================== ``celery.worker.autoscale`` ======================================== .. contents:: :local: .. currentmodule:: celery.worker.autoscale .. automodule:: celery.worker.autoscale :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.app.routes.rst0000644000175000017500000000034013130607475023731 0ustar omeromer00000000000000================================= ``celery.app.routes`` ================================= .. contents:: :local: .. currentmodule:: celery.app.routes .. automodule:: celery.app.routes :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.dispatch.rst0000644000175000017500000000037413130607475024576 0ustar omeromer00000000000000========================================= ``celery.utils.dispatch`` ========================================= .. contents:: :local: .. currentmodule:: celery.utils.dispatch .. automodule:: celery.utils.dispatch :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.sysinfo.rst0000644000175000017500000000041313130607475024463 0ustar omeromer00000000000000================================================== ``celery.utils.sysinfo`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.sysinfo .. automodule:: celery.utils.sysinfo :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.couchbase.rst0000644000175000017500000000041613130607475025342 0ustar omeromer00000000000000============================================ ``celery.backends.couchbase`` ============================================ .. contents:: :local: .. currentmodule:: celery.backends.couchbase .. automodule:: celery.backends.couchbase :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.collections.rst0000644000175000017500000000037413130607475025315 0ustar omeromer00000000000000==================================== ``celery.utils.collections`` ==================================== .. currentmodule:: celery.utils.collections .. contents:: :local: .. 
automodule:: celery.utils.collections :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.serialization.rst0000644000175000017500000000042113130607475025645 0ustar omeromer00000000000000============================================ ``celery.utils.serialization`` ============================================ .. contents:: :local: .. currentmodule:: celery.utils.serialization .. automodule:: celery.utils.serialization :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.security.utils.rst0000644000175000017500000000037613130607475024650 0ustar omeromer00000000000000========================================== ``celery.security.utils`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.utils .. automodule:: celery.security.utils :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.imports.rst0000644000175000017500000000042113130607475024465 0ustar omeromer00000000000000===================================================== ``celery.utils.imports`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.imports .. automodule:: celery.utils.imports :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.concurrency.rst0000644000175000017500000000034513130607475024170 0ustar omeromer00000000000000================================== ``celery.concurrency`` ================================== .. contents:: :local: .. currentmodule:: celery.concurrency .. automodule:: celery.concurrency :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.objects.rst0000644000175000017500000000041313130607475024422 0ustar omeromer00000000000000================================================== ``celery.utils.objects`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.objects .. automodule:: celery.utils.objects :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.amqp.rst0000644000175000017500000000036513130607475024347 0ustar omeromer00000000000000======================================= ``celery.backends.amqp`` ======================================= .. contents:: :local: .. currentmodule:: celery.backends.amqp .. automodule:: celery.backends.amqp :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.events.snapshot.rst0000644000175000017500000000040113130607475024771 0ustar omeromer00000000000000========================================== ``celery.events.snapshot`` ========================================== .. contents:: :local: .. currentmodule:: celery.events.snapshot .. automodule:: celery.events.snapshot :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.consul.rst0000644000175000017500000000037513130607475024715 0ustar omeromer00000000000000========================================== celery.backends.consul ========================================== .. contents:: :local: .. currentmodule:: celery.backends.consul .. automodule:: celery.backends.consul :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.security.serialization.rst0000644000175000017500000000042613130607475026361 0ustar omeromer00000000000000========================================== ``celery.security.serialization`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.serialization .. 
automodule:: celery.security.serialization :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.dynamodb.rst0000644000175000017500000000041113130607475025176 0ustar omeromer00000000000000=========================================== ``celery.backends.dynamodb`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.dynamodb .. automodule:: celery.backends.dynamodb :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.security.key.rst0000644000175000017500000000037013130607475024272 0ustar omeromer00000000000000========================================== ``celery.security.key`` ========================================== .. contents:: :local: .. currentmodule:: celery.security.key .. automodule:: celery.security.key :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.log.rst0000644000175000017500000000040513130607475023553 0ustar omeromer00000000000000===================================================== ``celery.utils.log`` ===================================================== .. contents:: :local: .. currentmodule:: celery.utils.log .. automodule:: celery.utils.log :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.concurrency.prefork.rst0000644000175000017500000000046313130607475025640 0ustar omeromer00000000000000============================================================= ``celery.concurrency.prefork`` ============================================================= .. contents:: :local: .. currentmodule:: celery.concurrency.prefork .. automodule:: celery.concurrency.prefork :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.async.rst0000644000175000017500000000036613130607475024527 0ustar omeromer00000000000000===================================== ``celery.backends.async`` ===================================== .. contents:: :local: .. currentmodule:: celery.backends.async .. automodule:: celery.backends.async :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.dispatch.weakref_backports.rst0000644000175000017500000000051013130607475030261 0ustar omeromer00000000000000==================================================== ``celery.utils.dispatch.weakref_backports`` ==================================================== .. contents:: :local: .. currentmodule:: celery.utils.dispatch.weakref_backports .. automodule:: celery.utils.dispatch.weakref_backports :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.iso8601.rst0000644000175000017500000000041313130607475024102 0ustar omeromer00000000000000================================================== ``celery.utils.iso8601`` ================================================== .. contents:: :local: .. currentmodule:: celery.utils.iso8601 .. automodule:: celery.utils.iso8601 :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.platforms.rst0000644000175000017500000000034713130607475023647 0ustar omeromer00000000000000====================================== ``celery.platforms`` ====================================== .. contents:: :local: .. currentmodule:: celery.platforms .. automodule:: celery.platforms :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.riak.rst0000644000175000017500000000037513130607475024340 0ustar omeromer00000000000000=========================================== ``celery.backends.riak`` =========================================== .. contents:: :local: .. 
currentmodule:: celery.backends.riak .. automodule:: celery.backends.riak :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.couchdb.rst0000644000175000017500000000040613130607475025014 0ustar omeromer00000000000000=========================================== ``celery.backends.couchdb`` =========================================== .. contents:: :local: .. currentmodule:: celery.backends.couchdb .. automodule:: celery.backends.couchdb :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.timer2.rst0000644000175000017500000000034013130607475024172 0ustar omeromer00000000000000============================== ``celery.utils.timer2`` ============================== .. contents:: :local: .. currentmodule:: celery.utils.timer2 .. automodule:: celery.utils.timer2 :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.concurrency.solo.rst0000644000175000017500000000046613130607475025147 0ustar omeromer00000000000000=================================================================== ``celery.concurrency.solo`` =================================================================== .. contents:: :local: .. currentmodule:: celery.concurrency.solo .. automodule:: celery.concurrency.solo :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.nodenames.rst0000644000175000017500000000040113130607475024737 0ustar omeromer00000000000000========================================== ``celery.utils.nodenames`` ========================================== .. contents:: :local: .. currentmodule:: celery.utils.nodenames .. automodule:: celery.utils.nodenames :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.backends.rpc.rst0000644000175000017500000000036213130607475024172 0ustar omeromer00000000000000======================================= ``celery.backends.rpc`` ======================================= .. contents:: :local: .. currentmodule:: celery.backends.rpc .. automodule:: celery.backends.rpc :members: :undoc-members: celery-4.1.0/docs/internals/reference/celery.utils.dispatch.signal.rst0000644000175000017500000000044713130607475026053 0ustar omeromer00000000000000==================================================== ``celery.utils.dispatch.signal`` ==================================================== .. contents:: :local: .. currentmodule:: celery.utils.dispatch.signal .. automodule:: celery.utils.dispatch.signal :members: :undoc-members: celery-4.1.0/docs/internals/app-overview.rst0000644000175000017500000001475313130607475021052 0ustar omeromer00000000000000============================= "The Big Instance" Refactor ============================= The `app` branch is a work-in-progress to remove the use of a global configuration in Celery. Celery can now be instantiated and several instances of Celery may exist in the same process space. Also, large parts can be customized without resorting to monkey patching. Examples ======== Creating a Celery instance:: >>> from celery import Celery >>> app = Celery() >>> app.config_from_object('celeryconfig') >>> #app.config_from_envvar('CELERY_CONFIG_MODULE') Creating tasks: .. code-block:: python @app.task def add(x, y): return x + y Creating custom Task subclasses: .. code-block:: python Task = celery.create_task_cls() class DebugTask(Task): def on_failure(self, *args, **kwargs): import pdb pdb.set_trace() @app.task(base=DebugTask) def add(x, y): return x + y Starting a worker: .. 
code-block:: python worker = celery.Worker(loglevel='INFO') Getting access to the configuration: .. code-block:: python celery.conf.task_always_eager = True celery.conf['task_always_eager'] = True Controlling workers:: >>> celery.control.inspect().active() >>> celery.control.rate_limit(add.name, '100/m') >>> celery.control.broadcast('shutdown') >>> celery.control.discard_all() Other interesting attributes:: # Establish broker connection. >>> celery.broker_connection() # AMQP Specific features. >>> celery.amqp >>> celery.amqp.Router >>> celery.amqp.get_queues() >>> celery.amqp.get_task_consumer() # Loader >>> celery.loader # Default backend >>> celery.backend As you can probably see, this really opens up another dimension of customization abilities. Deprecated ========== * ``celery.task.ping`` ``celery.task.PingTask`` Inferior to the ping remote control command. Will be removed in Celery 2.3. Aliases (Pending deprecation) ============================= * ``celery.task.base`` * ``.Task`` -> {``app.Task`` / :class:`celery.app.task.Task`} * ``celery.task.sets`` * ``.TaskSet`` -> {``app.TaskSet``} * ``celery.decorators`` / ``celery.task`` * ``.task`` -> {``app.task``} * ``celery.execute`` * ``.apply_async`` -> {``task.apply_async``} * ``.apply`` -> {``task.apply``} * ``.send_task`` -> {``app.send_task``} * ``.delay_task`` -> *no alternative* * ``celery.log`` * ``.get_default_logger`` -> {``app.log.get_default_logger``} * ``.setup_logger`` -> {``app.log.setup_logger``} * ``.get_task_logger`` -> {``app.log.get_task_logger``} * ``.setup_task_logger`` -> {``app.log.setup_task_logger``} * ``.setup_logging_subsystem`` -> {``app.log.setup_logging_subsystem``} * ``.redirect_stdouts_to_logger`` -> {``app.log.redirect_stdouts_to_logger``} * ``celery.messaging`` * ``.establish_connection`` -> {``app.broker_connection``} * ``.with_connection`` -> {``app.with_connection``} * ``.get_consumer_set`` -> {``app.amqp.get_task_consumer``} * ``.TaskPublisher`` -> {``app.amqp.TaskPublisher``} * ``.TaskConsumer`` -> {``app.amqp.TaskConsumer``} * ``.ConsumerSet`` -> {``app.amqp.ConsumerSet``} * ``celery.conf.*`` -> {``app.conf``} **NOTE**: All configuration keys are now named the same as in the configuration. So the key ``task_always_eager`` is accessed as:: >>> app.conf.task_always_eager instead of:: >>> from celery import conf >>> conf.always_eager * ``.get_queues`` -> {``app.amqp.get_queues``} * ``celery.task.control`` * ``.broadcast`` -> {``app.control.broadcast``} * ``.rate_limit`` -> {``app.control.rate_limit``} * ``.ping`` -> {``app.control.ping``} * ``.revoke`` -> {``app.control.revoke``} * ``.discard_all`` -> {``app.control.discard_all``} * ``.inspect`` -> {``app.control.inspect``} * ``celery.utils.info`` * ``.humanize_seconds`` -> ``celery.utils.time.humanize_seconds`` * ``.textindent`` -> ``celery.utils.textindent`` * ``.get_broker_info`` -> {``app.amqp.get_broker_info``} * ``.format_broker_info`` -> {``app.amqp.format_broker_info``} * ``.format_queues`` -> {``app.amqp.format_queues``} Default App Usage ================= To be backward compatible, it must be possible to use all the classes/functions without passing an explicit app instance. This is achieved by having all app-dependent objects use :data:`~celery.app.default_app` if the app instance is missing. .. 
code-block:: python from celery.app import app_or_default class SomeClass(object): def __init__(self, app=None): self.app = app_or_default(app) The problem with this approach is that there's a chance that the app instance is lost along the way, and everything seems to be working normally. Testing app instance leaks is hard. The environment variable :envvar:`CELERY_TRACE_APP` can be used; when this is enabled, :func:`celery.app.app_or_default` will raise an exception whenever it has to go back to the default app instance. App Dependency Tree ------------------- * {``app``} * ``celery.loaders.base.BaseLoader`` * ``celery.backends.base.BaseBackend`` * {``app.TaskSet``} * ``celery.task.sets.TaskSet`` (``app.TaskSet``) * [``app.TaskSetResult``] * ``celery.result.TaskSetResult`` (``app.TaskSetResult``) * {``app.AsyncResult``} * ``celery.result.BaseAsyncResult`` / ``celery.result.AsyncResult`` * ``celery.bin.worker.WorkerCommand`` * ``celery.apps.worker.Worker`` * ``celery.worker.WorkerController`` * ``celery.worker.consumer.Consumer`` * ``celery.worker.request.Request`` * ``celery.events.EventDispatcher`` * ``celery.worker.control.ControlDispatch`` * ``celery.worker.control.registry.Panel`` * ``celery.pidbox.BroadcastPublisher`` * ``celery.pidbox.BroadcastConsumer`` * ``celery.beat.EmbeddedService`` * ``celery.bin.events.EvCommand`` * ``celery.events.snapshot.evcam`` * ``celery.events.snapshot.Polaroid`` * ``celery.events.EventReceiver`` * ``celery.events.cursesmon.evtop`` * ``celery.events.EventReceiver`` * ``celery.events.cursesmon.CursesMonitor`` * ``celery.events.dumper`` * ``celery.events.EventReceiver`` * ``celery.bin.amqp.AMQPAdmin`` * ``celery.bin.beat.BeatCommand`` * ``celery.apps.beat.Beat`` * ``celery.beat.Service`` * ``celery.beat.Scheduler`` celery-4.1.0/docs/internals/deprecation.rst0000644000175000017500000001265713130607475020914 0ustar omeromer00000000000000.. _deprecation-timeline: ============================== Celery Deprecation Time-line ============================== .. contents:: :local: .. _deprecations-v5.0: Removals for version 5.0 ======================== Old Task API ------------ .. _deprecate-compat-task-modules: Compat Task Modules ~~~~~~~~~~~~~~~~~~~ - Module ``celery.decorators`` will be removed: This means you need to change: .. code-block:: python from celery.decorators import task Into: .. code-block:: python from celery import task - Module ``celery.task`` *may* be removed (not decided) This means you should change: .. code-block:: python from celery.task import task into: .. code-block:: python from celery import task -- and: .. code-block:: python from celery.task import Task into: .. code-block:: python from celery import Task Note that the new :class:`~celery.Task` class no longer uses :func:`classmethod` for these methods: - delay - apply_async - retry - apply - AsyncResult - subtask This also means that you can't call these methods directly on the class, but have to instantiate the task first: .. code-block:: pycon >>> MyTask.delay() # NO LONGER WORKS >>> MyTask().delay() # WORKS! Task attributes --------------- The task attributes: - ``queue`` - ``exchange`` - ``exchange_type`` - ``routing_key`` - ``delivery_mode`` - ``priority`` are deprecated and must be set by :setting:`task_routes` instead.
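The equivalent :setting:`task_routes` entry looks like this -- a sketch in which the task name, queue, and routing values are placeholders:

.. code-block:: python

    app.conf.task_routes = {
        'proj.tasks.add': {
            'queue': 'maths',
            'exchange': 'maths',
            'exchange_type': 'direct',
            'routing_key': 'maths.add',
            'delivery_mode': 2,
            'priority': 5,
        },
    }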
- ``celery.messaging`` Use :attr:`@amqp` instead. - ``celery.registry`` Use :mod:`celery.app.registry` instead. - ``celery.task.control`` Use :attr:`@control` instead. - ``celery.task.schedules`` Use :mod:`celery.schedules` instead. - ``celery.task.chords`` Use :func:`celery.chord` instead. Settings -------- ``BROKER`` Settings ~~~~~~~~~~~~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``BROKER_HOST`` :setting:`broker_url` ``BROKER_PORT`` :setting:`broker_url` ``BROKER_USER`` :setting:`broker_url` ``BROKER_PASSWORD`` :setting:`broker_url` ``BROKER_VHOST`` :setting:`broker_url` ===================================== ===================================== ``REDIS`` Result Backend Settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``CELERY_REDIS_HOST`` :setting:`result_backend` ``CELERY_REDIS_PORT`` :setting:`result_backend` ``CELERY_REDIS_DB`` :setting:`result_backend` ``CELERY_REDIS_PASSWORD`` :setting:`result_backend` ``REDIS_HOST`` :setting:`result_backend` ``REDIS_PORT`` :setting:`result_backend` ``REDIS_DB`` :setting:`result_backend` ``REDIS_PASSWORD`` :setting:`result_backend` ===================================== ===================================== Task_sent signal ---------------- The :signal:`task_sent` signal will be removed in version 4.0. Please use the :signal:`before_task_publish` and :signal:`after_task_publish` signals instead. Result ------ Apply to: :class:`~celery.result.AsyncResult`, :class:`~celery.result.EagerResult`: - ``Result.wait()`` -> ``Result.get()`` - ``Result.task_id()`` -> ``Result.id`` - ``Result.status`` -> ``Result.state``. .. _deprecations-v3.1: Removals for version 3.1 ======================== Settings ~~~~~~~~ ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== ``CELERY_AMQP_TASK_RESULT_EXPIRES`` :setting:`result_expires` ===================================== ===================================== .. _deprecations-v2.0: Removals for version 2.0 ======================== * The following settings will be removed: ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== `CELERY_AMQP_CONSUMER_QUEUES` `task_queues` `CELERY_AMQP_EXCHANGE` `task_default_exchange` `CELERY_AMQP_EXCHANGE_TYPE` `task_default_exchange_type` `CELERY_AMQP_CONSUMER_ROUTING_KEY` `task_queues` `CELERY_AMQP_PUBLISHER_ROUTING_KEY` `task_default_routing_key` ===================================== ===================================== * :envvar:`CELERY_LOADER` definitions without class name. For example, `celery.loaders.default`, needs to include the class name: `celery.loaders.default.Loader`. * :meth:`TaskSet.run`. Use :meth:`celery.task.base.TaskSet.apply_async` instead. celery-4.1.0/docs/internals/guide.rst0000644000175000017500000002064513130607475017520 0ustar omeromer00000000000000.. _internals-guide: ================================ Contributors Guide to the Code ================================ ..
contents:: :local: Philosophy ========== The API>RCP Precedence Rule --------------------------- - The API is more important than Readability - Readability is more important than Convention - Convention is more important than Performance - …unless the code is a proven hot-spot. More important than anything else is the end-user API. Conventions must step aside, and any suffering is always alleviated if the end result is a better API. Conventions and Idioms Used =========================== Classes ------- Naming ~~~~~~ - Follows :pep:`8`. - Class names must be `CamelCase`. - but not if they're verbs; verbs shall be `lower_case`: .. code-block:: python # - test case for a class class TestMyClass(Case): # BAD pass class test_MyClass(Case): # GOOD pass # - test case for a function class TestMyFunction(Case): # BAD pass class test_my_function(Case): # GOOD pass # - "action" class (verb) class UpdateTwitterStatus(object): # BAD pass class update_twitter_status(object): # GOOD pass .. note:: Sometimes it makes sense to have a class masquerade as a function, and there's precedent for this in the Python standard library (e.g., :class:`~contextlib.contextmanager`). Celery examples include :class:`~celery.signature`, :class:`~celery.chord`, ``inspect``, :class:`~kombu.utils.functional.promise` and more. - Factory functions and methods must be `CamelCase` (excluding verbs): .. code-block:: python class Celery(object): def consumer_factory(self): # BAD ... def Consumer(self): # GOOD ... Default values ~~~~~~~~~~~~~~ Class attributes serve as default values for the instance, as this means that they can be set by either instantiation or inheritance. **Example:** .. code-block:: python class Producer(object): active = True serializer = 'json' def __init__(self, serializer=None, active=None): self.serializer = serializer or self.serializer # must check for None when value can be false-y self.active = active if active is not None else self.active A subclass can change the default value: .. code-block:: python class TaskProducer(Producer): serializer = 'pickle' and the value can be set at instantiation: .. code-block:: pycon >>> producer = TaskProducer(serializer='msgpack') Exceptions ~~~~~~~~~~ Custom exceptions raised by an object's methods and properties should be available as an attribute and documented in the method/property that throws them. This way a user doesn't have to find out where to import the exception from, but can instead use ``help(obj)`` and access the exception class from the instance directly. **Example**: .. code-block:: python class Empty(Exception): pass class Queue(object): Empty = Empty def get(self): """Get the next item from the queue. :raises Queue.Empty: if there are no more items left. """ try: return self.queue.popleft() except IndexError: raise self.Empty() Composites ~~~~~~~~~~ Similarly to exceptions, composite classes should be override-able by inheritance and/or instantiation. Common sense can be used when selecting what classes to include, but often it's better to add one too many: predicting what users need to override is hard (this has saved us from many a monkey patch). **Example**: .. code-block:: python class Worker(object): Consumer = Consumer def __init__(self, connection, consumer_cls=None): self.Consumer = consumer_cls or self.Consumer def do_work(self): with self.Consumer(self.connection) as consumer: self.connection.drain_events() Applications vs.
"single mode" ============================== In the beginning Celery was developed for Django, simply because this enabled us get the project started quickly, while also having a large potential user base. In Django there's a global settings object, so multiple Django projects can't co-exist in the same process space, this later posed a problem for using Celery with frameworks that don't have this limitation. Therefore the app concept was introduced. When using apps you use 'celery' objects instead of importing things from Celery sub-modules, this (unfortunately) also means that Celery essentially has two API's. Here's an example using Celery in single-mode: .. code-block:: python from celery import task from celery.task.control import inspect from .models import CeleryStats @task def write_stats_to_db(): stats = inspect().stats(timeout=1) for node_name, reply in stats: CeleryStats.objects.update_stat(node_name, stats) and here's the same using Celery app objects: .. code-block:: python from .celery import celery from .models import CeleryStats @app.task def write_stats_to_db(): stats = celery.control.inspect().stats(timeout=1) for node_name, reply in stats: CeleryStats.objects.update_stat(node_name, stats) In the example above the actual application instance is imported from a module in the project, this module could look something like this: .. code-block:: python from celery import Celery app = Celery(broker='amqp://') Module Overview =============== - celery.app This is the core of Celery: the entry-point for all functionality. - celery.loaders Every app must have a loader. The loader decides how configuration is read; what happens when the worker starts; when a task starts and ends; and so on. The loaders included are: - app Custom Celery app instances uses this loader by default. - default "single-mode" uses this loader by default. Extension loaders also exist, for example :pypi:`celery-pylons`. - celery.worker This is the worker implementation. - celery.backends Task result backends live here. - celery.apps Major user applications: worker and beat. The command-line wrappers for these are in celery.bin (see below) - celery.bin Command-line applications. :file:`setup.py` creates setuptools entry-points for these. - celery.concurrency Execution pool implementations (prefork, eventlet, gevent, solo). - celery.db Database models for the SQLAlchemy database result backend. (should be moved into :mod:`celery.backends.database`) - celery.events Sending and consuming monitoring events, also includes curses monitor, event dumper and utilities to work with in-memory cluster state. - celery.execute.trace How tasks are executed and traced by the worker, and in eager mode. - celery.security Security related functionality, currently a serializer using cryptographic digests. - celery.task single-mode interface to creating tasks, and controlling workers. - t.unit (int distribution) The unit test suite. - celery.utils Utility functions used by the Celery code base. Much of it is there to be compatible across Python versions. - celery.contrib Additional public code that doesn't fit into any other name-space. Worker overview =============== * `celery.bin.worker:Worker` This is the command-line interface to the worker. Responsibilities: * Daemonization when :option:`--detach ` set, * dropping privileges when using :option:`--uid `/ :option:`--gid ` arguments * Installs "concurrency patches" (eventlet/gevent monkey patches). 
``app.worker_main(argv)`` calls ``instantiate('celery.bin.worker:Worker')(app).execute_from_commandline(argv)`` * `app.Worker` -> `celery.apps.worker:Worker` Responsibilities: * sets up logging and redirects standard outs * installs signal handlers (`TERM`/`HUP`/`STOP`/`USR1` (cry)/`USR2` (rdb)) * prints banner and warnings (e.g., pickle warning) * handles the :option:`celery worker --purge` argument * `app.WorkController` -> `celery.worker.WorkController` This is the real worker, built up around bootsteps. celery-4.1.0/docs/internals/worker.rst0000644000175000017500000000272313130607475017731 0ustar omeromer00000000000000.. _internals-worker: ======================= Internals: The worker ======================= .. contents:: :local: Introduction ============ The worker consists of 4 main components: the consumer, the scheduler, the mediator and the task pool. All these components run in parallel working with two data structures: the ready queue and the ETA schedule. Data structures =============== timer ----- The timer uses :mod:`heapq` to schedule internal functions. It's very efficient and can handle hundreds of thousands of entries. Components ========== Consumer -------- Receives messages from the broker using :pypi:`Kombu`. When a message is received it's converted into a :class:`celery.worker.request.Request` object. Tasks with an ETA, or rate-limit are entered into the `timer`; messages that can be immediately processed are sent to the execution pool. ETA and rate-limit are two incompatible parameters: the ETA overrides the rate-limit by default, so a task with both will follow its ETA and ignore its rate-limit. Timer ----- The timer schedules internal functions, like cleanup and internal monitoring, but also schedules ETA tasks and rate limited tasks. If a scheduled task's ETA has passed it is moved to the execution pool. TaskPool -------- This is a slightly modified :class:`multiprocessing.Pool`. It mostly works the same way, except it makes sure all of the workers are running at all times. If a worker is missing, it replaces it with a new one. celery-4.1.0/docs/faq.rst0000644000175000017500000007116713130607475015174 0ustar omeromer00000000000000.. _faq: ============================ Frequently Asked Questions ============================ .. contents:: :local: .. _faq-general: General ======= .. _faq-when-to-use: What kinds of things should I use Celery for? --------------------------------------------- **Answer:** `Queue everything and delight everyone`_ is a good article describing why you'd use a queue in a web context. .. _`Queue everything and delight everyone`: https://decafbad.com/blog/2008/07/04/queue-everything-and-delight-everyone These are some common use cases: * Running something in the background. For example, to finish the web request as soon as possible, then update the user's page incrementally. This gives the user the impression of good performance and "snappiness", even though the real work might actually take some time. * Running something after the web request has finished. * Making sure something is done, by executing it asynchronously and using retries. * Scheduling periodic work. And to some degree: * Distributed computing. * Parallel execution. .. _faq-misconceptions: Misconceptions ============== .. _faq-loc: Does Celery really consist of 50.000 lines of code? --------------------------------------------------- **Answer:** No, this and similarly large numbers have been reported at various locations.
The numbers as of this writing are: - core: 7,141 lines of code. - tests: 14,209 lines. - backends, contrib, compat utilities: 9,032 lines. Lines of code isn't a useful metric, so even if Celery did consist of 50k lines of code you wouldn't be able to draw any conclusions from such a number. Does Celery have many dependencies? ----------------------------------- A common criticism is that Celery uses too many dependencies. The rationale behind such a fear is hard to imagine, especially considering code reuse as the established way to combat complexity in modern software development, and that the cost of adding dependencies is very low now that package managers like pip and PyPI make the hassle of installing and maintaining dependencies a thing of the past. Celery has replaced several dependencies along the way, and the current list of dependencies is: celery ~~~~~~ - :pypi:`kombu` Kombu is part of the Celery ecosystem and is the library used to send and receive messages. It's also the library that enables us to support many different message brokers. It's also used by the OpenStack project, and many others, validating the choice to separate it from the Celery code-base. - :pypi:`billiard` Billiard is a fork of the Python multiprocessing module containing many performance and stability improvements. It's an eventual goal that these improvements will be merged back into Python one day. It's also used for compatibility with older Python versions that don't come with the multiprocessing module. - :pypi:`pytz` The pytz module provides timezone definitions and related tools. kombu ~~~~~ Kombu depends on the following packages: - :pypi:`amqp` The underlying pure-Python amqp client implementation. AMQP being the default broker, this is a natural dependency. .. note:: To handle the dependencies for popular configuration choices Celery defines a number of "bundle" packages, see :ref:`bundles`. .. _faq-heavyweight: Is Celery heavy-weight? ----------------------- Celery adds very little overhead both in memory footprint and performance. But please note that the default configuration isn't optimized for time or space; see the :ref:`guide-optimizing` guide for more information. .. _faq-serialization-is-a-choice: Is Celery dependent on pickle? ------------------------------ **Answer:** No, Celery can support any serialization scheme. We have built-in support for JSON, YAML, Pickle, and msgpack. Every task is associated with a content type, so you can even send one task using pickle, another using JSON. The default serialization format used to be pickle, but since 4.0 the default is now JSON. If you require sending complex Python objects as task arguments, you can use pickle as the serialization format, but see notes in :ref:`security-serializers`. If you need to communicate with other languages you should use a serialization format suited to that task, which pretty much means any serializer that's not pickle. You can set a global default serializer, the default serializer for a particular Task, or even what serializer to use when sending a single task instance. .. _faq-is-celery-for-django-only: Is Celery for Django only? -------------------------- **Answer:** No, you can use Celery with any framework, web or otherwise. .. _faq-is-celery-for-rabbitmq-only: Do I have to use AMQP/RabbitMQ? ------------------------------- **Answer**: No, although using RabbitMQ is recommended you can also use Redis, SQS, or Qpid. See :ref:`brokers` for more information.
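For illustration, a minimal sketch of pointing an app at a Redis broker; the URL (host, port, and database number) is a placeholder example, not a recommendation:

.. code-block:: python

    from celery import Celery

    # Any supported transport URL works here; this Redis URL is
    # just an example value.
    app = Celery('proj', broker='redis://localhost:6379/0')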
Redis as a broker won't perform as well as an AMQP broker, but the combination RabbitMQ as broker and Redis as a result store is commonly used. If you have strict reliability requirements you're encouraged to use RabbitMQ or another AMQP broker. Some transports also use polling, so they're likely to consume more resources. However, if you for some reason aren't able to use AMQP, feel free to use these alternatives. They will probably work fine for most use cases, and note that the above points are not specific to Celery; if using Redis/database as a queue worked fine for you before, it probably will now. You can always upgrade later if you need to. .. _faq-is-celery-multilingual: Is Celery multilingual? ------------------------ **Answer:** Yes. :mod:`~celery.bin.worker` is an implementation of Celery in Python. If the language has an AMQP client, there shouldn't be much work to create a worker in your language. A Celery worker is just a program connecting to the broker to process messages. Also, there's another way to be language-independent, and that's to use REST tasks: instead of your tasks being functions, they're URLs. With this information you can even create simple web servers that enable preloading of code. Simply expose an endpoint that performs an operation, and create a task that just performs an HTTP request to that endpoint. .. _faq-troubleshooting: Troubleshooting =============== .. _faq-mysql-deadlocks: MySQL is throwing deadlock errors, what can I do? ------------------------------------------------- **Answer:** MySQL's default isolation level is `REPEATABLE-READ`; if you don't really need that, set it to `READ-COMMITTED`. You can do that by adding the following to your :file:`my.cnf`:: [mysqld] transaction-isolation = READ-COMMITTED For more information about InnoDB's transaction model see `MySQL - The InnoDB Transaction Model and Locking`_ in the MySQL user manual. (Thanks to Honza Kral and Anton Tsigularov for this solution) .. _`MySQL - The InnoDB Transaction Model and Locking`: https://dev.mysql.com/doc/refman/5.1/en/innodb-transaction-model.html .. _faq-worker-hanging: The worker isn't doing anything, just hanging --------------------------------------------- **Answer:** See `MySQL is throwing deadlock errors, what can I do?`_, or `Why is Task.delay/apply\*/the worker just hanging?`_. .. _faq-results-unreliable: Task results aren't reliably returning -------------------------------------- **Answer:** If you're using the database backend for results, and in particular using MySQL, see `MySQL is throwing deadlock errors, what can I do?`_. .. _faq-publish-hanging: Why is Task.delay/apply\*/the worker just hanging? -------------------------------------------------- **Answer:** There's a bug in some AMQP clients that'll make it hang if it's not able to authenticate the current user, the password doesn't match, or the user doesn't have access to the specified virtual host. Be sure to check your broker logs (for RabbitMQ that's :file:`/var/log/rabbitmq/rabbit.log` on most systems); it usually contains a message describing the reason. .. _faq-worker-on-freebsd: Does it work on FreeBSD? ------------------------ **Answer:** Depends; when using the RabbitMQ (AMQP) and Redis transports it should work out of the box. For other transports the compatibility prefork pool is used and requires a working POSIX semaphore implementation; this is enabled in FreeBSD by default since FreeBSD 8.x.
For older versions of FreeBSD, you have to enable POSIX semaphores in the kernel and manually recompile billiard. Luckily, Viktor Petersson has written a tutorial to get you started with Celery on FreeBSD here: http://www.playingwithwire.com/2009/10/how-to-get-celeryd-to-work-on-freebsd/ .. _faq-duplicate-key-errors: I'm having `IntegrityError: Duplicate Key` errors. Why? --------------------------------------------------------- **Answer:** See `MySQL is throwing deadlock errors, what can I do?`_. Thanks to :github_user:`@howsthedotcom`. .. _faq-worker-stops-processing: Why aren't my tasks processed? ------------------------------ **Answer:** With RabbitMQ you can see how many consumers are currently receiving tasks by running the following command: .. code-block:: console $ rabbitmqctl list_queues -p <myvhost> name messages consumers Listing queues ... celery 2891 2 This shows that there are 2891 messages waiting to be processed in the task queue, and there are two consumers processing them. One reason that the queue is never emptied could be that you have a stale worker process taking the messages hostage. This could happen if the worker wasn't properly shut down. When a message is received by a worker the broker waits for it to be acknowledged before marking the message as processed. The broker won't re-send that message to another consumer until the consumer is shut down properly. If you hit this problem you have to kill all workers manually and restart them: .. code-block:: console $ pkill 'celery worker' $ # - If you don't have pkill use: $ # ps auxww | grep 'celery worker' | awk '{print $2}' | xargs kill You may have to wait a while until all workers have finished executing tasks. If it's still hanging after a long time you can kill them by force with: .. code-block:: console $ pkill -9 'celery worker' $ # - If you don't have pkill use: $ # ps auxww | grep 'celery worker' | awk '{print $2}' | xargs kill -9 .. _faq-task-does-not-run: Why won't my Task run? ---------------------- **Answer:** There might be syntax errors preventing the tasks module being imported. You can find out if Celery is able to run the task by executing the task manually: .. code-block:: python >>> from myapp.tasks import MyPeriodicTask >>> MyPeriodicTask.delay() Watch the worker's log file to see if it's able to find the task, or if some other error is happening. .. _faq-periodic-task-does-not-run: Why won't my periodic task run? ------------------------------- **Answer:** See `Why won't my Task run?`_. .. _faq-purge-the-queue: How do I purge all waiting tasks? --------------------------------- **Answer:** You can use the ``celery purge`` command to purge all configured task queues: .. code-block:: console $ celery -A proj purge or programmatically: .. code-block:: pycon >>> from proj.celery import app >>> app.control.purge() 1753 If you only want to purge messages from a specific queue you have to use the AMQP API or the :program:`celery amqp` utility: .. code-block:: console $ celery -A proj amqp queue.purge <queue name> The number 1753 is the number of messages deleted. You can also start the worker with the :option:`--purge <celery worker --purge>` option enabled to purge messages when the worker starts. .. _faq-messages-left-after-purge: I've purged messages, but there are still messages left in the queue? --------------------------------------------------------------------- **Answer:** Tasks are acknowledged (removed from the queue) as soon as they're actually executed.
After the worker has received a task, it will take some time until it's actually executed, especially if there are a lot of tasks already waiting for execution. Messages that aren't acknowledged are held on to by the worker until it closes the connection to the broker (AMQP server). When that connection is closed (e.g., because the worker was stopped) the tasks will be re-sent by the broker to the next available worker (or the same worker when it has been restarted), so to properly purge the queue of waiting tasks you have to stop all the workers, and then purge the tasks using :func:`celery.control.purge`. .. _faq-results: Results ======= .. _faq-get-result-by-task-id: How do I get the result of a task if I have the ID that points there? ---------------------------------------------------------------------- **Answer**: Use `task.AsyncResult`: .. code-block:: pycon >>> result = my_task.AsyncResult(task_id) >>> result.get() This will give you a :class:`~celery.result.AsyncResult` instance using the task's current result backend. If you need to specify a custom result backend, or you want to use the current application's default backend you can use :class:`@AsyncResult`: .. code-block:: pycon >>> result = app.AsyncResult(task_id) >>> result.get() .. _faq-security: Security ======== Isn't using `pickle` a security concern? ---------------------------------------- **Answer**: Indeed, since Celery 4.0 the default serializer is now JSON to make sure people are choosing serializers consciously and aware of this concern. It's essential that you protect against unauthorized access to your broker, databases and other services transmitting pickled data. Note that this isn't just something you should be aware of with Celery; for example, Django also uses pickle for its cache client. For the task messages you can set the :setting:`task_serializer` setting to "json" or "yaml" instead of pickle. Similarly for task results you can set :setting:`result_serializer`. For more details of the formats used and the lookup order when checking what format to use for a task see :ref:`calling-serializers`. Can messages be encrypted? -------------------------- **Answer**: Some AMQP brokers support using SSL (including RabbitMQ). You can enable this using the :setting:`broker_use_ssl` setting. It's also possible to add additional encryption and security to messages; if you have a need for this then you should contact the :ref:`mailing-list`. Is it safe to run :program:`celery worker` as root? --------------------------------------------------- **Answer**: No! We're not currently aware of any security issues, but it would be incredibly naive to assume that they don't exist, so running the Celery services (:program:`celery worker`, :program:`celery beat`, :program:`celeryev`, etc) as an unprivileged user is recommended. .. _faq-brokers: Brokers ======= Why is RabbitMQ crashing? ------------------------- **Answer:** RabbitMQ will crash if it runs out of memory. This will be fixed in a future release of RabbitMQ. Please refer to the RabbitMQ FAQ: https://www.rabbitmq.com/faq.html#node-runs-out-of-memory .. note:: This is no longer the case: RabbitMQ versions 2.0 and above include a new persister that's tolerant to out-of-memory errors. RabbitMQ 2.1 or higher is recommended for Celery. If you're still running an older version of RabbitMQ and experience crashes, then please upgrade! Misconfiguration of Celery can eventually lead to a crash on older versions of RabbitMQ.
Even if it doesn't crash, this can still consume a lot of resources, so it's important that you're aware of the common pitfalls. * Events. Running :mod:`~celery.bin.worker` with the :option:`-E <celery worker -E>` option will send messages for events happening inside of the worker. Events should only be enabled if you have an active monitor consuming them, or if you purge the event queue periodically. * AMQP backend results. When running with the AMQP result backend, every task result will be sent as a message. If you don't collect these results, they will build up and RabbitMQ will eventually run out of memory. This result backend is now deprecated so you shouldn't be using it. Use either the RPC backend for rpc-style calls, or a persistent backend if you need multi-consumer access to results. Results expire after 1 day by default. It may be a good idea to lower this value by configuring the :setting:`result_expires` setting. If you don't use the results for a task, make sure you set the `ignore_result` option: .. code-block:: python @app.task(ignore_result=True) def mytask(): pass class MyTask(Task): ignore_result = True .. _faq-use-celery-with-stomp: Can I use Celery with ActiveMQ/STOMP? ------------------------------------- **Answer**: No. It used to be supported by :pypi:`Carrot` (our old messaging library) but isn't currently supported in :pypi:`Kombu` (our new messaging library). .. _faq-non-amqp-missing-features: What features aren't supported when not using an AMQP broker? ------------------------------------------------------------- This is an incomplete list of features not available when using the virtual transports: * Remote control commands (supported only by Redis). * Monitoring with events may not work in all virtual transports. * The `header` and `fanout` exchange types (`fanout` is supported by Redis). .. _faq-tasks: Tasks ===== .. _faq-tasks-connection-reuse: How can I reuse the same connection when calling tasks? ------------------------------------------------------- **Answer**: See the :setting:`broker_pool_limit` setting. The connection pool is enabled by default since version 2.5. .. _faq-sudo-subprocess: :command:`sudo` in a :mod:`subprocess` returns :const:`None` ------------------------------------------------------------ There's a :command:`sudo` configuration option that makes it illegal for processes without a tty to run :command:`sudo`: .. code-block:: text Defaults requiretty If you have this configuration in your :file:`/etc/sudoers` file then tasks won't be able to call :command:`sudo` when the worker is running as a daemon. If you want to enable that, then you need to remove the line from :file:`/etc/sudoers`. See: http://timelordz.com/wiki/Apache_Sudo_Commands .. _faq-deletes-unknown-tasks: Why do workers delete tasks from the queue if they're unable to process them? ----------------------------------------------------------------------------- **Answer**: The worker rejects unknown tasks, messages with encoding errors and messages that don't contain the proper fields (as per the task message protocol). If it didn't reject them they could be redelivered again and again, causing a loop. Recent versions of RabbitMQ have the ability to configure a dead-letter queue for an exchange, so that rejected messages are moved there. .. _faq-execute-task-by-name: Can I call a task by name? ----------------------------- **Answer**: Yes, use :meth:`@send_task`. You can also call a task by name, from any language, using an AMQP client: ..
code-block:: python >>> app.send_task('tasks.add', args=[2, 2], kwargs={}) .. _faq-get-current-task-id: Can I get the task id of the current task? ---------------------------------------------- **Answer**: Yes, the current id and more are available in the task request:: @app.task(bind=True) def mytask(self): cache.set(self.request.id, "Running") For more information see :ref:`task-request-info`. If you don't have a reference to the task instance you can use :attr:`app.current_task <@current_task>`: .. code-block:: python >>> app.current_task.request.id But note that this will be any task, be it one executed by the worker, or a task called directly by that task, or a task called eagerly. To get the current task being worked on specifically, use :attr:`app.current_worker_task <@current_worker_task>`: .. code-block:: python >>> app.current_worker_task.request.id .. note:: Both :attr:`~@current_task` and :attr:`~@current_worker_task` can be :const:`None`. .. _faq-custom-task-ids: Can I specify a custom task_id? ------------------------------- **Answer**: Yes, use the `task_id` argument to :meth:`Task.apply_async`: .. code-block:: pycon >>> task.apply_async(args, kwargs, task_id='…') Can I use decorators with tasks? -------------------------------- **Answer**: Yes, but please see note in the sidebar at :ref:`task-basics`. .. _faq-natural-task-ids: Can I use natural task ids? --------------------------- **Answer**: Yes, but make sure it's unique, as the behavior for two tasks existing with the same id is undefined. The world will probably not explode, but they can definitely overwrite each other's results. .. _faq-task-callbacks: Can I run a task once another task has finished? ------------------------------------------------ **Answer**: Yes, you can safely launch a task inside a task. A common pattern is to add callbacks to tasks: .. code-block:: python from celery.utils.log import get_task_logger logger = get_task_logger(__name__) @app.task def add(x, y): return x + y @app.task(ignore_result=True) def log_result(result): logger.info("log_result got: %r", result) Invocation: .. code-block:: pycon >>> (add.s(2, 2) | log_result.s()).delay() See :doc:`userguide/canvas` for more information. .. _faq-cancel-task: Can I cancel the execution of a task? ------------------------------------- **Answer**: Yes, use :meth:`result.revoke() <celery.result.AsyncResult.revoke>`: .. code-block:: pycon >>> result = add.apply_async(args=[2, 2], countdown=120) >>> result.revoke() or if you only have the task id: .. code-block:: pycon >>> from proj.celery import app >>> app.control.revoke(task_id) The latter also supports passing a list of task ids as an argument. .. _faq-node-not-receiving-broadcast-commands: Why aren't my remote control commands received by all workers? -------------------------------------------------------------- **Answer**: To receive broadcast remote control commands, every worker node creates a unique queue name, based on the nodename of the worker. If you have more than one worker with the same host name, the control commands will be received in round-robin between them. To work around this you can explicitly set the nodename for every worker using the :option:`-n <celery worker -n>` argument to :mod:`~celery.bin.worker`: .. code-block:: console $ celery -A proj worker -n worker1@%h $ celery -A proj worker -n worker2@%h where ``%h`` expands into the current hostname. .. _faq-task-routing: Can I send some tasks to only some servers?
-------------------------------------------- **Answer:** Yes, you can route tasks to one or more workers, using different message routing topologies, and a worker instance can bind to multiple queues. See :doc:`userguide/routing` for more information. .. _faq-disable-prefetch: Can I disable prefetching of tasks? ----------------------------------- **Answer**: Maybe! The AMQP term "prefetch" is confusing, as it's only used to describe the task prefetching *limit*. There's no actual prefetching involved. Disabling the prefetch limits is possible, but that means the worker will consume as many tasks as it can, as fast as possible. A discussion on prefetch limits, and configuration settings for a worker that only reserves one task at a time is found here: :ref:`optimizing-prefetch-limit`. .. _faq-change-periodic-task-interval-at-runtime: Can I change the interval of a periodic task at runtime? -------------------------------------------------------- **Answer**: Yes, you can use the Django database scheduler, or you can create a new schedule subclass and override :meth:`~celery.schedules.schedule.is_due`: .. code-block:: python from celery.schedules import schedule class my_schedule(schedule): def is_due(self, last_run_at): return run_now, next_time_to_check .. _faq-task-priorities: Does Celery support task priorities? ------------------------------------ **Answer**: Yes, RabbitMQ supports priorities since version 3.5.0, and the Redis transport emulates priority support. You can also prioritize work by routing high priority tasks to different workers. In the real world this usually works better than per message priorities. You can use this in combination with rate limiting, and per message priorities to achieve a responsive system. .. _faq-acks_late-vs-retry: Should I use retry or acks_late? -------------------------------- **Answer**: Depends. It's not necessarily one or the other, you may want to use both. `Task.retry` is used to retry tasks, notably for expected errors that are catchable with the :keyword:`try` block. The AMQP transaction isn't used for these errors: **if the task raises an exception it's still acknowledged!** The `acks_late` setting would be used when you need the task to be executed again if the worker (for some reason) crashes mid-execution. It's important to note that the worker isn't known to crash, and if it does it's usually an unrecoverable error that requires human intervention (bug in the worker, or task code). In an ideal world you could safely retry any task that's failed, but this is rarely the case. Imagine the following task: .. code-block:: python @app.task def process_upload(filename, tmpfile): # Increment a file count stored in a database increment_file_counter() add_file_metadata_to_db(filename, tmpfile) copy_file_to_destination(filename, tmpfile) If this crashed in the middle of copying the file to its destination the world would contain incomplete state. This isn't a critical scenario of course, but you can probably imagine something far more sinister. So for ease of programming we have less reliability; it's a good default, users who require it and know what they are doing can still enable acks_late (and in the future hopefully use manual acknowledgment). In addition `Task.retry` has features not available in AMQP transactions: delay between retries, max retries, etc. So use retry for Python errors, and if your task is idempotent combine that with `acks_late` if that level of reliability is required.
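As a rough sketch of combining the two, assuming an ``app`` instance and a hypothetical ``download`` helper (the task name and retry policy values are arbitrary examples, not recommendations):

.. code-block:: python

    @app.task(bind=True, acks_late=True, max_retries=3)
    def fetch_page(self, url):
        try:
            return download(url)  # hypothetical helper that may time out
        except TimeoutError as exc:
            # Expected error: retry handles it, while acks_late guards
            # against the worker itself crashing mid-execution.
            raise self.retry(exc=exc, countdown=10)

..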
_faq-schedule-at-specific-time: Can I schedule tasks to execute at a specific time? --------------------------------------------------- .. module:: celery.app.task **Answer**: Yes. You can use the `eta` argument of :meth:`Task.apply_async`. See also :ref:`guide-beat`. .. _faq-safe-worker-shutdown: Can I safely shut down the worker? ---------------------------------- **Answer**: Yes, use the :sig:`TERM` signal. This will tell the worker to finish all currently executing jobs and shut down as soon as possible. No tasks should be lost even with experimental transports as long as the shutdown completes. You should never stop :mod:`~celery.bin.worker` with the :sig:`KILL` signal (``kill -9``), unless you've tried :sig:`TERM` a few times and waited a few minutes to let it get a chance to shut down. Also make sure you kill the main worker process only, not any of its child processes. You can direct a kill signal to a specific child process if you know the process is currently executing a task the worker shutdown is depending on, but this also means that a ``WorkerLostError`` state will be set for the task so the task won't run again. Identifying the type of process is easier if you have installed the :pypi:`setproctitle` module: .. code-block:: console $ pip install setproctitle With this library installed you'll be able to see the type of process in :command:`ps` listings, but the worker must be restarted for this to take effect. .. seealso:: :ref:`worker-stopping` .. _faq-daemonizing: Can I run the worker in the background on [platform]? ----------------------------------------------------- **Answer**: Yes, please see :ref:`daemonizing`. .. _faq-django: Django ====== .. _faq-django-beat-database-tables: What purpose do the database tables created by ``django-celery-beat`` have? ----------------------------------------------------------------------------- When the database-backed schedule is used the periodic task schedule is taken from the ``PeriodicTask`` model; there are also several other helper tables (``IntervalSchedule``, ``CrontabSchedule``, ``PeriodicTasks``). .. _faq-django-result-database-tables: What purpose do the database tables created by ``django-celery-results`` have? -------------------------------------------------------------------------------- The Django database result backend extension requires two extra models: ``TaskResult`` and ``GroupResult``. .. _faq-windows: Windows ======= .. _faq-windows-worker-embedded-beat: Does Celery support Windows? ---------------------------------------------------------------- **Answer**: No. Since Celery 4.x, Windows is no longer supported due to lack of resources. But it may still work and we are happy to accept patches. celery-4.1.0/docs/glossary.rst0000644000175000017500000001054013130607475016260 0ustar omeromer00000000000000.. _glossary: Glossary ======== .. glossary:: :sorted: acknowledged Workers acknowledge messages to signify that a message has been handled. Failing to acknowledge a message will cause the message to be redelivered. Exactly when a transaction is considered a failure varies by transport. In AMQP the transaction fails when the connection/channel is closed (or lost), but in Redis/SQS the transaction times out after a configurable amount of time (the ``visibility_timeout``). ack Short for :term:`acknowledged`.
early acknowledgment Task is :term:`acknowledged` just-in-time before being executed, meaning the task won't be redelivered to another worker if the machine loses power, or the worker instance is abruptly killed, mid-execution. Configured using :setting:`task_acks_late`. late acknowledgment Task is :term:`acknowledged` after execution (both if successful, or if the task is raising an error), which means the task will be redelivered to another worker in the event of the machine losing power, or the worker instance being killed mid-execution. Configured using :setting:`task_acks_late`. early ack Short for :term:`early acknowledgment` late ack Short for :term:`late acknowledgment` ETA "Estimated Time of Arrival", in Celery and Google Task Queue, etc., used as the term for a delayed message that should not be processed until the specified ETA time. See :ref:`calling-eta`. request Task messages are converted to *requests* within the worker. The request information is also available as the task's :term:`context` (the ``task.request`` attribute). calling Sends a task message so that the task function is :term:`executed <executing>` by a worker. kombu Python messaging library used by Celery to send and receive messages. billiard Fork of the Python multiprocessing library containing improvements required by Celery. executing Workers *execute* task :term:`requests <request>`. apply Originally a synonym to :term:`call <calling>` but used to signify that a function is executed by the current process. context The context of a task contains information like the id of the task, its arguments and what queue it was delivered to. It can be accessed as the task's ``request`` attribute. See :ref:`task-request-info` idempotent Idempotence is a mathematical property that describes a function that can be called multiple times without changing the result. Practically it means that a function can be repeated many times without unintended effects, but not necessarily side-effect free in the pure sense (compare to :term:`nullipotent`). Further reading: https://en.wikipedia.org/wiki/Idempotent nullipotent describes a function that'll have the same effect, and give the same result, even if called zero or multiple times (side-effect free). A stronger version of :term:`idempotent`. reentrant describes a function that can be interrupted in the middle of execution (e.g., by hardware interrupt or signal), and then safely called again later. Reentrancy isn't the same as :term:`idempotence <idempotent>` as the return value doesn't have to be the same given the same inputs, and a reentrant function may have side effects as long as it can be interrupted; an idempotent function is always reentrant, but the reverse may not be true. cipater Celery release 3.1 named after a song by Autechre (http://www.youtube.com/watch?v=OHsaqUr_33Y) prefetch multiplier The :term:`prefetch count` is configured by using the :setting:`worker_prefetch_multiplier` setting, which is multiplied by the number of pool slots (threads/processes/greenthreads); a sketch of the arithmetic follows this glossary. `prefetch count` Maximum number of unacknowledged messages a consumer can hold and if exceeded the transport shouldn't deliver any more messages to that consumer. See :ref:`optimizing-prefetch-limit`. pidbox A process mailbox, used to implement remote control commands.
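To illustrate the two prefetch terms above, a sketch of the arithmetic with example values only (the multiplier shown is the setting's default; the pool size is arbitrary):

.. code-block:: python

    worker_prefetch_multiplier = 4  # the setting (default value)
    pool_slots = 8                  # e.g. a worker started with -c 8
    # maximum number of unacknowledged messages the worker will reserve:
    prefetch_count = worker_prefetch_multiplier * pool_slots  # -> 32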
celery-4.1.0/docs/AUTHORS.txt0000644000175000017500000001221213130607475015547 0ustar omeromer00000000000000========= AUTHORS ========= :order: sorted Aaron Ross Adam Endicott Adriano Petrich Akira Matsuzaki Alan Brogan Alec Clowes Ales Zoulek Allan Caffee Andrew McFague Andrew Watts Armin Ronacher Ask Solem Augusto Becciu Balachandran C Bartosz Ptaszynski Ben Firshman Brad Jasper Branko Čibej Brendon Crawford Brian Bouterse Brian Rosner Bryan Berg Chase Seibert Chris Adams Chris Angove Chris Chamberlin Chris Rose Chris St. Pierre Chris Streeter Christoph Burgmer Christopher Peplin Clay Gerrard Dan McGee Daniel Hepper Daniel Lundin Daniel Watkins David Arthur David Cramer David Miller David Strauss David White Eran Rundstein Felix Berger Florian Apolloner Frédéric Junod Gert Van Gool Greg Haskins Greg Taylor Grégoire Cachet Gunnlaugur Thor Briem Hari Harm Verhagen Honza Kral Ian A Wilson Ignas Mikalajūnas Ionel Maries Cristian Ionut Turturica Iurii Kriachko Ivan Metzlar Jannis Leidel Jason Baker Jay McGrath Jeff Balogh Jeff Terrace Jerzy Kozera Jesper Noehr John Watson John Whitlock Jonas Haag Jonas Obrist Jonatan Heyman Joshua Ginsberg Juan Ignacio Catalano Juarez Bochi Jude Nagurney Julien Poissonnier Keith Perkins Kevin Tran Kornelijus Survila Leo Dirac Luis Clara Gomez Lukas Linhart Luke Zapart Marcin Kuźmiński Marcin Lulek Mark Hellewell Mark Lavin Mark Parncutt Mark Stover Mark Thurman Martin Galpin Martin Melin Matt Ullman Matt Williamson Matthew J Morrison Matthew Miller Mauro Rocco Maxim Bodyansky Mher Movsisyan Michael Elsdoerfer Michael Fladischer Miguel Hernandez Martos Mikhail Gusarov Mikhail Korobov Mitar Môshe van der Sterre Neil Chintomby Noah Kantrowitz Norman Richards Patrick Altman Peter Bittner Piotr Sikora Primož Kerin Remy Noel Reza Lotun Roberto Gaiser Roger Hu Rune Halvorsen Ryan P. Kelly Ryan Petrello Sam Cooke Sean Creeley Sean O'Connor Seong Won Mun Simon Josi Steeve Morin Stefan Kjartansson Steven Skoczen Tayfun Sen Thomas Johansson Thomas Forbes Timo Sugliani Travis Swicegood Vincent Driessen Vitaly Babiy Vladimir Kryachko Wes Turner Wes Winham Yury V. Zaytsev jpellerin kuno lookfwd sdcooke Łukasz Langa Łukasz Oleś celery-4.1.0/docs/Makefile0000644000175000017500000002022613130607475015325 0ustar omeromer00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " apicheck to verify that all modules are present in autodoc" @echo " configcheck to verify that all modules are present in autodoc" @echo " spelling to perform a spell check" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PROJ.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PROJ.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished."
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PROJ" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PROJ" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: apicheck apicheck: $(SPHINXBUILD) -b apicheck $(ALLSPHINXOPTS) $(BUILDDIR)/apicheck .PHONY: configcheck configcheck: $(SPHINXBUILD) -b configcheck $(ALLSPHINXOPTS) $(BUILDDIR)/configcheck .PHONY: spelling spelling: SPELLCHECK=1 $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." celery-4.1.0/docs/contributing.rst0000644000175000017500000007771713130607475017137 0ustar omeromer00000000000000.. _contributing: ============== Contributing ============== Welcome! This document is fairly extensive and you aren't really expected to study this in detail for small contributions; The most important rule is that contributing must be easy and that the community is friendly and not nitpicking on details, such as coding style. If you're reporting a bug you should read the Reporting bugs section below to ensure that your bug report contains enough information to successfully diagnose the issue, and if you're contributing code you should try to mimic the conventions you see surrounding the code you're working on, but in the end all patches will be cleaned up by the person merging the changes so don't worry too much. .. contents:: :local: .. _community-code-of-conduct: Community Code of Conduct ========================= The goal is to maintain a diverse community that's pleasant for everyone. That's why we would greatly appreciate it if everyone contributing to and interacting with the community also followed this Code of Conduct. The Code of Conduct covers our behavior as members of the community, in any forum, mailing list, wiki, website, Internet relay chat (IRC), public meeting or private correspondence. The Code of Conduct is heavily based on the `Ubuntu Code of Conduct`_, and the `Pylons Code of Conduct`_. .. _`Ubuntu Code of Conduct`: https://www.ubuntu.com/community/conduct .. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html Be considerate -------------- Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and we expect you to take those consequences into account when making decisions. Even if it's not obvious at the time, our contributions to Celery will impact the work of others. For example, changes to code, infrastructure, policy, documentation and translations during a release may negatively impact others' work. Be respectful ------------- The Celery community and its members treat one another with respect. Everyone can make a valuable contribution to Celery. We may not always agree, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It's important to remember that a community where people feel uncomfortable or threatened isn't a productive one. We expect members of the Celery community to be respectful when dealing with other contributors as well as with people outside the Celery project and with users of Celery. Be collaborative ---------------- Collaboration is central to Celery and to the larger free software community. We should always be open to collaboration. Your work should be done transparently and patches from Celery should be given back to the community when they're made, not just when the distribution releases. If you wish to work on new code for existing upstream projects, at least keep those projects informed of your ideas and progress.
It may not be possible to get consensus from upstream, or even from your colleagues about the correct implementation for an idea, so don't feel obliged to have that agreement before you begin, but at least keep the outside world informed of your work, and publish your work in a way that allows outsiders to test, discuss, and contribute to your efforts. When you disagree, consult others --------------------------------- Disagreements, both political and technical, happen all the time and the Celery community is no exception. It's important that we resolve disagreements and differing views constructively and with the help of the community and community process. If you really want to go a different way, then we encourage you to make a derivative distribution or alternate set of packages that still build on the work we've done to utilize as common a core as possible. When you're unsure, ask for help -------------------------------- Nobody knows everything, and nobody is expected to be perfect. Asking questions avoids many problems down the road, and so questions are encouraged. Those who are asked questions should be responsive and helpful. However, when asking a question, care must be taken to do so in an appropriate forum. Step down considerately ----------------------- Developers on every project come and go and Celery is no different. When you leave or disengage from the project, in whole or in part, we ask that you do so in a way that minimizes disruption to the project. This means you should tell people you're leaving and take the proper steps to ensure that others can pick up where you leave off. .. _reporting-bugs: Reporting Bugs ============== .. _vulnsec: Security -------- You must never report security-related issues, vulnerabilities or bugs including sensitive information to the bug tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to ``security@celeryproject.org``.
If you'd like to submit the information encrypted our PGP key is:: -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1.4.15 (Darwin) mQENBFJpWDkBCADFIc9/Fpgse4owLNvsTC7GYfnJL19XO0hnL99sPx+DPbfr+cSE 9wiU+Wp2TfUX7pCLEGrODiEP6ZCZbgtiPgId+JYvMxpP6GXbjiIlHRw1EQNH8RlX cVxy3rQfVv8PGGiJuyBBjxzvETHW25htVAZ5TI1+CkxmuyyEYqgZN2fNd0wEU19D +c10G1gSECbCQTCbacLSzdpngAt1Gkrc96r7wGHBBSvDaGDD2pFSkVuTLMbIRrVp lnKOPMsUijiip2EMr2DvfuXiUIUvaqInTPNWkDynLoh69ib5xC19CSVLONjkKBsr Pe+qAY29liBatatpXsydY7GIUzyBT3MzgMJlABEBAAG0MUNlbGVyeSBTZWN1cml0 eSBUZWFtIDxzZWN1cml0eUBjZWxlcnlwcm9qZWN0Lm9yZz6JATgEEwECACIFAlJp WDkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOArFOUDCicIw1IH/26f CViDC7/P13jr+srRdjAsWvQztia9HmTlY8cUnbmkR9w6b6j3F2ayw8VhkyFWgYEJ wtPBv8mHKADiVSFARS+0yGsfCkia5wDSQuIv6XqRlIrXUyqJbmF4NUFTyCZYoh+C ZiQpN9xGhFPr5QDlMx2izWg1rvWlG1jY2Es1v/xED3AeCOB1eUGvRe/uJHKjGv7J rj0pFcptZX+WDF22AN235WYwgJM6TrNfSu8sv8vNAQOVnsKcgsqhuwomSGsOfMQj LFzIn95MKBBU1G5wOs7JtwiV9jefGqJGBO2FAvOVbvPdK/saSnB+7K36dQcIHqms 5hU4Xj0RIJiod5idlRC5AQ0EUmlYOQEIAJs8OwHMkrdcvy9kk2HBVbdqhgAREMKy gmphDp7prRL9FqSY/dKpCbG0u82zyJypdb7QiaQ5pfPzPpQcd2dIcohkkh7G3E+e hS2L9AXHpwR26/PzMBXyr2iNnNc4vTksHvGVDxzFnRpka6vbI/hrrZmYNYh9EAiv uhE54b3/XhXwFgHjZXb9i8hgJ3nsO0pRwvUAM1bRGMbvf8e9F+kqgV0yWYNnh6QL 4Vpl1+epqp2RKPHyNQftbQyrAHXT9kQF9pPlx013MKYaFTADscuAp4T3dy7xmiwS crqMbZLzfrxfFOsNxTUGE5vmJCcm+mybAtRo4aV6ACohAO9NevMx8pUAEQEAAYkB HwQYAQIACQUCUmlYOQIbDAAKCRDgKxTlAwonCNFbB/9esir/f7TufE+isNqErzR/ aZKZo2WzZR9c75kbqo6J6DYuUHe6xI0OZ2qZ60iABDEZAiNXGulysFLCiPdatQ8x 8zt3DF9BMkEck54ZvAjpNSern6zfZb1jPYWZq3TKxlTs/GuCgBAuV4i5vDTZ7xK/ aF+OFY5zN7ciZHkqLgMiTZ+RhqRcK6FhVBP/Y7d9NlBOcDBTxxE1ZO1ute6n7guJ ciw4hfoRk8qNN19szZuq3UU64zpkM2sBsIFM9tGF2FADRxiOaOWZHmIyVZriPFqW RUwjSjs7jBVNq0Vy4fCu/5+e+XLOUBOoqtM5W7ELt0t1w9tXebtPEetV86in8fU2 =0chn -----END PGP PUBLIC KEY BLOCK----- Other bugs ---------- Bugs can always be described to the :ref:`mailing-list`, but the best way to report an issue and to ensure a timely response is to use the issue tracker. 1) **Create a GitHub account**. You need to `create a GitHub account`_ to be able to create new issues and participate in the discussion. .. _`create a GitHub account`: https://github.com/signup/free 2) **Determine if your bug is really a bug**. You shouldn't file a bug if you're requesting support. For that you can use the :ref:`mailing-list`, or :ref:`irc-channel`. 3) **Make sure your bug hasn't already been reported**. Search through the appropriate Issue tracker. If a bug like yours was found, check if you have new information that could be reported to help the developers fix the bug. 4) **Check if you're using the latest version**. A bug could be fixed by some other improvements and fixes - it might not have an existing report in the bug tracker. Make sure you're using the latest releases of celery, billiard, kombu, amqp, and vine. 5) **Collect information about the bug**. To have the best chance of having a bug fixed, we need to be able to easily reproduce the conditions that caused it. Most of the time this information will be from a Python traceback message, though some bugs might be in design, spelling or other errors on the website/docs/code. A) If the error is from a Python traceback, include it in the bug report. B) We also need to know what platform you're running (Windows, macOS, Linux, etc.), the version of your Python interpreter, and the version of Celery, and related packages that you were running when the bug occurred. C) If you're reporting a race condition or a deadlock, tracebacks can be hard to get or might not be that useful. 
Try to inspect the process to get more diagnostic data. Some ideas: * Enable Celery's :ref:`breakpoint signal ` and use it to inspect the process's state. This will allow you to open a :mod:`pdb` session. * Collect tracing data using `strace`_ (Linux), :command:`dtruss` (macOS), :command:`ktrace` (BSD), `ltrace`_, and `lsof`_. D) Include the output from the :command:`celery report` command: .. code-block:: console $ celery -A proj report This will also include your configuration settings and it will try to remove values for keys known to be sensitive, but make sure you also verify the information before submitting so that it doesn't contain confidential information like API tokens and authentication credentials. 6) **Submit the bug**. By default `GitHub`_ will email you to let you know when new comments have been made on your bug. In the event you've turned this feature off, you should check back on occasion to ensure you don't miss any questions a developer trying to fix the bug might ask. .. _`GitHub`: https://github.com .. _`strace`: https://en.wikipedia.org/wiki/Strace .. _`ltrace`: https://en.wikipedia.org/wiki/Ltrace .. _`lsof`: https://en.wikipedia.org/wiki/Lsof .. _issue-trackers: Issue Trackers -------------- Bugs for a package in the Celery ecosystem should be reported to the relevant issue tracker. * :pypi:`celery`: https://github.com/celery/celery/issues/ * :pypi:`kombu`: https://github.com/celery/kombu/issues * :pypi:`amqp`: https://github.com/celery/py-amqp/issues * :pypi:`vine`: https://github.com/celery/vine/issues * :pypi:`librabbitmq`: https://github.com/celery/librabbitmq/issues * :pypi:`django-celery-beat`: https://github.com/celery/django-celery-beat/issues * :pypi:`django-celery-results`: https://github.com/celery/django-celery-results/issues If you're unsure of the origin of the bug you can ask the :ref:`mailing-list`, or just use the Celery issue tracker. Contributors guide to the code base =================================== There's a separate section for internal details, including details about the code base and a style guide. Read :ref:`internals-guide` for more! .. _versions: Versions ======== Version numbers consist of a major version, minor version and a release number. Since version 2.1.0 we use the versioning semantics described by SemVer: http://semver.org. Stable releases are published on PyPI while development releases are only available in the GitHub git repository as tags. All version tags start with "v", so version 0.8.0 is the tag v0.8.0. .. _git-branches: Branches ======== Current active version branches: * dev (which git calls "master") (https://github.com/celery/celery/tree/master) * 4.0 (https://github.com/celery/celery/tree/4.0) * 3.1 (https://github.com/celery/celery/tree/3.1) * 3.0 (https://github.com/celery/celery/tree/3.0) You can see the state of any branch by looking at the Changelog: https://github.com/celery/celery/blob/master/Changelog If the branch is in active development the topmost version info should contain meta-data like: .. code-block:: restructuredtext 2.4.0 ====== :release-date: TBA :status: DEVELOPMENT :branch: dev (git calls this master) The ``status`` field can be one of: * ``PLANNING`` The branch is currently experimental and in the planning stage. * ``DEVELOPMENT`` The branch is in active development, but the test suite should be passing and the product should be working and possible for users to test. * ``FROZEN`` The branch is frozen, and no more features will be accepted.
When a branch is frozen the focus is on testing the version as much as possible before it is released. dev branch ---------- The dev branch (called "master" by git), is where development of the next version happens. Maintenance branches -------------------- Maintenance branches are named after the version -- for example, the maintenance branch for the 2.2.x series is named ``2.2``. Previously these were named ``releaseXX-maint``. The versions we currently maintain are: * 3.1 This is the current series. * 3.0 This is the previous series, and the last version to support Python 2.5. Archived branches ----------------- Archived branches are kept for preserving history only, and theoretically someone could provide patches for these if they depend on a series that's no longer officially supported. An archived version is named ``X.Y-archived``. Our currently archived branches are: * :github_branch:`2.5-archived` * :github_branch:`2.4-archived` * :github_branch:`2.3-archived` * :github_branch:`2.1-archived` * :github_branch:`2.0-archived` * :github_branch:`1.0-archived` Feature branches ---------------- Major new features are worked on in dedicated branches. There's no strict naming requirement for these branches. Feature branches are removed once they've been merged into a release branch. Tags ==== - Tags are used exclusively for tagging releases. A release tag is named with the format ``vX.Y.Z`` -- for example ``v2.3.1``. - Experimental releases contain an additional identifier ``vX.Y.Z-id`` -- for example ``v3.0.0-rc1``. - Experimental tags may be removed after the official release. .. _contributing-changes: Working on Features & Patches ============================= .. note:: Contributing to Celery should be as simple as possible, so none of these steps should be considered mandatory. You can even send in patches by email if that's your preferred work method. We won't like you any less; any contribution you make is always appreciated! However, following these steps may make the maintainers' lives easier, and may mean that your changes will be accepted sooner. Forking and setting up the repository ------------------------------------- First you need to fork the Celery repository; a good introduction to this is in the GitHub Guide: `Fork a Repo`_. After you have cloned the repository you should check out your copy to a directory on your machine: .. code-block:: console $ git clone git@github.com:username/celery.git When the repository is cloned enter the directory to set up easy access to upstream changes: .. code-block:: console $ cd celery $ git remote add upstream git://github.com/celery/celery.git $ git fetch upstream If you need to pull in new changes from upstream you should always use the ``--rebase`` option to ``git pull``: .. code-block:: console git pull --rebase upstream master With this option you don't clutter the history with merging commit notes. See `Rebasing merge commits in git`_. If you want to learn more about rebasing see the `Rebase`_ section in the GitHub guides. If you need to work on a different branch than the one git calls ``master``, you can fetch and check out a remote branch like this:: git checkout --track -b 3.0-devel origin/3.0-devel .. _`Fork a Repo`: https://help.github.com/fork-a-repo/ .. _`Rebasing merge commits in git`: https://notes.envato.com/developers/rebasing-merge-commits-in-git/ .. _`Rebase`: https://help.github.com/rebase/ ..
_contributing-testing: Running the unit test suite --------------------------- To run the Celery test suite you need to install a few dependencies. A complete list of the dependencies needed is located in :file:`requirements/test.txt`. If you're working on the development version, then you need to install the development requirements first: .. code-block:: console $ pip install -U -r requirements/dev.txt Both the stable and the development version have testing-related dependencies, so install these next: .. code-block:: console $ pip install -U -r requirements/test.txt $ pip install -U -r requirements/default.txt After installing the dependencies required, you can now execute the test suite by calling :pypi:`py.test `: .. code-block:: console $ py.test Some useful options to :command:`py.test` are: * ``-x`` Stop running the tests at the first test that fails. * ``-s`` Don't capture output. * ``-v`` Run with verbose output. If you want to run the tests for a single test file only you can do so like this: .. code-block:: console $ py.test t/unit/worker/test_worker_job.py .. _contributing-pull-requests: Creating pull requests ---------------------- When your feature/bugfix is complete you may want to submit a pull request so that it can be reviewed by the maintainers. Creating pull requests is easy, and also lets you track the progress of your contribution. Read the `Pull Requests`_ section in the GitHub Guide to learn how this is done. You can also attach pull requests to existing issues by following the steps outlined here: https://bit.ly/koJoso .. _`Pull Requests`: http://help.github.com/send-pull-requests/ .. _contributing-coverage: Calculating test coverage ~~~~~~~~~~~~~~~~~~~~~~~~~ To calculate test coverage you must first install the :pypi:`pytest-cov` module. Installing the :pypi:`pytest-cov` module: .. code-block:: console $ pip install -U pytest-cov Code coverage in HTML format ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. Run :command:`py.test` with the ``--cov-report=html`` argument enabled: .. code-block:: console $ py.test --cov=celery --cov-report=html #. The coverage output will then be located in the :file:`htmlcov/` directory: .. code-block:: console $ open htmlcov/index.html Code coverage in XML (Cobertura-style) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. Run :command:`py.test` with the ``--cov-report=xml`` argument enabled: .. code-block:: console $ py.test --cov=celery --cov-report=xml #. The coverage XML output will then be located in the :file:`coverage.xml` file. .. _contributing-tox: Running the tests on all supported Python versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There's a :pypi:`tox` configuration file in the top directory of the distribution. To run the tests for all supported Python versions simply execute: .. code-block:: console $ tox Use the ``tox -e`` option if you only want to test specific Python versions: .. code-block:: console $ tox -e 2.7 Building the documentation -------------------------- To build the documentation you need to install the dependencies listed in :file:`requirements/docs.txt` and :file:`requirements/default.txt`: .. code-block:: console $ pip install -U -r requirements/docs.txt $ pip install -U -r requirements/default.txt After these dependencies are installed you should be able to build the docs by running: .. code-block:: console $ cd docs $ rm -rf _build $ make html Make sure there are no errors or warnings in the build output.
After building succeeds the documentation is available at :file:`_build/html`. .. _contributing-verify: Verifying your contribution --------------------------- To use these tools you need to install a few dependencies. These dependencies can be found in :file:`requirements/pkgutils.txt`. Installing the dependencies: .. code-block:: console $ pip install -U -r requirements/pkgutils.txt pyflakes & PEP-8 ~~~~~~~~~~~~~~~~ To ensure that your changes conform to :pep:`8` and to run pyflakes execute: .. code-block:: console $ make flakecheck To not return a negative exit code when this command fails use the ``flakes`` target instead: .. code-block:: console $ make flakes API reference ~~~~~~~~~~~~~ To make sure that all modules have a corresponding section in the API reference please execute: .. code-block:: console $ make apicheck $ make indexcheck If files are missing you can add them by copying an existing reference file. If the module is internal it should be part of the internal reference located in :file:`docs/internals/reference/`. If the module is public it should be located in :file:`docs/reference/`. For example if reference is missing for the module ``celery.worker.awesome`` and this module is considered part of the public API, use the following steps: Use an existing file as a template: .. code-block:: console $ cd docs/reference/ $ cp celery.schedules.rst celery.worker.awesome.rst Edit the file using your favorite editor: .. code-block:: console $ vim celery.worker.awesome.rst # change every occurrence of ``celery.schedules`` to # ``celery.worker.awesome`` Edit the index using your favorite editor: .. code-block:: console $ vim index.rst # Add ``celery.worker.awesome`` to the index. Commit your changes: .. code-block:: console # Add the file to git $ git add celery.worker.awesome.rst $ git add index.rst $ git commit celery.worker.awesome.rst index.rst \ -m "Adds reference for celery.worker.awesome" .. _coding-style: Coding Style ============ You should probably be able to pick up the coding style from surrounding code, but it is a good idea to be aware of the following conventions. * All Python code must follow the :pep:`8` guidelines. :pypi:`pep8` is a utility you can use to verify that your code is following the conventions. * Docstrings must follow the :pep:`257` conventions, and use the following style. Do this: .. code-block:: python def method(self, arg): """Short description. More details. """ or: .. code-block:: python def method(self, arg): """Short description.""" but not this: .. code-block:: python def method(self, arg): """ Short description. """ * Lines shouldn't exceed 78 columns. You can enforce this in :command:`vim` by setting the ``textwidth`` option: .. code-block:: vim set textwidth=78 If adhering to this limit makes the code less readable, you have one more character to go on. This means 78 is a soft limit, and 79 is the hard limit :) * Import order * Python standard library (`import xxx`) * Python standard library (`from xxx import`) * Third-party packages. * Other modules from the current package. or in case of code using Django: * Python standard library (`import xxx`) * Python standard library (`from xxx import`) * Third-party packages. * Django packages. * Other modules from the current package. Within these sections the imports should be sorted by module name. Example: .. 
code-block:: python import threading import time from collections import deque from Queue import Queue, Empty from .platforms import Pidfile from .five import zip_longest, items, range from .utils.time import maybe_timedelta * Wild-card imports must not be used (`from xxx import *`). * For distributions where Python 2.5 is the oldest supported version additional rules apply: * Absolute imports must be enabled at the top of every module:: from __future__ import absolute_import * If the module uses the :keyword:`with` statement and must be compatible with Python 2.5 (celery isn't) then it must also enable that:: from __future__ import with_statement * Every future import must be on its own line, as older Python 2.5 releases didn't support importing multiple features on the same future import line:: # Good from __future__ import absolute_import from __future__ import with_statement # Bad from __future__ import absolute_import, with_statement (Note that this rule doesn't apply if the package doesn't include support for Python 2.5) * Note that we use "new-style" relative imports when the distribution doesn't support Python versions below 2.5. This requires Python 2.5 or later: .. code-block:: python from . import submodule .. _feature-with-extras: Contributing features requiring additional libraries ==================================================== Some features like a new result backend may require additional libraries that the user must install. We use setuptools `extras_require` for this, and all new optional features that require third-party libraries must be added as extras. 1) Add a new requirements file in `requirements/extras` For the Cassandra backend this is :file:`requirements/extras/cassandra.txt`, and the file looks like this: .. code-block:: text pycassa These are pip requirement files so you can have version specifiers and multiple packages are separated by newlines. A more complex example could be: .. code-block:: text # pycassa 2.0 breaks Foo pycassa>=1.0,<2.0 thrift 2) Modify ``setup.py`` After the requirements file is added you need to add it as an option to :file:`setup.py` in the ``extras_require`` section:: extra['extras_require'] = { # ... 'cassandra': extras('cassandra.txt'), } 3) Document the new feature in :file:`docs/includes/installation.txt` You must add your feature to the list in the :ref:`bundles` section of :file:`docs/includes/installation.txt`. After you've made changes to this file you need to render the distro :file:`README` file: .. code-block:: console $ pip install -U -r requirements/pkgutils.txt $ make readme That's all that needs to be done, but remember that if your feature adds additional configuration options then these need to be documented in :file:`docs/configuration.rst`. Also all settings need to be added to the :file:`celery/app/defaults.py` module. Result backends require a separate section in the :file:`docs/configuration.rst` file. .. _contact_information: Contacts ======== This is a list of people that can be contacted for questions regarding the official git repositories, PyPI packages, and Read the Docs pages. If the issue isn't an emergency then it's better to :ref:`report an issue `.
Committers ---------- Ask Solem ~~~~~~~~~ :github: https://github.com/ask :twitter: https://twitter.com/#!/asksol Asif Saif Uddin ~~~~~~~~~~~~~~~ :github: https://github.com/auvipy :twitter: https://twitter.com/#!/auvipy Dmitry Malinovsky ~~~~~~~~~~~~~~~~~ :github: https://github.com/malinoff :twitter: https://twitter.com/__malinoff__ Ionel Cristian Mărieș ~~~~~~~~~~~~~~~~~~~~~ :github: https://github.com/ionelmc :twitter: https://twitter.com/ionelmc Mher Movsisyan ~~~~~~~~~~~~~~ :github: https://github.com/mher :twitter: https://twitter.com/#!/movsm Omer Katz ~~~~~~~~~ :github: https://github.com/thedrow :twitter: https://twitter.com/the_drow Steeve Morin ~~~~~~~~~~~~ :github: https://github.com/steeve :twitter: https://twitter.com/#!/steeve Website ------- The Celery Project website is run and maintained by Mauro Rocco ~~~~~~~~~~~ :github: https://github.com/fireantology :twitter: https://twitter.com/#!/fireantology with design by: Jan Henrik Helmers ~~~~~~~~~~~~~~~~~~ :web: http://www.helmersworks.com :twitter: https://twitter.com/#!/helmers .. _packages: Packages ======== ``celery`` ---------- :git: https://github.com/celery/celery :CI: https://travis-ci.org/#!/celery/celery :Windows-CI: https://ci.appveyor.com/project/ask/celery :PyPI: :pypi:`celery` :docs: http://docs.celeryproject.org ``kombu`` --------- Messaging library. :git: https://github.com/celery/kombu :CI: https://travis-ci.org/#!/celery/kombu :Windows-CI: https://ci.appveyor.com/project/ask/kombu :PyPI: :pypi:`kombu` :docs: https://kombu.readthedocs.io ``amqp`` -------- Python AMQP 0.9.1 client. :git: https://github.com/celery/py-amqp :CI: https://travis-ci.org/#!/celery/py-amqp :Windows-CI: https://ci.appveyor.com/project/ask/py-amqp :PyPI: :pypi:`amqp` :docs: https://amqp.readthedocs.io ``vine`` -------- Promise/deferred implementation. :git: https://github.com/celery/vine/ :CI: https://travis-ci.org/#!/celery/vine/ :Windows-CI: https://ci.appveyor.com/project/ask/vine :PyPI: :pypi:`vine` :docs: https://vine.readthedocs.io ``billiard`` ------------ Fork of multiprocessing containing improvements that'll eventually be merged into the Python stdlib. :git: https://github.com/celery/billiard :CI: https://travis-ci.org/#!/celery/billiard/ :Windows-CI: https://ci.appveyor.com/project/ask/billiard :PyPI: :pypi:`billiard` ``django-celery-beat`` ---------------------- Database-backed Periodic Tasks with admin interface using the Django ORM. :git: https://github.com/celery/django-celery-beat :CI: https://travis-ci.org/#!/celery/django-celery-beat :Windows-CI: https://ci.appveyor.com/project/ask/django-celery-beat :PyPI: :pypi:`django-celery-beat` ``django-celery-results`` ------------------------- Store task results in the Django ORM, or using the Django Cache Framework. :git: https://github.com/celery/django-celery-results :CI: https://travis-ci.org/#!/celery/django-celery-results :Windows-CI: https://ci.appveyor.com/project/ask/django-celery-results :PyPI: :pypi:`django-celery-results` ``librabbitmq`` --------------- Very fast Python AMQP client written in C. :git: https://github.com/celery/librabbitmq :PyPI: :pypi:`librabbitmq` ``cell`` -------- Actor library. :git: https://github.com/celery/cell :PyPI: :pypi:`cell` ``cyme`` -------- Distributed Celery Instance manager.
:git: https://github.com/celery/cyme :PyPI: :pypi:`cyme` :docs: https://cyme.readthedocs.io/ Deprecated ---------- - ``django-celery`` :git: https://github.com/celery/django-celery :PyPI: :pypi:`django-celery` :docs: http://docs.celeryproject.org/en/latest/django - ``Flask-Celery`` :git: https://github.com/ask/Flask-Celery :PyPI: :pypi:`Flask-Celery` - ``celerymon`` :git: https://github.com/celery/celerymon :PyPI: :pypi:`celerymon` - ``carrot`` :git: https://github.com/ask/carrot :PyPI: :pypi:`carrot` - ``ghettoq`` :git: https://github.com/ask/ghettoq :PyPI: :pypi:`ghettoq` - ``kombu-sqlalchemy`` :git: https://github.com/ask/kombu-sqlalchemy :PyPI: :pypi:`kombu-sqlalchemy` - ``django-kombu`` :git: https://github.com/ask/django-kombu :PyPI: :pypi:`django-kombu` - ``pylibrabbitmq`` Old name for :pypi:`librabbitmq`. :git: :const:`None` :PyPI: :pypi:`pylibrabbitmq` .. _release-procedure: Release Procedure ================= Updating the version number --------------------------- The version number must be updated in two places: * :file:`celery/__init__.py` * :file:`docs/include/introduction.txt` After you have changed these files you must render the :file:`README` files. There's a script to convert sphinx syntax to generic reStructured Text syntax, and the make target `readme` does this for you: .. code-block:: console $ make readme Now commit the changes: .. code-block:: console $ git commit -a -m "Bumps version to X.Y.Z" and make a new version tag: .. code-block:: console $ git tag vX.Y.Z $ git push --tags Releasing --------- Commands to make a new public stable release: .. code-block:: console $ make distcheck # checks pep8, autodoc index, runs tests and more $ make dist # NOTE: Runs git clean -xdf and removes files not in the repo. $ python setup.py sdist upload --sign --identity='Celery Security Team' $ python setup.py bdist_wheel upload --sign --identity='Celery Security Team' If this is a new release series then you also need to do the following: * Go to the Read The Docs management interface at: https://readthedocs.org/projects/celery/?fromdocs=celery * Enter "Edit project" and change the default branch to the branch of this series, for example, use the ``2.4`` branch for the 2.4 series. * Also add the previous version under the "versions" tab. celery-4.1.0/docs/history/0000755000175000017500000000000013135426347015366 5ustar omeromer00000000000000celery-4.1.0/docs/history/changelog-4.0.rst0000644000175000017500000001444013135426300020336 0ustar omeromer00000000000000.. _changelog-4.0: ================ Change history ================ This document contains change notes for bugfix releases in the 4.0.x series (latentcall); please see :ref:`whatsnew-4.0` for an overview of what's new in Celery 4.0. .. _version-4.0.2: 4.0.2 ===== :release-date: 2016-12-15 03:40 PM PST :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 4.0.2 `. - **Tasks**: Fixed problem with JSON serialization of `group` (``keys must be string`` error, Issue #3688). - **Worker**: Fixed JSON serialization issue when using ``inspect active`` and friends (Issue #3667). - **App**: Fixed saferef errors when using signals (Issue #3670). - **Prefork**: Fixed bug with pack requiring bytes argument on Python 2.7.5 and earlier (Issue #3674). - **Tasks**: Saferepr did not handle unicode in bytestrings on Python 2 (Issue #3676). - **Testing**: Added new ``celery_worker_parameters`` fixture. Contributed by **Michael Howitz**. - **Tasks**: Added new ``app`` argument to ``GroupResult.restore`` (Issue #3669).
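A minimal sketch of how the new argument can be used (the group id and backend URL below are hypothetical, and the app setup is an assumption):

.. code-block:: python

    from celery import Celery
    from celery.result import GroupResult

    app = Celery('proj', backend='redis://localhost')

    # Pass the app explicitly instead of relying on the default app:
    restored = GroupResult.restore('hypothetical-group-id', app=app)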
This makes the restore method behave the same way as the ``GroupResult`` constructor. Contributed by **Andreas Pelme**. - **Tasks**: Fixed type checking crash when task takes ``*args`` on Python 3 (Issue #3678). - Documentation and examples improvements by: - **BLAGA Razvan-Paul** - **Michael Howitz** - :github_user:`paradox41` .. _version-4.0.1: 4.0.1 ===== :release-date: 2016-12-08 05:22 PM PST :release-by: Ask Solem * [Security: `CELERYSA-0003`_] Insecure default configuration The default :setting:`accept_content` setting was set to allow deserialization of pickled messages in Celery 4.0.0. The insecure default has been fixed in 4.0.1, and you can also configure the 4.0.0 version to explicitly only allow json serialized messages: .. code-block:: python app.conf.accept_content = ['json'] .. _`CELERYSA-0003`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0003.txt - **Tasks**: Added new method to register class-based tasks (Issue #3615). To register a class based task you should now call ``app.register_task``: .. code-block:: python from celery import Celery, Task app = Celery() class CustomTask(Task): def run(self): return 'hello' app.register_task(CustomTask()) - **Tasks**: Argument checking now supports keyword-only arguments on Python3 (Issue #3658). Contributed by :github_user:`sww`. - **Tasks**: The ``task-sent`` event was not being sent even if configured to do so (Issue #3646). - **Worker**: Fixed AMQP heartbeat support for eventlet/gevent pools (Issue #3649). - **App**: ``app.conf.humanize()`` would not work if configuration not finalized (Issue #3652). - **Utils**: ``saferepr`` attempted to show iterables as lists and mappings as dicts. - **Utils**: ``saferepr`` did not handle unicode-errors when attempting to format ``bytes`` on Python 3 (Issue #3610). - **Utils**: ``saferepr`` should now properly represent byte strings with non-ascii characters (Issue #3600). - **Results**: Fixed bug in elasticsearch where _index method missed the body argument (Issue #3606). Fix contributed by **何翔宇** (Sean Ho). - **Canvas**: Fixed :exc:`ValueError` in chord with single task header (Issue #3608). Fix contributed by **Viktor Holmqvist**. - **Task**: Ensure class-based task has name prior to registration (Issue #3616). Fix contributed by **Rick Wargo**. - **Beat**: Fixed problem with strings in shelve (Issue #3644). Fix contributed by **Alli**. - **Worker**: Fixed :exc:`KeyError` in ``inspect stats`` when ``-O`` argument set to something other than ``fast`` or ``fair`` (Issue #3621). - **Task**: Retried tasks were no longer sent to the original queue (Issue #3622). - **Worker**: Python 3: Fixed None/int type comparison in :file:`apps/worker.py` (Issue #3631). - **Results**: Redis has a new :setting:`redis_socket_connect_timeout` setting. - **Results**: Redis result backend passed the ``socket_connect_timeout`` argument to UNIX socket based connections by mistake, causing a crash. - **Worker**: Fixed missing logo in worker splash screen when running on Python 3.x (Issue #3627). Fix contributed by **Brian Luan**. - **Deps**: Fixed ``celery[redis]`` bundle installation (Issue #3643). Fix contributed by **Rémi Marenco**. - **Deps**: Bundle ``celery[sqs]`` now also requires :pypi:`pycurl` (Issue #3619). - **Worker**: Hard time limits were no longer being respected (Issue #3618). - **Worker**: Soft time limit log showed ``Trues`` instead of the number of seconds. - **App**: ``registry_cls`` argument no longer had any effect (Issue #3613). 
- **Worker**: Event producer now uses ``connection_for_write`` (Issue #3525). - **Results**: Redis/memcache backends now use :setting:`result_expires` to expire chord counter (Issue #3573). Contributed by **Tayfun Sen**. - **Django**: Fixed command for upgrading settings with Django (Issue #3563). Fix contributed by **François Voron**. - **Testing**: Added a ``celery_parameters`` test fixture to be able to use customized ``Celery`` init parameters. (#3626) Contributed by **Steffen Allner**. - Documentation improvements contributed by - :github_user:`csfeathers` - **Moussa Taifi** - **Yuhannaa** - **Laurent Peuch** - **Christian** - **Bruno Alla** - **Steven Johns** - :github_user:`tnir` - **GDR!** .. _version-4.0.0: 4.0.0 ===== :release-date: 2016-11-04 02:00 P.M PDT :release-by: Ask Solem See :ref:`whatsnew-4.0` (in :file:`docs/whatsnew-4.0.rst`). .. _version-4.0.0rc7: 4.0.0rc7 ======== :release-date: 2016-11-02 01:30 P.M PDT Important notes --------------- - Database result backend related setting names changed from ``sqlalchemy_*`` -> ``database_*``. The ``sqlalchemy_`` named settings won't work at all in this version so you need to rename them. This is a last-minute change, and as they were not supported in 3.1 we will not be providing aliases. - ``chain(A, B, C)`` now works the same way as ``A | B | C``. This means calling ``chain()`` might not actually return a chain; it can return a group or any other type depending on how the workflow can be optimized. celery-4.1.0/docs/history/index.rst0000644000175000017500000000065713135426300017224 0ustar omeromer00000000000000.. _history: ========= History ========= This section contains historical change histories; for the latest version please visit :ref:`changelog`. :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 changelog-4.0 changelog-3.1 whatsnew-3.0 changelog-3.0 whatsnew-2.5 changelog-2.5 changelog-2.4 changelog-2.3 changelog-2.2 changelog-2.1 changelog-2.0 changelog-1.0 celery-4.1.0/docs/history/changelog-2.4.rst0000644000175000017500000003136013130607475020351 0ustar omeromer00000000000000.. _changelog-2.4: =============================== Change history for Celery 2.4 =============================== .. contents:: :local: .. _version-2.4.5: 2.4.5 ===== :release-date: 2011-12-02 05:00 p.m. GMT :release-by: Ask Solem * Periodic task interval schedules were accidentally rounded down, resulting in some periodic tasks being executed early. * Logging of humanized times in the beat log is now more detailed. * New :ref:`brokers` section in the Getting Started part of the Documentation. This replaces the old "Other queues" tutorial, and adds documentation for MongoDB, Beanstalk and CouchDB. .. _version-2.4.4: 2.4.4 ===== :release-date: 2011-11-25 04:00 p.m. GMT :release-by: Ask Solem .. _v244-security-fixes: Security Fixes -------------- * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than real id's when the :option:`--uid `/ :option:`--gid ` arguments to :program:`celery multi`, :program:`celeryd_detach`, :program:`celery beat` and :program:`celery events` were used. This means privileges weren't properly dropped, and that it would be possible to regain supervisor privileges later. .. _`CELERYSA-0001`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt .. _v244-fixes: Fixes ----- * Processes pool: Fixed rare deadlock at shutdown (Issue #523). Fix contributed by Ionel Maries Christian. * Webhook tasks issued the wrong HTTP POST headers (Issue #515).
The *Content-Type* header has been changed from ``application/json`` ⇒ ``application/x-www-form-urlencoded``, and adds a proper *Content-Length* header. Fix contributed by Mitar. * Daemonization tutorial: Adds a configuration example using Django and virtualenv together (Issue #505). Contributed by Juan Ignacio Catalano. * generic init-scripts now automatically create log and pid file directories (Issue #545). Contributed by Chris Streeter. .. _version-2.4.3: 2.4.3 ===== :release-date: 2011-11-22 06:00 p.m. GMT :release-by: Ask Solem * Fixes module import typo in `celeryctl` (Issue #538). Fix contributed by Chris Streeter. .. _version-2.4.2: 2.4.2 ===== :release-date: 2011-11-14 12:00 p.m. GMT :release-by: Ask Solem * Program module no longer uses relative imports so that it's possible to do ``python -m celery.bin.name``. .. _version-2.4.1: 2.4.1 ===== :release-date: 2011-11-07 06:00 p.m. GMT :release-by: Ask Solem * ``celeryctl inspect`` commands were missing output. * processes pool: Decrease polling interval for less idle CPU usage. * processes pool: MaybeEncodingError wasn't wrapped in ExceptionInfo (Issue #524). * worker: would silence errors occurring after task consumer started. * logging: Fixed a bug where unicode in stdout redirected log messages couldn't be written (Issue #522). .. _version-2.4.0: 2.4.0 ===== :release-date: 2011-11-04 04:00 p.m. GMT :release-by: Ask Solem .. _v240-important: Important Notes --------------- * Now supports Python 3. * Fixed deadlock in worker process handling (Issue #496). A deadlock could occur after spawning new child processes because the logging library's mutex wasn't properly reset after fork. The symptoms of this bug would be that the worker simply stops processing tasks, as none of the worker's child processes are functioning. There was a greater chance of this bug occurring with ``maxtasksperchild`` or a time-limit enabled. This is a workaround for http://bugs.python.org/issue6721#msg140215. Be aware that while this fixes the logging library lock, there could still be other locks initialized in the parent process, introduced by custom code. Fix contributed by Harm Verhagen. * AMQP Result backend: Now expires results by default. The default expiration value is now taken from the :setting:`CELERY_TASK_RESULT_EXPIRES` setting. The old :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting has been deprecated and will be removed in version 4.0. Note that this means that the result backend requires RabbitMQ 2.1.0 or higher, and that you have to disable expiration if you're running with an older version. You can do so by disabling the :setting:`CELERY_TASK_RESULT_EXPIRES` setting:: CELERY_TASK_RESULT_EXPIRES = None * Eventlet: Fixed problem with shutdown (Issue #457). * Broker transports can now be specified using URLs. The broker can now be specified as a URL instead. This URL must have the format: .. code-block:: text transport://user:password@hostname:port/virtual_host for example the default broker is written as: .. code-block:: text amqp://guest:guest@localhost:5672// The scheme is required, so that the host is identified as a URL and not just a host name. User, password, port and virtual_host are optional and default to the particular transport's default value. .. note:: Note that the path component (virtual_host) always starts with a forward-slash. This is necessary to distinguish between the virtual host ``''`` (empty) and ``'/'``, which are both acceptable virtual host names. A virtual host of ``'/'`` becomes: ..
code-block:: text amqp://guest:guest@localhost:5672// and a virtual host of ``''`` (empty) becomes: .. code-block:: text amqp://guest:guest@localhost:5672/ So the leading slash in the path component is **always required**. In addition the :setting:`BROKER_URL` setting has been added as an alias to ``BROKER_HOST``. Any broker setting specified in both the URL and in the configuration will be ignored; if a setting isn't provided in the URL then the value from the configuration will be used as default. Also, programs now support the :option:`--broker ` option to specify a broker URL on the command-line: .. code-block:: console $ celery worker -b redis://localhost $ celery inspect -b amqp://guest:guest@localhost//e The environment variable :envvar:`CELERY_BROKER_URL` can also be used to easily override the default broker used. * The deprecated :func:`celery.loaders.setup_loader` function has been removed. * The :setting:`CELERY_TASK_ERROR_WHITELIST` setting has been replaced by a more flexible approach (Issue #447). The error mail sending logic is now available as ``Task.ErrorMail``, with the implementation (for reference) in :mod:`celery.utils.mail`. The error mail class can be sub-classed to gain complete control of when error messages are sent, thus removing the need for a separate white-list setting. The :setting:`CELERY_TASK_ERROR_WHITELIST` setting has been deprecated, and will be removed completely in version 4.0. * Additional Deprecations The following functions have been deprecated and are scheduled for removal in version 4.0: ===================================== =================================== **Old function** **Alternative** ===================================== =================================== `celery.loaders.current_loader` `celery.current_app.loader` `celery.loaders.load_settings` `celery.current_app.conf` `celery.execute.apply` `Task.apply` `celery.execute.apply_async` `Task.apply_async` `celery.execute.delay_task` `celery.execute.send_task` ===================================== =================================== The following settings have been deprecated and are scheduled for removal in version 4.0: ===================================== =================================== **Old setting** **Alternative** ===================================== =================================== `CELERYD_LOG_LEVEL` ``celery worker --loglevel=`` `CELERYD_LOG_FILE` ``celery worker --logfile=`` `CELERYBEAT_LOG_LEVEL` ``celery beat --loglevel=`` `CELERYBEAT_LOG_FILE` ``celery beat --logfile=`` `CELERYMON_LOG_LEVEL` ``celerymon --loglevel=`` `CELERYMON_LOG_FILE` ``celerymon --logfile=`` ===================================== =================================== .. _v240-news: News ---- * No longer depends on :pypi:`pyparsing`. * Now depends on Kombu 1.4.3. * CELERY_IMPORTS can now be a scalar value (Issue #485). It's too easy to forget to add the comma after the sole element of a tuple, and this is something that often affects newcomers. The docs should probably use a list in examples, as using a tuple for this doesn't even make sense. Nonetheless, there are many tutorials out there using a tuple, and this change should be a help to new users (a short configuration sketch follows a few entries below). Suggested by :github_user:`jsaxon-cars`. * Fixed a memory leak when using the thread pool (Issue #486). Contributed by Kornelijus Survila. * The ``statedb`` wasn't saved at exit. This has now been fixed and it should again remember previously revoked tasks when a ``--statedb`` is enabled.
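A minimal configuration sketch of the CELERY_IMPORTS pitfall mentioned above (the ``myapp.tasks`` module name is hypothetical):

.. code-block:: python

    # Without a trailing comma this is just a parenthesized string,
    # not a tuple -- the newcomer mistake described above:
    CELERY_IMPORTS = ('myapp.tasks')

    # As of 2.4 a scalar string like the above is accepted directly:
    CELERY_IMPORTS = 'myapp.tasks'

    # A list sidesteps the tuple pitfall entirely:
    CELERY_IMPORTS = ['myapp.tasks']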
* Adds :setting:`EMAIL_USE_TLS` to enable secure SMTP connections (Issue #418). Contributed by Stefan Kjartansson. * Now handles missing fields in task messages as documented in the message format documentation. * Missing required field throws :exc:`~@InvalidTaskError` * Missing args/kwargs is assumed empty. Contributed by Chris Chamberlin. * Fixed race condition in :mod:`celery.events.state` (``celerymon``/``celeryev``) where task info would be removed while iterating over it (Issue #501). * The Cache, Cassandra, MongoDB, Redis and Tyrant backends now respect the :setting:`CELERY_RESULT_SERIALIZER` setting (Issue #435). This means that only the database (Django/SQLAlchemy) backends currently don't support using custom serializers. Contributed by Steeve Morin. * Logging calls no longer manually format messages, but delegate that to the logging system, so tools like Sentry can work with the messages more easily (Issue #445). Contributed by Chris Adams. * ``multi`` now supports a ``stop_verify`` command to wait for processes to shut down. * Cache backend didn't work if the cache key was unicode (Issue #504). Fix contributed by Neil Chintomby. * New setting :setting:`CELERY_RESULT_DB_SHORT_LIVED_SESSIONS` added, which if enabled will disable the caching of SQLAlchemy sessions (Issue #449). Contributed by Leo Dirac. * All result backends now implement ``__reduce__`` so that they can be pickled (Issue #441). Fix contributed by Remy Noel. * multi didn't work on Windows (Issue #472). * New-style ``CELERY_REDIS_*`` settings now take precedence over the old ``REDIS_*`` configuration keys (Issue #508). Fix contributed by Joshua Ginsberg. * Generic beat init-script no longer sets `bash -e` (Issue #510). Fix contributed by Roger Hu. * Documented that Chords don't work well with :command:`redis-server` versions before 2.2. Contributed by Dan McGee. * The :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` setting wasn't respected. * ``inspect.registered_tasks`` renamed to ``inspect.registered`` for naming consistency. The previous name is still available as an alias (a short sketch follows at the end of this list). Contributed by Mher Movsisyan. * Worker logged the string representation of args and kwargs without safeguards (Issue #480). * RHEL init-script: Changed worker start-up priority. The default start / stop priorities for MySQL on RHEL are: .. code-block:: console # chkconfig: - 64 36 Therefore, if Celery is using a database as a broker / message store, it should be started after the database is up and running, otherwise errors will ensue. This commit changes the priority in the init-script to: .. code-block:: console # chkconfig: - 85 15 which are the default recommended settings for third-party applications and ensure that Celery will be started after the database service and shut down before it terminates. Contributed by Yury V. Zaytsev. * KeyValueStoreBackend.get_many didn't respect the ``timeout`` argument (Issue #512). * beat/events's ``--workdir`` option didn't :manpage:`chdir(2)` until after configuration was attempted (Issue #506). * After deprecating Python 2.4 support we can now name modules correctly, since we can make use of absolute imports. Therefore the following internal modules have been renamed: ``celery.concurrency.evlet`` -> ``celery.concurrency.eventlet`` ``celery.concurrency.evg`` -> ``celery.concurrency.gevent`` * :file:`AUTHORS` file is now sorted alphabetically. Also, as you may have noticed the contributors of new features/fixes are now mentioned in the Changelog.
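As a hedged sketch of the ``inspect.registered`` rename mentioned above (the import path is an assumption based on the 2.4-era API):

.. code-block:: python

    from celery.task.control import inspect

    i = inspect()
    i.registered()        # new, preferred name
    i.registered_tasks()  # previous name, still available as an alias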
celery-4.1.0/docs/history/changelog-2.3.rst0000644000175000017500000002573513130607475020355 0ustar omeromer00000000000000.. _changelog-2.3: =============================== Change history for Celery 2.3 =============================== .. contents:: :local: .. _version-2.3.4: 2.3.4 ===== :release-date: 2011-11-25 04:00 p.m. GMT :release-by: Ask Solem .. _v234-security-fixes: Security Fixes -------------- * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than real id's when the :option:`--uid `/ :option:`--gid ` arguments to :program:`celery multi`, :program:`celeryd_detach`, :program:`celery beat` and :program:`celery events` were used. This means privileges weren't properly dropped, and that it would be possible to regain supervisor privileges later. .. _`CELERYSA-0001`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt Fixes ----- * Backported fix for #455 from 2.4 to 2.3. * StateDB wasn't saved at shutdown. * Fixes worker sometimes hanging when hard time limit exceeded. .. _version-2.3.3: 2.3.3 ===== :release-date: 2011-09-16 05:00 p.m. BST :release-by: Mher Movsisyan * Monkey patching :attr:`sys.stdout` could result in the worker crashing if the replacing object didn't define :meth:`isatty` (Issue #477). * ``CELERYD`` option in :file:`/etc/default/celeryd` shouldn't be used with generic init-scripts. .. _version-2.3.2: 2.3.2 ===== :release-date: 2011-10-07 05:00 p.m. BST :release-by: Ask Solem .. _v232-news: News ---- * Improved Contributing guide. If you'd like to contribute to Celery you should read the :ref:`Contributing Guide `. We're looking for contributors at all skill levels, so don't hesitate! * Now depends on Kombu 1.3.1 * ``Task.request`` now contains the current worker host name (Issue #460). Available as ``task.request.hostname``. * It's now easier for app subclasses to extend how they're pickled. (see :class:`celery.app.AppPickler`). .. _v232-fixes: Fixes ----- * `purge/discard_all` wasn't working correctly (Issue #455). * The coloring of log messages didn't handle non-ASCII data well (Issue #427). * [Windows] the multiprocessing pool tried to import ``os.kill`` even though this isn't available there (Issue #450). * Fixes case where the worker could become unresponsive because of tasks exceeding the hard time limit. * The :event:`task-sent` event was missing from the event reference. * ``ResultSet.iterate`` now returns results as they finish (Issue #459). This wasn't the case previously, even though the documentation states this was the expected behavior. * Retries will no longer be performed when tasks are called directly (using ``__call__``). Instead the exception passed to ``retry`` will be re-raised. * Eventlet no longer crashes if autoscale is enabled. Growing and shrinking eventlet pools is still not supported. * ``py24`` target removed from :file:`tox.ini`. .. _version-2.3.1: 2.3.1 ===== :release-date: 2011-08-07 08:00 p.m. BST :release-by: Ask Solem Fixes ----- * The :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting didn't work, resulting in an AMQP related error about not being able to serialize floats while trying to publish task states (Issue #446). .. _version-2.3.0: 2.3.0 ===== :release-date: 2011-08-05 12:00 p.m. BST :tested: CPython: 2.5, 2.6, 2.7; PyPy: 1.5; Jython: 2.5.2 :release-by: Ask Solem .. _v230-important: Important Notes --------------- * Now requires Kombu 1.2.1 * Results are now disabled by default.
The AMQP backend wasn't a good default because often the users were not consuming the results, resulting in thousands of queues. While the queues can be configured to expire if left unused, it wasn't possible to enable this by default because this was only available in recent RabbitMQ versions (2.1.1+). With this change enabling a result backend will be a conscious choice, which will hopefully lead the user to read the documentation and be aware of any common pitfalls with the particular backend. The default backend is now a dummy backend (:class:`celery.backends.base.DisabledBackend`). Saving state is simply a no-op, and AsyncResult.wait(), .result, .state, etc. will raise a :exc:`NotImplementedError` telling the user to configure the result backend. For help choosing a backend please see :ref:`task-result-backends`. If you depend on the previous default which was the AMQP backend, then you have to set this explicitly before upgrading:: CELERY_RESULT_BACKEND = 'amqp' .. note:: For :pypi:`django-celery` users the default backend is still ``database``, and results are not disabled by default. * The Debian init-scripts have been deprecated in favor of the generic-init.d init-scripts. In addition generic init-scripts for ``celerybeat`` and ``celeryev`` have been added. .. _v230-news: News ---- * Automatic connection pool support. The pool is used by everything that requires a broker connection, for example calling tasks, sending broadcast commands, retrieving results with the AMQP result backend, and so on. The pool is disabled by default, but you can enable it by configuring the :setting:`BROKER_POOL_LIMIT` setting:: BROKER_POOL_LIMIT = 10 A limit of 10 means a maximum of 10 simultaneous connections can co-exist. Only a single connection will ever be used in a single-thread environment, but in a concurrent environment (threads, greenlets, etc., but not processes) when the limit has been exceeded, any attempt to acquire a connection will block the thread and wait for a connection to be released. This is something to take into consideration when choosing a limit. A limit of :const:`None` or 0 means no limit, and connections will be established and closed every time. * Introducing Chords (taskset callbacks). A chord is a task that only executes after all of the tasks in a taskset have finished executing. It's a fancy term for "taskset callbacks" adopted from `Cω `_. It works with all result backends, but the best implementation is currently provided by the Redis result backend. Here's an example chord:: >>> chord(add.subtask((i, i)) ... for i in xrange(100))(tsum.subtask()).get() 9900 Please read the :ref:`Chords section in the user guide `, if you want to know more. * Time limits can now be set for individual tasks. To set the soft and hard time limits for a task use the ``time_limit`` and ``soft_time_limit`` attributes: .. code-block:: python import time @task(time_limit=60, soft_time_limit=30) def sleeptask(seconds): time.sleep(seconds) If the attributes are not set, then the worker's default time limits will be used. New in this version you can also change the time limits for a task at runtime using the :func:`time_limit` remote control command:: >>> from celery.task import control >>> control.time_limit('tasks.sleeptask', ... soft=60, hard=120, reply=True) [{'worker1.example.com': {'ok': 'time limits set successfully'}}] Only tasks that start executing after the time limit change will be affected. ..
note:: Soft time limits will still not work on Windows or other platforms that don't have the ``SIGUSR1`` signal. * Redis backend configuration directive names changed to include the ``CELERY_`` prefix. ===================================== =================================== **Old setting name** **Replace with** ===================================== =================================== `REDIS_HOST` `CELERY_REDIS_HOST` `REDIS_PORT` `CELERY_REDIS_PORT` `REDIS_DB` `CELERY_REDIS_DB` `REDIS_PASSWORD` `CELERY_REDIS_PASSWORD` ===================================== =================================== The old names are still supported but pending deprecation. * PyPy: The default pool implementation used is now multiprocessing if running on PyPy 1.5. * multi: now supports "pass through" options. Pass-through options make it easier to use Celery without a configuration file, or just add last-minute options on the command line. Example use: .. code-block:: console $ celery multi start 4 -c 2 -- broker.host=amqp.example.com \ broker.vhost=/ \ celery.disable_rate_limits=yes * ``celerybeat``: Now retries establishing the connection (Issue #419). * ``celeryctl``: New ``list bindings`` command. Lists the current or all available bindings, depending on the broker transport used. * Heartbeat is now sent every 30 seconds (previously every 2 minutes). * ``ResultSet.join_native()`` and ``iter_native()`` are now supported by the Redis and Cache result backends. This is an optimized version of ``join()`` using the underlying backend's ability to fetch multiple results at once. * Can now use SSL when sending error e-mails by enabling the :setting:`EMAIL_USE_SSL` setting. * ``events.default_dispatcher()``: Context manager to easily obtain an event dispatcher instance using the connection pool. * Import errors in the configuration module won't be silenced anymore. * ResultSet.iterate: Now supports the ``timeout``, ``propagate`` and ``interval`` arguments. * ``with_default_connection`` -> ``with default_connection`` * TaskPool.apply_async: Keyword arguments ``callbacks`` and ``errbacks`` have been renamed to ``callback`` and ``errback`` and take a single scalar value instead of a list. * No longer propagates errors occurring during process cleanup (Issue #365). * Added ``TaskSetResult.delete()``, which will delete a previously saved taskset result. * ``celerybeat`` now syncs every 3 minutes instead of only at shutdown (Issue #382). * Monitors now properly handle unknown events, so user-defined events are displayed. * Terminating a task on Windows now also terminates all of the task's child processes (Issue #384). * worker: ``-I|--include`` option now always searches the current directory to import the specified modules. * Cassandra backend: Now expires results by using TTLs. * Functional test suite in ``funtests`` is now actually working properly, and passing tests. .. _v230-fixes: Fixes ----- * ``celeryev`` was trying to create the pidfile twice. * celery.contrib.batches: Fixed problem where tasks failed silently (Issue #393). * Fixed an issue where logging objects would give "<Unrepresentable>", even though the objects were. - Now depends on :ref:`Kombu 2.5.14 `. - Now depends on :pypi:`billiard` version 2.7.3.34. - AMQP Result backend: No longer caches queue declarations. The queues created by the AMQP result backend are always unique, so caching the declarations caused a slow memory leak. - Worker: Fixed crash when hostname contained Unicode characters. Contributed by Daodao. - The worker would no longer start if the `-P solo` pool was selected (Issue #1548).
- Redis/Cache result backends wouldn't complete chords
  if any of the tasks were retried (Issue #1401).

- Task decorator is no longer lazy if app is finalized.

- AsyncResult: Fixed bug with ``copy(AsyncResult)`` when no
  ``current_app`` available.

- ResultSet: Now properly propagates app when passed string ids.

- Loader now ignores :envvar:`CELERY_CONFIG_MODULE` if value is
  an empty string.

- Fixed race condition in Proxy object where it tried to
  delete an attribute twice, resulting in :exc:`AttributeError`.

- Task methods now work with the :setting:`CELERY_ALWAYS_EAGER`
  setting (Issue #1478).

- :class:`~kombu.common.Broadcast` queues were accidentally declared
  when publishing tasks (Issue #1540).

- New :envvar:`C_FAKEFORK` environment variable can be used to
  debug the init-scripts.

  Setting this will skip the daemonization step so that errors
  printed to stderr after standard outs are closed can be seen:

  .. code-block:: console

      $ C_FAKEFORK /etc/init.d/celeryd start

  This works with the `celery multi` command in general.

- ``get_pickleable_etype`` didn't always return a value (Issue #1556).

- Fixed bug where ``app.GroupResult.restore`` would fall back to the
  default app.

- Fixed rare bug where built-in tasks would use the current_app.

- :func:`~celery.platforms.maybe_fileno` now handles :exc:`ValueError`.

.. _version-3.0.23:

3.0.23
======
:release-date: 2013-09-02 01:00 p.m. BST
:release-by: Ask Solem

- Now depends on :ref:`Kombu 2.5.14 `.

- ``send_task`` didn't honor ``link`` and ``link_error`` arguments.

  This had the side effect of chains not calling unregistered tasks,
  silently discarding them.

  Fix contributed by Taylor Nelson.

- :mod:`celery.state`: Optimized precedence lookup.

  Contributed by Matt Robenolt.

- POSIX: Daemonization didn't redirect ``sys.stdin`` to ``/dev/null``.

  Fix contributed by Alexander Smirnov.

- Canvas: group bug caused fallback to default app when ``.apply_async``
  used (Issue #1516).

- Canvas: generator arguments weren't always pickleable.

.. _version-3.0.22:

3.0.22
======
:release-date: 2013-08-16 04:30 p.m. BST
:release-by: Ask Solem

- Now depends on :ref:`Kombu 2.5.13 `.

- Now depends on :pypi:`billiard` 2.7.3.32.

- Fixed bug with monthly and yearly Crontabs (Issue #1465).

  Fix contributed by Guillaume Gauvrit.

- Fixed memory leak caused by time limits (Issue #1129, Issue #1427).

- Worker will now sleep if being restarted more than 5 times
  in one second to avoid spamming with ``worker-online`` events.

- Includes documentation fixes.

  Contributed by: Ken Fromm, Andreas Savvides, Alex Kiriukha,
  Michael Fladischer.

.. _version-3.0.21:

3.0.21
======
:release-date: 2013-07-05 04:30 p.m. BST
:release-by: Ask Solem

- Now depends on :pypi:`billiard` 2.7.3.31.

  This version fixed a bug when running without the billiard C extension.

- 3.0.20 broke eventlet/gevent support (worker not starting).

- Fixed memory leak problem when MongoDB result backend was used with the
  gevent pool.

  Fix contributed by Ross Lawley.

.. _version-3.0.20:

3.0.20
======
:release-date: 2013-06-28 04:00 p.m. BST
:release-by: Ask Solem

- Contains workaround for deadlock problems.

  A better solution will be part of Celery 3.1.

- Now depends on :ref:`Kombu 2.5.12 `.

- Now depends on :pypi:`billiard` 2.7.3.30.

- :option:`--loader ` argument no longer supported
  importing loaders from the current directory.

- [Worker] Fixed memory leak when restarting after connection lost
  (Issue #1325).

- [Worker] Fixed UnicodeDecodeError at start-up (Issue #1373).

  Fix contributed by Jessica Tallon.
- [Worker] Now properly rewrites unpickleable exceptions again.

- Fixed possible race condition when evicting items from the revoked
  task set.

- [generic-init.d] Fixed compatibility with Ubuntu's minimal Dash
  shell (Issue #1387).

  Fix contributed by :github_user:`monkut`.

- ``Task.apply``/``ALWAYS_EAGER`` now also executes callbacks and errbacks
  (Issue #1336).

- [Worker] The :signal:`worker-shutdown` signal was no longer being
  dispatched (Issue #1339).

- [Python 3] Fixed problem with threading.Event.

  Fix contributed by Xavier Ordoquy.

- [Python 3] Now handles ``io.UnsupportedOperation`` that may be raised
  by ``file.fileno()`` in Python 3.

- [Python 3] Fixed problem with ``qualname``.

- [events.State] Now ignores unknown event-groups.

- [MongoDB backend] No longer uses deprecated ``safe`` parameter.

  Fix contributed by :github_user:`rfkrocktk`.

- The eventlet pool now imports on Windows.

- [Canvas] Fixed regression where immutable chord members may receive
  arguments (Issue #1340).

  Fix contributed by Peter Brook.

- [Canvas] chain now accepts generator argument again (Issue #1319).

- ``celery.migrate`` command now consumes from all queues if no queues
  are specified.

  Fix contributed by John Watson.

.. _version-3.0.19:

3.0.19
======
:release-date: 2013-04-17 04:30:00 p.m. BST
:release-by: Ask Solem

- Now depends on :pypi:`billiard` 2.7.3.28.

- A Python 3 related fix managed to disable the deadlock fix
  announced in 3.0.18.

  Tests have been added to make sure this doesn't happen again.

- Task retry policy: Default max_retries is now 3.

  This ensures clients won't be hanging while the broker is down.

  .. note::

      You can set a longer retry for the worker by using the
      :signal:`celeryd_after_setup` signal:

      .. code-block:: python

          from celery.signals import celeryd_after_setup

          @celeryd_after_setup.connect
          def configure_worker(instance, conf, **kwargs):
              conf.CELERY_TASK_PUBLISH_RETRY_POLICY = {
                  'max_retries': 100,
                  'interval_start': 0,
                  'interval_max': 1,
                  'interval_step': 0.2,
              }

- Worker: Will now properly display message body in error messages
  even if the body is a buffer instance.

- 3.0.18 broke the MongoDB result backend (Issue #1303).

.. _version-3.0.18:

3.0.18
======
:release-date: 2013-04-12 05:00:00 p.m. BST
:release-by: Ask Solem

- Now depends on :pypi:`kombu` 2.5.10.

  See the :ref:`kombu changelog `.

- Now depends on :pypi:`billiard` 2.7.3.27.

- Can now specify a white-list of accepted serializers using
  the new :setting:`CELERY_ACCEPT_CONTENT` setting.

  This means that you can force the worker to discard messages
  serialized with pickle and other untrusted serializers.
  For example to only allow JSON serialized messages use::

      CELERY_ACCEPT_CONTENT = ['json']

  you can also specify MIME types in the white-list::

      CELERY_ACCEPT_CONTENT = ['application/json']

- Fixed deadlock in multiprocessing's pool caused by the
  semaphore not being released when terminated by signal.

- Processes Pool: It's now possible to debug pool processes using GDB.

- ``celery report`` now censors possibly secret settings, like passwords
  and secret tokens.

  You should still check the output before pasting anything
  on the internet.

- Connection URLs now ignore multiple '+' tokens.

- Worker/``statedb``: Now uses pickle protocol 2 (Python 2.5+).

- Fixed Python 3 compatibility issues.

- Worker: A warning is now given if a worker is started with the
  same node name as an existing worker.

- Worker: Fixed a deadlock that could occur while revoking tasks
  (Issue #1297).
- Worker: The :sig:`HUP` handler now closes all open file descriptors
  before restarting to ensure file descriptors don't leak (Issue #1270).

- Worker: Optimized storing/loading the revoked tasks list (Issue #1289).

  After this change the :option:`celery worker --statedb` file will
  take up more disk space, but loading from and storing the revoked
  tasks will be considerably faster (what before took 5 minutes will
  now take less than a second).

- Celery will now suggest alternatives if there's a typo in the
  broker transport name (e.g., ``ampq`` -> ``amqp``).

- Worker: The auto-reloader would cause a crash if a monitored file
  was unlinked.

  Fix contributed by Agris Ameriks.

- Fixed AsyncResult pickling error.

  Fix contributed by Thomas Minor.

- Fixed handling of Unicode in logging output when using log colors
  (Issue #427).

- :class:`~celery.app.utils.ConfigurationView` is now a ``MutableMapping``.

  Contributed by Aaron Harnly.

- Fixed memory leak in LRU cache implementation.

  Fix contributed by Romuald Brunet.

- ``celery.contrib.rdb``: Now works when sockets are in non-blocking mode.

  Fix contributed by Theo Spears.

- The `inspect reserved` remote control command included active (started)
  tasks with the reserved tasks (Issue #1030).

- The :signal:`task_failure` signal received a modified traceback object
  meant for pickling purposes; this has been fixed so that it now
  receives the real traceback instead.

- The ``@task`` decorator silently ignored positional arguments;
  it now raises the expected :exc:`TypeError` instead (Issue #1125).

- The worker will now properly handle messages with invalid
  ETA/expires fields (Issue #1232).

- The ``pool_restart`` remote control command now reports
  an error if the :setting:`CELERYD_POOL_RESTARTS` setting isn't set.

- :meth:`@add_defaults` can now be used with non-dict objects.

- Fixed compatibility problems in the Proxy class (Issue #1087).

  The class attributes ``__module__``, ``__name__`` and ``__doc__``
  are now meaningful string objects.

  Thanks to Marius Gedminas.

- MongoDB Backend: The :setting:`MONGODB_BACKEND_SETTINGS` setting
  now accepts an ``option`` key that lets you forward arbitrary kwargs
  to the underlying ``pymongo.Connection`` object (Issue #1015).

- Beat: The daily backend cleanup task is no longer enabled
  for result backends that support automatic result expiration
  (Issue #1031).

- Canvas list operations now take the application instance from the first
  task in the list, instead of depending on the ``current_app``
  (Issue #1249).

- Worker: Message decoding error log message now includes traceback
  information.

- Worker: The start-up banner now includes system platform.

- ``celery inspect|status|control`` now gives an error if used
  with a SQL based broker transport.

.. _version-3.0.17:

3.0.17
======
:release-date: 2013-03-22 04:00:00 p.m. UTC
:release-by: Ask Solem

- Now depends on kombu 2.5.8

- Now depends on billiard 2.7.3.23

- RabbitMQ/Redis: thread-less and lock-free rate-limit implementation.

  This means that rate limits pose minimal overhead when used with
  RabbitMQ/Redis or future transports using the event-loop,
  and that the rate-limit implementation is now thread-less and lock-free.

  The thread-based transports will still use the old implementation for
  now, but the plan is to use the timer also for other broker transports
  in Celery 3.1.

- Rate limits now work with eventlet/gevent if using RabbitMQ/Redis as
  the broker.

- A regression caused ``task.retry`` to ignore additional keyword
  arguments.

  Extra keyword arguments are now used as execution options again.

  Fix contributed by Simon Engledew.
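  For instance, a task can now combine the regular ``countdown`` retry
  argument with extra execution options such as ``queue`` (a sketch;
  the task body, helper function and queue name are illustrative):

  .. code-block:: python

      from celery import task

      @task(max_retries=3)
      def fetch(url):
          try:
              return open_url(url)  # hypothetical helper
          except IOError as exc:
              # ``countdown`` delays the retry; ``queue`` is an extra
              # keyword argument forwarded as an execution option.
              raise fetch.retry(exc=exc, countdown=60, queue='slow')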
- Windows: Fixed problem with the worker trying to pickle the Django
  settings module at worker start-up.

- generic-init.d: No longer double quotes ``$CELERYD_CHDIR``
  (Issue #1235).

- generic-init.d: Removes bash-specific syntax.

  Fix contributed by Pär Wieslander.

- Cassandra Result Backend: Now handles the
  :exc:`~pycassa.AllServersUnavailable` error (Issue #1010).

  Fix contributed by Jared Biel.

- Result: Now properly forwards apps to GroupResults when deserializing
  (Issue #1249).

  Fix contributed by Charles-Axel Dein.

- ``GroupResult.revoke`` now supports the ``terminate`` and ``signal``
  keyword arguments.

- Worker: Multiprocessing pool workers now import task
  modules/configuration before setting up the logging system so that
  logging signals can be connected before they're dispatched.

- chord: The ``AsyncResult`` instance returned now has its ``parent``
  attribute set to the header ``GroupResult``.

  This is consistent with how ``chain`` works.

.. _version-3.0.16:

3.0.16
======
:release-date: 2013-03-07 04:00:00 p.m. UTC
:release-by: Ask Solem

- Happy International Women's Day!

  We have a long way to go, so this is a chance for you to get involved
  in one of the organizations working for making our communities more
  diverse.

  - PyLadies — http://pyladies.com
  - Girls Who Code — http://www.girlswhocode.com
  - Women Who Code — http://www.meetup.com/Women-Who-Code-SF/

- Now depends on :pypi:`kombu` version 2.5.7

- Now depends on :pypi:`billiard` version 2.7.3.22

- AMQP heartbeats are now disabled by default.

  Some users experienced issues with heartbeats enabled, and it's not
  strictly necessary to use them.

  If you're experiencing problems detecting connection failures, you can
  re-enable heartbeats by configuring the :setting:`BROKER_HEARTBEAT`
  setting.

- Worker: Now propagates connection errors occurring in multiprocessing
  callbacks, so that the connection can be reset (Issue #1226).

- Worker: Now propagates connection errors occurring in timer callbacks,
  so that the connection can be reset.

- The modules in :setting:`CELERY_IMPORTS` and :setting:`CELERY_INCLUDE`
  are now imported in the original order (Issue #1161).

  The modules in :setting:`CELERY_IMPORTS` will be imported first,
  then continued by :setting:`CELERY_INCLUDE`.

  Thanks to Joey Wilhelm.

- New bash completion for ``celery`` available in the git repository:

  https://github.com/celery/celery/tree/3.0/extra/bash-completion

  You can source this file or put it in ``bash_completion.d`` to
  get auto-completion for the ``celery`` command-line utility.

- The node name of a worker can now include unicode characters
  (Issue #1186).

- The repr of a ``crontab`` object now displays correctly (Issue #972).

- ``events.State`` no longer modifies the original event dictionary.

- No longer uses ``Logger.warn`` deprecated in Python 3.

- Cache Backend: Now works with chords again (Issue #1094).

- Chord unlock now handles errors occurring while calling the callback.

- Generic worker init.d script: Status check is now performed by
  querying the pid of the instance instead of sending messages.

  Contributed by Milen Pavlov.

- Improved init-scripts for CentOS.

  - Updated to support Celery 3.x conventions.
  - Now uses CentOS built-in ``status`` and ``killproc``.
  - Support for multi-node / multi-pid worker services.
  - Standard color-coded CentOS service-init output.
  - A test suite.

  Contributed by Milen Pavlov.

- ``ResultSet.join`` now always works with empty result set (Issue #1219).

- A ``group`` consisting of a single task is now supported (Issue #1219).
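  For example, assuming an ``add`` task like in the examples above, a
  one-item group now behaves like any other group:

  .. code-block:: pycon

      >>> from celery import group
      >>> group(add.s(2, 2))().get()
      [4]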
- Now supports the ``pycallgraph`` program (Issue #1051).

- Fixed Jython compatibility problems.

- Django tutorial: Now mentions that the example app must be added to
  ``INSTALLED_APPS`` (Issue #1192).

.. _version-3.0.15:

3.0.15
======
:release-date: 2013-02-11 04:30:00 p.m. UTC
:release-by: Ask Solem

- Now depends on billiard 2.7.3.21 which fixed a syntax error crash.

- Fixed bug with :setting:`CELERY_SEND_TASK_SENT_EVENT`.

.. _version-3.0.14:

3.0.14
======
:release-date: 2013-02-08 05:00:00 p.m. UTC
:release-by: Ask Solem

- Now depends on Kombu 2.5.6

- Now depends on billiard 2.7.3.20

- ``execv`` is now disabled by default.

  It was causing too many problems for users; you can still enable it
  using the `CELERYD_FORCE_EXECV` setting.

  execv was only enabled when transports other than AMQP/Redis were used,
  and it's there to prevent deadlocks caused by mutexes not being released
  before the process forks. Unfortunately it also changes the environment,
  introducing many corner case bugs that're hard to fix without adding
  horrible hacks. Deadlock issues are reported far less often than the
  bugs that execv is causing, so we now disable it by default.

  Work is in motion to create non-blocking versions of these transports
  so that execv isn't necessary (which is the situation with the amqp and
  redis broker transports).

- Chord exception behavior defined (Issue #1172).

  From Celery 3.1 the chord callback will change state to FAILURE
  when a task part of a chord raises an exception.

  It was never documented what happens in this case, and the actual
  behavior was very unsatisfactory: indeed it will just forward the
  exception value to the chord callback.

  For backward compatibility reasons we don't change to the new behavior
  in a bugfix release, even if the current behavior was never documented.
  Instead you can enable the :setting:`CELERY_CHORD_PROPAGATES` setting
  to get the new behavior that'll be default from Celery 3.1.

  See more at :ref:`chord-errors`.

- worker: Fixes bug with ignored and retried tasks.

  Neither the ``on_chord_part_return`` and ``Task.after_return``
  callbacks, nor the ``task_postrun`` signal, should be called when the
  task is retried/ignored.

  Fix contributed by Vlad.

- ``GroupResult.join_native`` now respects the ``propagate`` argument.

- ``subtask.id`` added as an alias to ``subtask['options'].id``

  .. code-block:: pycon

      >>> s = add.s(2, 2)
      >>> s.id = 'my-id'
      >>> s['options']
      {'task_id': 'my-id'}

      >>> s.id
      'my-id'

- worker: Fixed error `Could not start worker processes` occurring
  when restarting after connection failure (Issue #1118).

- Adds new signal :signal:`task-retried` (Issue #1169).

- `celery events --dumper` now handles connection loss.

- Will now retry sending the task-sent event in case of connection failure.

- amqp backend: Now uses ``Message.requeue`` instead of republishing
  the message after poll.

- New :setting:`BROKER_HEARTBEAT_CHECKRATE` setting introduced to
  modify the rate at which broker connection heartbeats are monitored.

  The default value was also changed from 3.0 to 2.0.

- :class:`celery.events.state.State` is now pickleable.

  Fix contributed by Mher Movsisyan.

- :class:`celery.utils.functional.LRUCache` is now pickleable.

  Fix contributed by Mher Movsisyan.

- The stats broadcast command now includes the worker's pid.

  Contributed by Mher Movsisyan.

- New ``conf`` remote control command to get a worker's current
  configuration.

  Contributed by Mher Movsisyan.
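  As a sketch, the new command can be reached like any other broadcast
  command (the worker name and the shape of the reply shown here are
  illustrative):

  .. code-block:: pycon

      >>> app.control.broadcast('conf', reply=True)
      [{'worker1@example.com': {...}}]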
- Adds the ability to modify the chord unlock task's countdown
  argument (Issue #1146).

  Contributed by Jun Sakai.

- beat: The scheduler now uses the ``now()`` method of the schedule,
  so that schedules can provide a custom way to get the current date
  and time.

  Contributed by Raphaël Slinckx.

- Fixed pickling of configuration modules on Windows or when
  execv is used (Issue #1126).

- Multiprocessing logger is now configured with loglevel ``ERROR``
  by default.

  Since 3.0 the multiprocessing loggers were disabled by default
  (only configured when the :envvar:`MP_LOG` environment variable was set).

.. _version-3.0.13:

3.0.13
======
:release-date: 2013-01-07 04:00:00 p.m. UTC
:release-by: Ask Solem

- Now depends on Kombu 2.5

  - :pypi:`amqp` has replaced :pypi:`amqplib` as the default transport,
    gaining support for AMQP 0.9, and the RabbitMQ extensions,
    including Consumer Cancel Notifications and heartbeats.

  - support for multiple connection URLs for failover.

  - Read more in the :ref:`Kombu 2.5 changelog `.

- Now depends on billiard 2.7.3.19

- Fixed a deadlock issue that could occur when the producer pool
  inherited the connection pool instance of the parent process.

- The :option:`--loader ` option now works again (Issue #1066).

- :program:`celery` umbrella command: All sub-commands now support
  the :option:`--workdir ` option (Issue #1063).

- Groups included in chains now give GroupResults (Issue #1057).

  Previously it would incorrectly add a regular result instead of a group
  result, but now this works:

  .. code-block:: pycon

      >>> # [4 + 4, 4 + 8, 4 + 16]
      >>> res = (add.s(2, 2) | group(add.s(4), add.s(8), add.s(16)))()
      >>> res

- Chains can now chain other chains and use partial arguments
  (Issue #1057).

  Example:

  .. code-block:: pycon

      >>> c1 = (add.s(2) | add.s(4))
      >>> c2 = (add.s(8) | add.s(16))

      >>> c3 = (c1 | c2)

      >>> # 8 + 2 + 4 + 8 + 16
      >>> assert c3(8).get() == 38

- Subtasks can now be used with unregistered tasks.

  You can specify subtasks even if you just have the name::

      >>> s = subtask(task_name, args=(), kwargs={})
      >>> s.delay()

- The :program:`celery shell` command now always adds the current
  directory to the module path.

- The worker will now properly handle the :exc:`pytz.AmbiguousTimeError`
  exception raised when an ETA/countdown is prepared while being in
  DST transition (Issue #1061).

- force_execv: Now makes sure that task symbols in the original
  task modules will always use the correct app instance (Issue #1072).

- AMQP Backend: Now republishes result messages that have been polled
  (using ``result.ready()`` and friends, ``result.get()`` won't do this
  in this version).

- Crontab schedule values can now "wrap around".

  This means that values like ``11-1`` translate to ``[11, 12, 1]``.

  Contributed by Loren Abrams.
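  A sketch of what that looks like in a beat schedule, assuming the
  ``month_of_year`` crontab argument and illustrative entry/task names:

  .. code-block:: python

      from celery.schedules import crontab

      CELERYBEAT_SCHEDULE = {
          'winter-report': {                # illustrative entry name
              'task': 'tasks.send_report',  # illustrative task name
              # month_of_year='11-1' wraps around the new year:
              # November, December and January -> [11, 12, 1]
              'schedule': crontab(minute=0, hour=0, month_of_year='11-1'),
          },
      }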
- ``multi stopwait`` command now shows the pid of processes.

  Contributed by Loren Abrams.

- Handling of ETA/countdown fixed when the :setting:`CELERY_ENABLE_UTC`
  setting is disabled (Issue #1065).

- A number of unneeded properties were included in messages,
  caused by accidentally passing ``Queue.as_dict`` as message properties.

- Rate limit values can now be a float.

  This also extends the string format so that values like ``"0.5/s"``
  work.

  Contributed by Christoph Krybus.

- Fixed a typo in the broadcast routing documentation (Issue #1026).

- Rewrote confusing section about idempotence in the task user guide.

- Fixed typo in the daemonization tutorial (Issue #1055).

- Fixed several typos in the documentation.

  Contributed by Marius Gedminas.

- Batches: Now works when using the eventlet pool.

  Fix contributed by Thomas Grainger.

- Batches: Added example sending results to ``celery.contrib.batches``.

  Contributed by Thomas Grainger.

- MongoDB backend: Connection ``max_pool_size`` can now be set in
  :setting:`CELERY_MONGODB_BACKEND_SETTINGS`.

  Contributed by Craig Younkins.

- Fixed problem when using earlier versions of :pypi:`pytz`.

  Fix contributed by Vlad.

- Docs updated to include the default value for the
  :setting:`CELERY_TASK_RESULT_EXPIRES` setting.

- Improvements to the :pypi:`django-celery` tutorial.

  Contributed by Locker537.

- The ``add_consumer`` control command didn't properly persist
  the addition of new queues so that they survived connection failure
  (Issue #1079).

.. _version-3.0.12:

3.0.12
======
:release-date: 2012-11-06 02:00 p.m. UTC
:release-by: Ask Solem

- Now depends on kombu 2.4.8

  - [Redis] New and improved fair queue cycle algorithm (Kevin McCarthy).
  - [Redis] Now uses a Redis-based mutex when restoring messages.
  - [Redis] Number of messages that can be restored in one interval is no
    longer limited (but can be set using the ``unacked_restore_limit``
    :setting:`transport option `).
  - Heartbeat value can be specified in broker URLs (Mher Movsisyan).
  - Fixed problem with msgpack on Python 3 (Jasper Bryant-Greene).

- Now depends on billiard 2.7.3.18

- Celery can now be used with static analysis tools like
  PyDev/PyCharm/pylint etc.

- Development documentation has moved to Read The Docs.

  The new URL is: http://docs.celeryproject.org/en/master

- New :setting:`CELERY_QUEUE_HA_POLICY` setting used to set the default
  HA policy for queues when using RabbitMQ.

- New method ``Task.subtask_from_request`` returns a subtask using the
  current request.

- The results ``get_many`` method didn't respect the timeout argument.

  Fix contributed by Remigiusz Modrzejewski.

- generic_init.d scripts now support setting :envvar:`CELERY_CREATE_DIRS`
  to always create log and pid directories (Issue #1045).

  This can be set in your :file:`/etc/default/celeryd`.

- Fixed strange kombu import problem on Python 3.2 (Issue #1034).

- Worker: ETA scheduler now uses millisecond precision (Issue #1040).

- The :option:`--config ` argument to programs is now supported
  by all loaders.

- The :setting:`CASSANDRA_OPTIONS` setting has now been documented.

  Contributed by Jared Biel.

- Task methods (:mod:`celery.contrib.methods`) cannot be used with the old
  task base class; the task decorator in that module now inherits from
  the new one.

- An optimization was too eager and caused some logging messages to
  never emit.

- ``celery.contrib.batches`` now works again.

- Fixed missing white-space in ``bdist_rpm`` requirements (Issue #1046).

- Event state's ``tasks_by_name`` applied limit before filtering by name.

  Fix contributed by Alexander A. Sosnovskiy.

.. _version-3.0.11:

3.0.11
======
:release-date: 2012-09-26 04:00 p.m. UTC
:release-by: Ask Solem

- [security:low] generic-init.d scripts changed permissions of
  /var/log & /var/run.

  In the daemonization tutorial the recommended directories were as
  follows:

  .. code-block:: bash

      CELERYD_LOG_FILE="/var/log/celery/%n.log"
      CELERYD_PID_FILE="/var/run/celery/%n.pid"

  But in the scripts themselves the default files were
  ``/var/log/celery%n.log`` and ``/var/run/celery%n.pid``, so if the
  user didn't change the location by configuration, the directories
  ``/var/log`` and ``/var/run`` would be created - and worse have their
  permissions and owners changed.
  This change means that:

  - Default pid file is ``/var/run/celery/%n.pid``
  - Default log file is ``/var/log/celery/%n.log``
  - The directories are only created and have their permissions changed
    if *no custom locations are set*.

  Users can force paths to be created by calling the ``create-paths``
  sub-command:

  .. code-block:: console

      $ sudo /etc/init.d/celeryd create-paths

  .. admonition:: Upgrading Celery won't update init-scripts

      To update the init-scripts you have to re-download the files from
      source control and update them manually. You can find the
      init-scripts for version 3.0.x at:

      https://github.com/celery/celery/tree/3.0/extra/generic-init.d

- Now depends on billiard 2.7.3.17

- Fixes request stack protection when app is initialized more than
  once (Issue #1003).

- ETA tasks now properly work when the system timezone isn't the same
  as the configured timezone (Issue #1004).

- Terminating a task now works if the task has been sent to the pool
  but not yet acknowledged by a pool process (Issue #1007).

  Fix contributed by Alexey Zatelepin.

- Terminating a task now properly updates the state of the task to
  revoked, and sends a ``task-revoked`` event.

- Generic worker init-script now waits for workers to shut down
  by default.

- Multi: No longer parses --app option (Issue #1008).

- Multi: ``stop_verify`` command renamed to ``stopwait``.

- Daemonization: Now delays trying to create pidfile/logfile until after
  the working directory has been changed into.

- :program:`celery worker` and :program:`celery beat` commands now
  respect the :option:`--no-color ` option (Issue #999).

- Fixed typos in eventlet examples (Issue #1000).

  Fix contributed by Bryan Bishop.
  Congratulations on opening bug #1000!

- Tasks that raise :exc:`~celery.exceptions.Ignore` are now acknowledged.

- Beat: Now shows the name of the entry in ``sending due task`` logs.

.. _version-3.0.10:

3.0.10
======
:release-date: 2012-09-20 05:30 p.m. BST
:release-by: Ask Solem

- Now depends on kombu 2.4.7

- Now depends on billiard 2.7.3.14

  - Fixes crash at start-up when using Django and pre-1.4 projects
    (``setup_environ``).

  - Hard time limits now send the KILL signal shortly after TERM,
    to terminate processes that have signal handlers blocked by
    C extensions.

  - Billiard now installs even if the C extension cannot be built.

    It's still recommended to build the C extension if you're using
    a transport other than RabbitMQ/Redis (or use forced execv for some
    other reason).

  - Pool now sets a ``current_process().index`` attribute that can be
    used to create as many log files as there are processes in the pool.

- Canvas: chord/group/chain no longer modifies the state when called.

  Previously calling a chord/group/chain would modify the ids of
  subtasks so that:

  .. code-block:: pycon

      >>> c = chord([add.s(2, 2), add.s(4, 4)], xsum.s())
      >>> c()
      >>> c()   # <-- call again

  at the second time the ids for the tasks would be the same as in the
  previous invocation. This is now fixed, so that calling a subtask
  won't mutate any options.

- Canvas: Chaining a chord to another task now works (Issue #965).

- Worker: Fixed a bug where the request stack could be corrupted if
  relative imports are used.

  Problem usually manifested itself as an exception while trying to
  send a failed task result (``NoneType does not have id attribute``).

  Fix contributed by Sam Cooke.

- Tasks can now raise :exc:`~celery.exceptions.Ignore` to skip updating
  states or events after return.

  Example:
  .. code-block:: python

      from celery.exceptions import Ignore

      @task
      def custom_revokes():
          if redis.sismember('tasks.revoked', custom_revokes.request.id):
              raise Ignore()

- The worker now makes sure the request/task stacks aren't modified
  by the initial ``Task.__call__``.

  This would previously be a problem if a custom task class defined
  ``__call__`` and also called ``super()``.

- Because of problems the fast local optimization has been disabled,
  and can only be enabled by setting the :envvar:`USE_FAST_LOCALS`
  attribute.

- Worker: Now sets a default socket timeout of 5 seconds at shutdown
  so that broken socket reads don't hinder proper shutdown (Issue #975).

- More fixes related to late eventlet/gevent patching.

- Documentation for settings out of sync with reality:

  - :setting:`CELERY_TASK_PUBLISH_RETRY`

    Documented as disabled by default, but it was enabled by default
    since 2.5 as stated by the 2.5 changelog.

  - :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY`

    The default max_retries had been set to 100, but documented as being
    3, and the interval_max was set to 1 but documented as 0.2.
    The default settings are now set to 3 and 0.2, as originally
    documented.

    Fix contributed by Matt Long.

- Worker: Log messages when connection established and lost have been
  improved.

- The repr of a Crontab schedule value of '0' should be '*' (Issue #972).

- Revoked tasks are now removed from reserved/active state in the worker
  (Issue #969).

  Fix contributed by Alexey Zatelepin.

- gevent: Now supports hard time limits using ``gevent.Timeout``.

- Documentation: Links to init-scripts now point to the 3.0 branch
  instead of the development branch (master).

- Documentation: Fixed typo in signals user guide (Issue #986).

  ``instance.app.queues`` -> ``instance.app.amqp.queues``.

- Eventlet/gevent: The worker didn't properly set the custom app
  for new greenlets.

- Eventlet/gevent: Fixed a bug where the worker could not recover
  from connection loss (Issue #959).

  Also, because of a suspected bug in gevent the
  :setting:`BROKER_CONNECTION_TIMEOUT` setting has been disabled
  when using gevent.

3.0.9
=====
:release-date: 2012-08-31 06:00 p.m. BST
:release-by: Ask Solem

- Important note for users of Django and the database scheduler!

  Recently a timezone issue has been fixed for periodic tasks,
  but erroneous timezones could have already been stored in the
  database, so for the fix to work you need to reset
  the ``last_run_at`` fields.

  You can do this by executing the following command:

  .. code-block:: console

      $ python manage.py shell
      >>> from djcelery.models import PeriodicTask
      >>> PeriodicTask.objects.update(last_run_at=None)

  You also have to do this if you change the timezone or
  :setting:`CELERY_ENABLE_UTC` setting.

- Note about the :setting:`CELERY_ENABLE_UTC` setting.

  If you previously disabled this just to force periodic tasks to work
  with your timezone, then you're now *encouraged to re-enable it*.

- Now depends on Kombu 2.4.5 which fixes PyPy + Jython installation.

- Fixed bug with timezones when :setting:`CELERY_ENABLE_UTC` is disabled
  (Issue #952).

- Fixed a typo in the ``celerybeat`` upgrade mechanism (Issue #951).

- Make sure the `exc_info` argument to logging is resolved (Issue #899).

- Fixed problem with Python 3.2 and thread join timeout overflow
  (Issue #796).

- A test case was occasionally broken for Python 2.5.

- Unit test suite now passes for PyPy 1.9.

- App instances now support the :keyword:`with` statement.

  This calls the new :meth:`@close` method at exit, which cleans up
  after the app like closing pool connections.

  Note that this is only necessary when dynamically creating apps,
  for example "temporary" apps.
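  A minimal sketch of such a temporary app (the app name and broker URL
  are illustrative):

  .. code-block:: python

      from celery import Celery

      with Celery('tmp', broker='amqp://') as app:
          # use the temporary app here; ``close()`` is called when the
          # block exits, releasing any pool connections held by the app.
          pass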
- Support for piping a subtask to a chain.

  For example:

  .. code-block:: python

      pipe = sometask.s() | othertask.s()
      new_pipe = mytask.s() | pipe

  Contributed by Steeve Morin.

- Fixed problem with group results on non-pickle serializers.

  Fix contributed by Steeve Morin.

.. _version-3.0.8:

3.0.8
=====
:release-date: 2012-08-29 05:00 p.m. BST
:release-by: Ask Solem

- Now depends on Kombu 2.4.4

- Fixed problem with :pypi:`amqplib` and receiving larger message payloads
  (Issue #922).

  The problem would manifest itself as either the worker hanging,
  or occasionally a ``Framing error`` exception appearing.

  Users of the new ``pyamqp://`` transport must upgrade to
  :pypi:`amqp` 0.9.3.

- Beat: Fixed another timezone bug with interval and Crontab schedules
  (Issue #943).

- Beat: The schedule file is now automatically cleared if the timezone
  is changed.

  The schedule is also cleared when you upgrade to 3.0.8 from an earlier
  version, this to register the initial timezone info.

- Events: The :event:`worker-heartbeat` event now includes processed and
  active count fields.

  Contributed by Mher Movsisyan.

- Fixed error with error email and new task classes (Issue #931).

- ``BaseTask.__call__`` is no longer optimized away if it has been
  monkey patched.

- Fixed shutdown issue when using gevent (Issue #911 & Issue #936).

  Fix contributed by Thomas Meson.

.. _version-3.0.7:

3.0.7
=====
:release-date: 2012-08-24 05:00 p.m. BST
:release-by: Ask Solem

- Fixes several problems with periodic tasks and timezones (Issue #937).

- Now depends on kombu 2.4.2

  - Redis: Fixes a race condition crash.

  - Fixes an infinite loop that could happen when retrying establishing
    the broker connection.

- Daemons now redirect standard file descriptors to :file:`/dev/null`.

  By default the standard outs are also redirected to the logger instead,
  but you can disable this by changing the
  :setting:`CELERY_REDIRECT_STDOUTS` setting.

- Fixes possible problems when eventlet/gevent is patched too late.

- ``LoggingProxy`` no longer defines ``fileno()`` (Issue #928).

- Results are now ignored for the chord unlock task.

  Fix contributed by Steeve Morin.

- Cassandra backend now works if result expiry is disabled.

  Fix contributed by Steeve Morin.

- The traceback object is now passed to signal handlers instead of the
  string representation.

  Fix contributed by Adam DePue.

- Celery command: Extensions are now sorted by name.

- A regression caused the :event:`task-failed` event to be sent with
  the exception object instead of its string representation.

- The worker daemon would try to create the pid file before daemonizing
  to catch errors, but this file wasn't immediately released (Issue #923).

- Fixes Jython compatibility.

- ``billiard.forking_enable`` was called by all pools not just the
  processes pool, which would result in a useless warning if the billiard
  C extensions weren't installed.

.. _version-3.0.6:

3.0.6
=====
:release-date: 2012-08-17 11:00 p.m. BST
:release-by: Ask Solem

- Now depends on kombu 2.4.0

- Now depends on billiard 2.7.3.12

- Redis: Celery now tries to restore messages whenever there are no
  messages in the queue.

- Crontab schedules now properly respect the :setting:`CELERY_TIMEZONE`
  setting.

  It's important to note that Crontab schedules use UTC time by default
  unless this setting is set.

  Issue #904 and :pypi:`django-celery` #150.
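  For example, with :setting:`CELERY_TIMEZONE` set, a crontab entry is
  interpreted in the configured local time instead of UTC (the timezone
  and the entry/task names below are illustrative):

  .. code-block:: python

      from celery.schedules import crontab

      CELERY_TIMEZONE = 'Europe/London'

      CELERYBEAT_SCHEDULE = {
          'morning-report': {
              'task': 'tasks.send_report',
              # runs at 7:30 a.m. London time rather than 7:30 UTC
              'schedule': crontab(hour=7, minute=30),
          },
      }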
- ``billiard.enable_forking`` is now only set by the processes pool.

- The transport is now properly shown by :program:`celery report`
  (Issue #913).

- The `--app` argument now works if the last part is a module name
  (Issue #921).

- Fixed problem with unpickleable exceptions (billiard #12).

- Adds ``task_name`` attribute to ``EagerResult`` which is always
  :const:`None` (Issue #907).

- Old Task class in :mod:`celery.task` no longer accepts magic kwargs
  by default (Issue #918).

  A regression long ago disabled magic kwargs for these, and since
  no one has complained about it we don't have any incentive to fix it now.

- The ``inspect reserved`` control command didn't work properly.

- Should now play better with tools for static analysis by explicitly
  specifying dynamically created attributes in the :mod:`celery` and
  :mod:`celery.task` modules.

- Terminating a task now results in
  :exc:`~celery.exceptions.TaskRevokedError` instead of a
  ``WorkerLostError``.

- ``AsyncResult.revoke`` now accepts ``terminate`` and ``signal``
  arguments.

- The :event:`task-revoked` event now includes new fields:
  ``terminated``, ``signum``, and ``expired``.

- The argument to :class:`~celery.exceptions.TaskRevokedError` is now one
  of the reasons ``revoked``, ``expired`` or ``terminated``.

- Old Task class no longer uses :class:`classmethod` for ``push_request``
  and ``pop_request`` (Issue #912).

- ``GroupResult`` now supports the ``children`` attribute (Issue #916).

- ``AsyncResult.collect`` now respects the ``intermediate`` argument
  (Issue #917).

- Fixes example task in documentation (Issue #902).

- Eventlet fixed so that the environment is patched as soon as possible.

- eventlet: Now warns if Celery related modules that depend on threads
  are imported before eventlet is patched.

- Improved event and camera examples in the monitoring guide.

- Disables celery command setuptools entry-points if the command can't be
  loaded.

- Fixed broken ``dump_request`` example in the tasks guide.

.. _version-3.0.5:

3.0.5
=====
:release-date: 2012-08-01 04:00 p.m. BST
:release-by: Ask Solem

- Now depends on kombu 2.3.1 + billiard 2.7.3.11

- Fixed a bug with the -B option (``cannot pickle thread.lock objects``)
  (Issue #894 + Issue #892, + :pypi:`django-celery` #154).

- The :control:`restart_pool` control command now requires the
  :setting:`CELERYD_POOL_RESTARTS` setting to be enabled.

  This change was necessary as the multiprocessing event that the restart
  command depends on is responsible for creating many semaphores/file
  descriptors, resulting in problems in some environments.

- ``chain.apply`` now passes args to the first task (Issue #889).

- Documented previously secret options to the :pypi:`django-celery`
  monitor in the monitoring user guide (Issue #396).

- Old changelogs are now organized in separate documents for each series;
  see :ref:`history`.

.. _version-3.0.4:

3.0.4
=====
:release-date: 2012-07-26 07:00 p.m. BST
:release-by: Ask Solem

- Now depends on Kombu 2.3

- New experimental standalone Celery monitor: Flower.

  See :ref:`monitoring-flower` to read more about it!

  Contributed by Mher Movsisyan.

- Now supports AMQP heartbeats if using the new ``pyamqp://`` transport.

  - The :pypi:`amqp` transport requires the :pypi:`amqp` library to be
    installed:

    .. code-block:: console

        $ pip install amqp

  - Then you need to set the transport URL prefix to ``pyamqp://``.
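  A sketch of what that looks like in the configuration (the credentials
  and host are illustrative; only the ``pyamqp://`` prefix matters):

  .. code-block:: python

      # The pyamqp:// prefix selects the new py-amqp transport.
      BROKER_URL = 'pyamqp://guest:guest@localhost:5672//'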
  - The default heartbeat value is 10 seconds, but this can be changed
    using the :setting:`BROKER_HEARTBEAT` setting::

        BROKER_HEARTBEAT = 5.0

  - If the broker heartbeat is set to 10 seconds, the heartbeats will be
    monitored every 5 seconds (double the heartbeat rate).

  See the :ref:`Kombu 2.3 changelog ` for more information.

- Now supports RabbitMQ Consumer Cancel Notifications, using the
  ``pyamqp://`` transport.

  This is essential when running RabbitMQ in a cluster.

  See the :ref:`Kombu 2.3 changelog ` for more information.

- Delivery info is no longer passed directly through.

  It was discovered that the SQS transport adds objects that can't
  be pickled to the delivery info mapping, so we had to go back to
  using the white-list again.

  Fixing this bug also means that the SQS transport is now working again.

- The semaphore wasn't properly released when a task was revoked
  (Issue #877).

  This could lead to tasks being swallowed and not released until a
  worker restart.

  Thanks to Hynek Schlawack for debugging the issue.

- Retrying a task now also forwards any linked tasks.

  This means that if a task is part of a chain (or linked in some other
  way), then even if the task is retried, the next task in the chain will
  be executed when the retry succeeds.

- Chords: Now supports setting the interval and other keyword arguments
  to the chord unlock task.

  - The interval can now be set as part of the chord subtask's kwargs::

        chord(header)(body, interval=10.0)

  - In addition the chord unlock task now honors the
    Task.default_retry_delay option, used when none is specified,
    which also means that the default interval can also be changed using
    annotations:

    .. code-block:: python

        CELERY_ANNOTATIONS = {
            'celery.chord_unlock': {
                'default_retry_delay': 10.0,
            }
        }

- New :meth:`@add_defaults` method can add new default configuration
  dictionaries to the application's configuration.

  For example::

      config = {'FOO': 10}

      app.add_defaults(config)

  is the same as ``app.conf.update(config)`` except that data won't be
  copied, and that it won't be pickled when the worker spawns child
  processes.

  In addition the method accepts a callable::

      def initialize_config():
          # insert heavy stuff that can't be done at import time here.

      app.add_defaults(initialize_config)

  which means the same as the above except that it won't happen
  until the Celery configuration is actually used.

  As an example, Celery can lazily use the configuration of a Flask app::

      flask_app = Flask()
      app = Celery()
      app.add_defaults(lambda: flask_app.config)

- Revoked tasks weren't marked as revoked in the result backend
  (Issue #871).

  Fix contributed by Hynek Schlawack.

- Event-loop now properly handles the case when the :manpage:`epoll`
  poller object has been closed (Issue #882).

- Fixed syntax error in ``funtests/test_leak.py``.

  Fix contributed by Catalin Iacob.

- group/chunks: Now accepts empty task list (Issue #873).

- New method names:

  - ``Celery.default_connection()`` ➠ :meth:`~@connection_or_acquire`.
  - ``Celery.default_producer()`` ➠ :meth:`~@producer_or_acquire`.

  The old names still work for backward compatibility.

.. _version-3.0.3:

3.0.3
=====
:release-date: 2012-07-20 09:17 p.m. BST
:release-by: Ask Solem

- :pypi:`amqplib` passes the channel object as part of the delivery_info
  and it's not pickleable, so we now remove it.

.. _version-3.0.2:

3.0.2
=====
:release-date: 2012-07-20 04:00 p.m. BST
:release-by: Ask Solem
- A bug caused the following task options to not take defaults from the
  configuration (Issue #867 + Issue #858).

  The following settings were affected:

  - :setting:`CELERY_IGNORE_RESULT`
  - :setting:`CELERYD_SEND_TASK_ERROR_EMAILS`
  - :setting:`CELERY_TRACK_STARTED`
  - :setting:`CELERY_STORE_ERRORS_EVEN_IF_IGNORED`

  Fix contributed by John Watson.

- Task Request: ``delivery_info`` is now passed through as-is
  (Issue #807).

- The ETA argument now supports datetimes with a timezone set
  (Issue #855).

- The worker's banner displayed the autoscale settings in the wrong order
  (Issue #859).

- Extension commands are now loaded after concurrency is set up so that
  they don't interfere with things like eventlet patching.

- Fixed bug in the threaded pool (Issue #863).

- The task failure handler mixed up the fields in :func:`sys.exc_info`.

  Fix contributed by Rinat Shigapov.

- Fixed typos and wording in the docs.

  Fix contributed by Paul McMillan.

- New setting: :setting:`CELERY_WORKER_DIRECT`

  If enabled, each worker will consume from its own dedicated queue
  which can be used to route tasks to specific workers.

- Fixed several edge case bugs in the add consumer remote control command.

- :mod:`~celery.contrib.migrate`: Can now filter and move tasks to
  specific workers if :setting:`CELERY_WORKER_DIRECT` is enabled.

  Among other improvements, the following functions have been added:

  * ``move_direct(filterfun, **opts)``
  * ``move_direct_by_id(task_id, worker_hostname, **opts)``
  * ``move_direct_by_idmap({task_id: worker_hostname, ...}, **opts)``
  * ``move_direct_by_taskmap({task_name: worker_hostname, ...}, **opts)``

- :meth:`~celery.Celery.default_connection` now accepts a pool argument
  that if set to false causes a new connection to be created instead of
  acquiring one from the pool.

- New signal: :signal:`celeryd_after_setup`.

- Default loader now keeps lowercase attributes from the configuration
  module.

.. _version-3.0.1:

3.0.1
=====
:release-date: 2012-07-10 06:00 p.m. BST
:release-by: Ask Solem

- Now depends on kombu 2.2.5

- inspect now supports limit argument::

      myapp.control.inspect(limit=1).ping()

- Beat: now works with timezone-aware datetimes.

- Task classes inheriting ``from celery import Task``
  mistakenly enabled ``accept_magic_kwargs``.

- Fixed bug in ``inspect scheduled`` (Issue #829).

- Beat: Now resets the schedule to upgrade to UTC.

- The :program:`celery worker` command now works with eventlet/gevent.

  Previously it wouldn't patch the environment early enough.

- The :program:`celery` command now supports extension commands
  using setuptools entry-points.

  Libraries can add additional commands to the :program:`celery`
  command by adding an entry-point like::

      setup(
          entry_points={
              'celery.commands': [
                  'foo = my.module:Command',
              ],
          },
          ...)

  The command must then support the interface of
  :class:`celery.bin.base.Command`.

- contrib.migrate: New utilities to move tasks from one queue to another.

  - :func:`~celery.contrib.migrate.move_tasks`
  - :func:`~celery.contrib.migrate.move_task_by_id`

- The :event:`task-sent` event now contains ``exchange`` and
  ``routing_key`` fields.

- Fixes bug with installing on Python 3.

  Fix contributed by Jed Smith.

.. _version-3.0.0:

3.0.0 (Chiastic Slide)
======================
:release-date: 2012-07-07 01:30 p.m. BST
:release-by: Ask Solem

See :ref:`whatsnew-3.0`.
celery-4.1.0/docs/history/changelog-1.0.rst0000644000175000017500000016136713130607475020347 0ustar omeromer00000000000000.. _changelog-1.0:
===============================
 Change history for Celery 1.0
===============================

.. contents::
    :local:

.. _version-1.0.6:

1.0.6
=====
:release-date: 2010-06-30 09:57 a.m. CEST
:release-by: Ask Solem

* RabbitMQ 1.8.0 has extended their exchange equivalence tests to
  include `auto_delete` and `durable`. This broke the AMQP backend.

  If you've already used the AMQP backend this means you have to
  delete the previous definitions:

  .. code-block:: console

      $ camqadm exchange.delete celeryresults

  or:

  .. code-block:: console

      $ python manage.py camqadm exchange.delete celeryresults

.. _version-1.0.5:

1.0.5
=====
:release-date: 2010-06-01 02:36 p.m. CEST
:release-by: Ask Solem

.. _v105-critical:

Critical
--------

* :sig:`INT`/:kbd:`Control-c` killed the pool, abruptly terminating the
  currently executing tasks.

  Fixed by making the pool worker processes ignore :const:`SIGINT`.

* Shouldn't close the consumers before the pool is terminated,
  just cancel the consumers.

  See issue #122.

* Now depends on :pypi:`billiard` >= 0.3.1

* worker: Previously exceptions raised by worker components could stall
  start-up; now it correctly logs the exceptions and shuts down.

* worker: Prefetch counts were set too late. QoS is now set as early as
  possible, so the worker can't slurp in all the messages at start-up.

.. _v105-changes:

Changes
-------

* :mod:`celery.contrib.abortable`: Abortable tasks.

  Tasks that define steps of execution; the task can then be aborted
  after each step has completed.

* :class:`~celery.events.EventDispatcher`: No longer creates AMQP channel
  if events are disabled.

* Added required RPM package names under `[bdist_rpm]` section,
  to support building RPMs from the sources using :file:`setup.py`.

* Running unit tests: :envvar:`NOSE_VERBOSE` environment var now enables
  verbose output from Nose.

* :func:`celery.execute.apply`: Pass log file/log level arguments as task
  kwargs.

  See issue #110.

* celery.execute.apply: Should return exception, not
  :class:`~billiard.einfo.ExceptionInfo` on error.

  See issue #111.

* Added new entries to the :ref:`FAQs `:

  * Should I use retry or acks_late?
  * Can I call a task by name?

.. _version-1.0.4:

1.0.4
=====
:release-date: 2010-05-31 09:54 a.m. CEST
:release-by: Ask Solem

* Changelog merged with 1.0.5 as the release was never announced.

.. _version-1.0.3:

1.0.3
=====
:release-date: 2010-05-15 03:00 p.m. CEST
:release-by: Ask Solem

.. _v103-important:

Important notes
---------------

* Messages are now acknowledged *just before* the task function is
  executed.

  This is the behavior we've wanted all along, but couldn't have because
  of limitations in the multiprocessing module. The previous behavior
  wasn't good, and the situation worsened with the release of 1.0.1,
  so this change will definitely improve reliability, performance and
  operations in general.

  For more information please see http://bit.ly/9hom6T

* Database result backend: result now explicitly sets `null=True` as
  `django-picklefield` version 0.1.5 changed the default behavior
  right under our noses :(

  See: http://bit.ly/d5OwMr

  This means those who created their Celery tables (via ``syncdb`` or
  ``celeryinit``) with :pypi:`django-picklefield` versions >= 0.1.5 have
  to alter their tables to allow the result field to be `NULL` manually.

  MySQL:

  .. code-block:: sql

      ALTER TABLE celery_taskmeta MODIFY result TEXT NULL

  PostgreSQL:
  .. code-block:: sql

      ALTER TABLE celery_taskmeta ALTER COLUMN result DROP NOT NULL

* Removed `Task.rate_limit_queue_type`, as it wasn't really useful
  and made it harder to refactor some parts.

* Now depends on carrot >= 0.10.4

* Now depends on billiard >= 0.3.0

.. _v103-news:

News
----

* AMQP backend: Added timeout support for `result.get()` /
  `result.wait()`.

* New task option: `Task.acks_late` (default: :setting:`CELERY_ACKS_LATE`)

  Late ack means the task messages will be acknowledged **after** the task
  has been executed, not *just before*, which is the default behavior.

  .. note::

      This means the tasks may be executed twice if the worker crashes in
      mid-execution. Not acceptable for most applications, but desirable
      for others.

* Added Crontab-like scheduling to periodic tasks.

  Like a cronjob, you can specify units of time of when
  you'd like the task to execute. While not a full implementation
  of :command:`cron`'s features, it should provide a fair degree of
  common scheduling needs.

  You can specify a minute (0-59), an hour (0-23), and/or a day of the
  week (0-6 where 0 is Sunday, or by names:
  ``sun, mon, tue, wed, thu, fri, sat``).

  Examples:

  .. code-block:: python

      from celery.schedules import crontab
      from celery.decorators import periodic_task

      @periodic_task(run_every=crontab(hour=7, minute=30))
      def every_morning():
          print('Runs every morning at 7:30a.m')

      @periodic_task(run_every=crontab(hour=7, minute=30, day_of_week='mon'))
      def every_monday_morning():
          print('Run every monday morning at 7:30a.m')

      @periodic_task(run_every=crontab(minute=30))
      def every_hour():
          print('Runs every hour on the clock (e.g., 1:30, 2:30, 3:30 etc.).')

  .. note::

      This is a late addition. While we have unit tests, due to the
      nature of this feature we haven't been able to completely test this
      in practice, so consider this experimental.

* `TaskPool.apply_async`: Now supports the `accept_callback` argument.

* `apply_async`: Now raises :exc:`ValueError` if task args isn't a list,
  or kwargs isn't a dict (Issue #95).

* `Task.max_retries` can now be `None`, which means it will retry forever.

* ``celerybeat``: Now reuses the same connection when publishing large
  sets of tasks.

* Modified the task locking example in the documentation to use
  `cache.add` for atomic locking.

* Added experimental support for a *started* status on tasks.

  If `Task.track_started` is enabled the task will report its status
  as "started" when the task is executed by a worker.

  The default value is `False` as the normal behavior is to not
  report that level of granularity. Tasks are either pending, finished,
  or waiting to be retried. Having a "started" status can be useful for
  when there are long running tasks and there's a need to report which
  task is currently running.

  The global default can be overridden by the
  :setting:`CELERY_TRACK_STARTED` setting.

* User Guide: New section `Tips and Best Practices`.

  Contributions welcome!

.. _v103-remote-control:

Remote control commands
-----------------------

* Remote control commands can now send replies back to the caller.

  Existing commands have been improved to send replies, and the client
  interface in `celery.task.control` has new keyword arguments: `reply`,
  `timeout` and `limit`. Where reply means it will wait for replies,
  timeout is the time in seconds to stop waiting for replies, and limit
  is the maximum number of replies to get.

  By default, it will wait for as many replies as possible for one second.
* rate_limit(task_name, destination=all, reply=False, timeout=1, limit=0)

  Worker returns `{'ok': message}` on success,
  or `{'failure': message}` on failure.

      >>> from celery.task.control import rate_limit
      >>> rate_limit('tasks.add', '10/s', reply=True)
      [{'worker1': {'ok': 'new rate limit set successfully'}},
       {'worker2': {'ok': 'new rate limit set successfully'}}]

* ping(destination=all, reply=False, timeout=1, limit=0)

  Worker returns the simple message `"pong"`.

      >>> from celery.task.control import ping
      >>> ping(reply=True)
      [{'worker1': 'pong'},
       {'worker2': 'pong'}]

* revoke(destination=all, reply=False, timeout=1, limit=0)

  Worker simply returns `True`.

      >>> from celery.task.control import revoke
      >>> revoke('419e46eb-cf6a-4271-86a8-442b7124132c', reply=True)
      [{'worker1': True},
       {'worker2': True}]

* You can now add your own remote control commands!

  Remote control commands are functions registered in the command
  registry. Registering a command is done using
  :meth:`celery.worker.control.Panel.register`:

  .. code-block:: python

      from celery.task.control import Panel

      @Panel.register
      def reset_broker_connection(state, **kwargs):
          state.consumer.reset_connection()
          return {'ok': 'connection re-established'}

  With this module imported in the worker, you can launch the command
  using `celery.task.control.broadcast`::

      >>> from celery.task.control import broadcast
      >>> broadcast('reset_broker_connection', reply=True)
      [{'worker1': {'ok': 'connection re-established'}},
       {'worker2': {'ok': 'connection re-established'}}]

  **TIP** You can choose the worker(s) to receive the command
  by using the `destination` argument::

      >>> broadcast('reset_broker_connection', destination=['worker1'])
      [{'worker1': {'ok': 'connection re-established'}}]

* New remote control command: `dump_reserved`

  Dumps tasks reserved by the worker, waiting to be executed::

      >>> from celery.task.control import broadcast
      >>> broadcast('dump_reserved', reply=True)
      [{'myworker1': []}]

* New remote control command: `dump_schedule`

  Dumps the workers currently registered ETA schedule.
  These are tasks with an `eta` (or `countdown`) argument
  waiting to be executed by the worker.

      >>> broadcast('dump_schedule', reply=True)
      [{'w1': []},
       {'w3': []},
       {'w2': ['0. 2010-05-12 11:06:00 pri0 ,)',
                   kwargs:'{'page': 2}'}>']},
       {'w4': ['0. 2010-05-12 11:00:00 pri0 ,)',
                   kwargs:'{\'page\': 1}'}>',
               '1. 2010-05-12 11:12:00 pri0 ,)',
                   kwargs:'{\'page\': 3}'}>']}]

.. _v103-fixes:

Fixes
-----

* Mediator thread no longer blocks for more than 1 second.

  With rate limits enabled and when there was a lot of remaining time,
  the mediator thread could block shutdown (and potentially block other
  jobs from coming in).

* Remote rate limits weren't properly applied (Issue #98).

* Now handles exceptions with Unicode messages correctly in
  `TaskRequest.on_failure`.

* Database backend: `TaskMeta.result`: default value should be `None`
  not empty string.

.. _version-1.0.2:

1.0.2
=====
:release-date: 2010-03-31 12:50 p.m. CET
:release-by: Ask Solem

* Deprecated: :setting:`CELERY_BACKEND`, please use
  :setting:`CELERY_RESULT_BACKEND` instead.

* We now use a custom logger in tasks. This logger supports task magic
  keyword arguments in formats.

  The default format for tasks (:setting:`CELERYD_TASK_LOG_FORMAT`) now
  includes the id and the name of tasks so the origin of task log messages
  can easily be traced.
  Example output::

      [2010-03-25 13:11:20,317: INFO/PoolWorker-1]
          [tasks.add(a6e1c5ad-60d9-42a0-8b24-9e39363125a4)] Hello from add

  To revert to the previous behavior you can set::

      CELERYD_TASK_LOG_FORMAT = """
          [%(asctime)s: %(levelname)s/%(processName)s] %(message)s
      """.strip()

* Unit tests: Don't disable the django test database tear down;
  instead fixed the underlying issue which was caused by modifications
  to the `DATABASE_NAME` setting (Issue #82).

* Django Loader: New config :setting:`CELERY_DB_REUSE_MAX` (max number of
  tasks to reuse the same database connection).

  The default is to use a new connection for every task.
  We'd very much like to reuse the connection, but a safe number of
  reuses isn't known, and we don't have any way to handle the errors
  that might happen, which may even be database dependent.

  See: http://bit.ly/94fwdd

* worker: The worker components are now configurable:
  :setting:`CELERYD_POOL`, :setting:`CELERYD_CONSUMER`,
  :setting:`CELERYD_MEDIATOR`, and :setting:`CELERYD_ETA_SCHEDULER`.

  The default configuration is as follows:

  .. code-block:: python

      CELERYD_POOL = 'celery.concurrency.processes.TaskPool'
      CELERYD_MEDIATOR = 'celery.worker.controllers.Mediator'
      CELERYD_ETA_SCHEDULER = 'celery.worker.controllers.ScheduleController'
      CELERYD_CONSUMER = 'celery.worker.consumer.Consumer'

  The :setting:`CELERYD_POOL` setting makes it easy to swap out the
  multiprocessing pool with a threaded pool, or how about a
  twisted/eventlet pool?

  Consider the competition for the first pool plug-in started!

* Debian init-scripts: Use `-a` not `&&` (Issue #82).

* Debian init-scripts: Now always preserve `$CELERYD_OPTS` from the
  `/etc/default/celeryd` and `/etc/default/celerybeat`.

* celery.beat.Scheduler: Fixed a bug where the schedule wasn't properly
  flushed to disk if the schedule hadn't been properly initialized.

* ``celerybeat``: Now syncs the schedule to disk when receiving the
  :sig:`SIGTERM` and :sig:`SIGINT` signals.

* Control commands: Make sure keyword arguments aren't in Unicode.

* ETA scheduler: Was missing a logger object, so the scheduler crashed
  when trying to log that a task had been revoked.

* ``management.commands.camqadm``: Fixed typo `camqpadm` -> `camqadm`
  (Issue #83).

* PeriodicTask.delta_resolution: Wasn't working for days and hours;
  now fixed by rounding to the nearest day/hour.

* Fixed a potential infinite loop in `BaseAsyncResult.__eq__`, although
  there's no evidence that it has ever been triggered.

* worker: Now handles messages with encoding problems by acking them and
  emitting an error message.

.. _version-1.0.1:

1.0.1
=====
:release-date: 2010-02-24 07:05 p.m. CET
:release-by: Ask Solem

* Tasks are now acknowledged early instead of late.

  This is done because messages can only be acknowledged within the same
  connection channel, so if the connection is lost we'd have to re-fetch
  the message again to acknowledge it.

  This might or might not affect you, but mostly those running tasks with
  a really long execution time are affected, as all tasks that've made it
  all the way into the pool need to be executed before the worker can
  safely terminate (this is at most the number of pool workers,
  multiplied by the :setting:`CELERYD_PREFETCH_MULTIPLIER` setting).

  We multiply the prefetch count by default to increase the performance
  at times with bursts of tasks with a short execution time. If this
  doesn't apply to your use case, you should be able to set the prefetch
  multiplier to zero, without sacrificing performance.
note:: A patch to :mod:`multiprocessing` is currently being worked on; this patch would enable us to use a better solution, and is scheduled for inclusion in the `2.0.0` release. * The worker now shuts down cleanly when receiving the :sig:`SIGTERM` signal. * The worker now does a cold shutdown if the :sig:`SIGINT` signal is received (:kbd:`Control-c`); this means it tries to terminate as soon as possible. * Caching of results is now handled by the base backend classes, so individual backends no longer need to implement this functionality themselves. * Caches are now also limited in size, so their memory usage doesn't grow out of control. You can set the maximum number of results the cache can hold using the :setting:`CELERY_MAX_CACHED_RESULTS` setting (the default is five thousand results). In addition, you can re-fetch already retrieved results using `backend.reload_task_result` + `backend.reload_taskset_result` (that's for those who want to send results incrementally). * The worker now works on Windows again. .. warning:: If you're using Celery with Django, you can't use `project.settings` as the settings module name, but the following should work: .. code-block:: console $ python manage.py celeryd --settings=settings * Execution: `.messaging.TaskPublisher.send_task` now incorporates all the functionality apply_async previously did, like converting countdowns to ETAs, so :func:`celery.execute.apply_async` is now simply a convenient front-end to :meth:`celery.messaging.TaskPublisher.send_task`, using the task class's default options. Also :func:`celery.execute.send_task` has been introduced, which can apply tasks using just the task name (useful if the client doesn't have the destination task in its task registry). Example: >>> from celery.execute import send_task >>> result = send_task('celery.ping', args=[], kwargs={}) >>> result.get() 'pong' * `camqadm`: This is a new utility for command-line access to the AMQP API. Excellent for deleting queues/bindings/exchanges, experimentation and testing: .. code-block:: console $ camqadm 1> help This gives an interactive shell; type `help` for a list of commands. When using Django, use the management command instead: .. code-block:: console $ python manage.py camqadm 1> help * Redis result backend: To conform to recent Redis API changes, the following settings have been deprecated: * `REDIS_TIMEOUT` * `REDIS_CONNECT_RETRY` These will emit a `DeprecationWarning` if used. A `REDIS_PASSWORD` setting has been added, so you can use the new simple authentication mechanism in Redis. * The Redis result backend no longer calls `SAVE` when disconnecting, as this is apparently better handled by Redis itself. * If `settings.DEBUG` is on, the worker now warns about the possible memory leak it can result in. * The ETA scheduler now sleeps at most two seconds between iterations. * The ETA scheduler now deletes any revoked tasks it might encounter. As revokes aren't yet persistent, this is done to make sure the task is revoked even though, for example, it's currently being held back because its ETA is a week into the future. * The `task_id` argument is now respected even if the task is executed eagerly (either using apply, or :setting:`CELERY_ALWAYS_EAGER`). * The internal queues are now cleared if the connection is reset. * New magic keyword argument: `delivery_info`. Used by retry() to resend the task to its original destination using the same exchange/routing_key.
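A minimal sketch of how a task might inspect `delivery_info` (assuming the 1.0-era "magic keyword arguments" API, where this metadata arrives as extra keyword arguments; the task shown is hypothetical):

.. code-block:: python

    from celery.decorators import task

    @task()
    def refresh_feed(feed_url, **kwargs):
        # `delivery_info` is one of the magic keyword arguments; retry()
        # uses it to resend the task to the same exchange/routing key
        # it originally arrived on.
        delivery_info = kwargs.get('delivery_info') or {}
        print('exchange=%r routing_key=%r' % (
            delivery_info.get('exchange'),
            delivery_info.get('routing_key')))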
* Events: Fields weren't passed by `.send()` (fixes the UUID key errors in celerymon). * Added `--schedule`/`-s` option to the worker, so it is possible to specify a custom schedule filename when using an embedded ``celerybeat`` server (the `-B`/`--beat` option). * Better Python 2.4 compatibility. The test suite now passes. * task decorators: Now preserve the docstring as `cls.__doc__` (it was previously copied to `cls.run.__doc__`). * The `testproj` directory has been renamed to `tests` and we're now using `nose` + `django-nose` for test discovery, and `unittest2` for test cases. * New pip requirements files available in :file:`requirements`. * TaskPublisher: Declarations are now done once (per process). * Added `Task.delivery_mode` and the :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting. These can be used to mark messages non-persistent (i.e., so they're lost if the broker is restarted). * Now have our own `ImproperlyConfigured` exception, instead of using the Django one. * Improvements to the Debian init-scripts: Shows an error if the program is not executable. Doesn't modify `CELERYD` when using Django with virtualenv. .. _version-1.0.0: 1.0.0 ===== :release-date: 2010-02-10 04:00 p.m. CET :release-by: Ask Solem .. _v100-incompatible: Backward incompatible changes ----------------------------- * Celery doesn't support detaching anymore, so you have to use the tools available on your platform, or something like :pypi:`supervisor` to make ``celeryd``/``celerybeat``/``celerymon`` into background processes. We've had too many problems with the worker daemonizing itself, so it was decided it has to be removed. Example start-up scripts have been added to the `extra/` directory: * Debian, Ubuntu (:command:`start-stop-daemon`) `extra/debian/init.d/celeryd` `extra/debian/init.d/celerybeat` * macOS :command:`launchd` `extra/mac/org.celeryq.celeryd.plist` `extra/mac/org.celeryq.celerybeat.plist` `extra/mac/org.celeryq.celerymon.plist` * Supervisor (http://supervisord.org) `extra/supervisord/supervisord.conf` In addition to `--detach`, the following program arguments have been removed: `--uid`, `--gid`, `--workdir`, `--chroot`, `--pidfile`, `--umask`. All good daemonization tools should support equivalent functionality, so don't worry. Also the following configuration keys have been removed: `CELERYD_PID_FILE`, `CELERYBEAT_PID_FILE`, `CELERYMON_PID_FILE`. * Default worker loglevel is now `WARN`; to enable the previous log level, start the worker with `--loglevel=INFO`. * Tasks are automatically registered. This means you no longer have to register your tasks manually. You don't have to change your old code right away, as it doesn't matter if a task is registered twice. If you don't want your task to be automatically registered you can set the `abstract` attribute: .. code-block:: python class MyTask(Task): abstract = True By using `abstract`, only tasks subclassing this task will be automatically registered (this works like the Django ORM). If you don't want subclasses to be registered either, you can set the `autoregister` attribute to `False`. Incidentally, this change also fixes the problems with automatic name assignment and relative imports. So you also don't have to specify a task name anymore if you use relative imports. * You can no longer use regular functions as tasks. This change was added because it makes the internals a lot cleaner and simpler. However, you can now turn functions into tasks by using the `@task` decorator: ..
code-block:: python from celery.decorators import task @task() def add(x, y): return x + y .. seealso:: :ref:`guide-tasks` for more information about the task decorators. * The periodic task system has been rewritten to a centralized solution. This means the worker no longer schedules periodic tasks by default, but a new daemon has been introduced: `celerybeat`. To launch the periodic task scheduler you have to run ``celerybeat``: .. code-block:: console $ celerybeat Make sure this is running on one server only; if you run it twice, all periodic tasks will also be executed twice. If you only have one worker server you can embed it into the worker like this: .. code-block:: console $ celeryd --beat # Embed celerybeat in celeryd. * The supervisor has been removed. This means the `-S` and `--supervised` options to `celeryd` are no longer supported. Please use something like http://supervisord.org instead. * `TaskSet.join` has been removed, use `TaskSetResult.join` instead. * The task status `"DONE"` has been renamed to `"SUCCESS"`. * `AsyncResult.is_done` has been removed, use `AsyncResult.successful` instead. * The worker no longer stores errors if `Task.ignore_result` is set; to revert to the previous behavior, set :setting:`CELERY_STORE_ERRORS_EVEN_IF_IGNORED` to `True`. * The statistics functionality has been removed in favor of events, so the `-S` and ``--statistics`` switches have been removed. * The module `celery.task.strategy` has been removed. * `celery.discovery` has been removed, and its ``autodiscover`` function is now in `celery.loaders.djangoapp`. Reason: Internal API. * The :envvar:`CELERY_LOADER` environment variable now needs the loader class name in addition to the module name. For example, where you previously had `"celery.loaders.default"`, you now need `"celery.loaders.default.Loader"`; using the previous syntax will result in a `DeprecationWarning`. * Detecting the loader is now lazy, and so isn't done when importing `celery.loaders`. To make this happen `celery.loaders.settings` has been renamed to `load_settings` and is now a function returning the settings object. `celery.loaders.current_loader` is now also a function, returning the current loader. So:: loader = current_loader needs to be changed to:: loader = current_loader() .. _v100-deprecations: Deprecations ------------ * The following configuration variables have been renamed and will be deprecated in v2.0: * ``CELERYD_DAEMON_LOG_FORMAT`` -> ``CELERYD_LOG_FORMAT`` * ``CELERYD_DAEMON_LOG_LEVEL`` -> ``CELERYD_LOG_LEVEL`` * ``CELERY_AMQP_CONNECTION_TIMEOUT`` -> ``CELERY_BROKER_CONNECTION_TIMEOUT`` * ``CELERY_AMQP_CONNECTION_RETRY`` -> ``CELERY_BROKER_CONNECTION_RETRY`` * ``CELERY_AMQP_CONNECTION_MAX_RETRIES`` -> ``CELERY_BROKER_CONNECTION_MAX_RETRIES`` * ``SEND_CELERY_TASK_ERROR_EMAILS`` -> ``CELERY_SEND_TASK_ERROR_EMAILS`` * The public API names in celery.conf have also changed to a consistent naming scheme. * We now support consuming from an arbitrary number of queues. To do this we had to rename the configuration syntax. If you use any of the custom AMQP routing options (queue/exchange/routing_key, etc.), you should read the new FAQ entry: :ref:`faq-task-routing`. The previous syntax is deprecated and scheduled for removal in v2.0. * `TaskSet.run` has been renamed to `TaskSet.apply_async`. `TaskSet.run` has now been deprecated, and is scheduled for removal in v2.0. .. _v100-news: News ---- * Rate limiting support (per task type, or globally; see the sketch below). * New periodic task system. * Automatic registration.
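A minimal sketch of the per-task rate limiting mentioned above (assuming the 1.0-era decorator API; the task and its limit are hypothetical):

.. code-block:: python

    from celery.decorators import task

    @task(rate_limit='10/s')  # at most ten of these per second, per worker
    def fetch_page(url):
        import urllib2
        return urllib2.urlopen(url).read()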
* New cool task decorator syntax. * worker: now sends events if enabled with the `-E` argument. Excellent for monitoring tools; one is already in the making (https://github.com/celery/celerymon). Current events include: :event:`worker-heartbeat`, task-[received/succeeded/failed/retried], :event:`worker-online`, :event:`worker-offline`. * You can now delete (revoke) tasks that have already been applied. * You can now set the hostname the worker identifies as using the `--hostname` argument. * Cache backend now respects the :setting:`CELERY_TASK_RESULT_EXPIRES` setting. * Message format has been standardized and now uses ISO-8601 format for dates instead of datetime. * worker now responds to the :sig:`SIGHUP` signal by restarting itself. * Periodic tasks are now scheduled on the clock. That is, `timedelta(hours=1)` means every hour at :00 minutes, not every hour counted from when the server started. To revert to the previous behavior you can set `PeriodicTask.relative = True`. * Now supports passing execute options to a TaskSet's list of args. Example: .. code-block:: pycon >>> ts = TaskSet(add, [([2, 2], {}, {'countdown': 1}), ... ([4, 4], {}, {'countdown': 2}), ... ([8, 8], {}, {'countdown': 3})]) >>> ts.run() * Got a 3x performance gain by setting the prefetch count to four times the concurrency (from an average task round-trip of 0.1s to 0.03s!). A new setting has been added: :setting:`CELERYD_PREFETCH_MULTIPLIER`, which is set to `4` by default. * Improved support for webhook tasks. `celery.task.rest` is now deprecated, replaced with the new and shiny `celery.task.http`. It has more reflective names, a sensible interface, and makes it possible to override the methods used to perform HTTP requests. * The results of task sets are now cached by storing them in the result backend. .. _v100-changes: Changes ------- * Now depends on :pypi:`carrot` >= 0.8.1. * New dependencies: :pypi:`billiard`, :pypi:`python-dateutil`, :pypi:`django-picklefield`. * No longer depends on python-daemon. * The `uuid` distribution is added as a dependency when running Python 2.4. * Now remembers the previously detected loader by keeping it in the :envvar:`CELERY_LOADER` environment variable. This may help on Windows where fork emulation is used. * ETA no longer sends datetime objects, but uses ISO 8601 date format in a string for better compatibility with other platforms. * No longer sends error mails for retried tasks. * Tasks can now override the backend used to store results. * Refactored the ExecuteWrapper; `apply` and :setting:`CELERY_ALWAYS_EAGER` now also execute the task callbacks and signals. * Now using a proper scheduler for the tasks with an ETA. This means waiting ETA tasks are sorted by time, so we don't have to poll the whole list all the time. * Now also imports modules listed in :setting:`CELERY_IMPORTS` when running with Django (as documented). * Log level for stdout/stderr changed from INFO to ERROR. * ImportErrors are now properly propagated when auto-discovering tasks. * You can now use `celery.messaging.establish_connection` to establish a connection to the broker. * When running as a separate service the periodic task scheduler avoids polling too often. If you need faster poll times you can lower the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`. * You can now change periodic task intervals at runtime, by making `run_every` a property, or by subclassing `PeriodicTask.is_due` (see the sketch below).
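A minimal sketch of changing a periodic task's interval at runtime by making `run_every` a property (the class and its `interval_seconds` knob are hypothetical):

.. code-block:: python

    from datetime import timedelta

    from celery.task import PeriodicTask

    class RefreshStats(PeriodicTask):
        # Hypothetical attribute: change it at runtime to adjust the interval.
        interval_seconds = 30

        @property
        def run_every(self):
            return timedelta(seconds=self.interval_seconds)

        def run(self, **kwargs):
            pass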
* The worker now supports control commands enabled through the use of a broadcast queue; you can remotely revoke tasks or set the rate limit for a task type. See :mod:`celery.task.control`. * The services now set informative process names (as shown in `ps` listings) if the :pypi:`setproctitle` module is installed. * :exc:`~@NotRegistered` now inherits from :exc:`KeyError`, and `TaskRegistry.__getitem__`+`pop` raise `NotRegistered` instead. * You can set the loader via the :envvar:`CELERY_LOADER` environment variable. * You can now set :setting:`CELERY_IGNORE_RESULT` to ignore task results by default (if enabled, tasks don't save results or errors to the backend used). * The worker now correctly handles malformed messages by throwing away and acknowledging the message, instead of crashing. .. _v100-bugs: Bugs ---- * Fixed a race condition that could happen while storing task results in the database. .. _v100-documentation: Documentation ------------- * Reference now split into two sections; API reference and internal module reference. .. _version-0.8.4: 0.8.4 ===== :release-date: 2010-02-05 01:52 p.m. CEST :release-by: Ask Solem * Now emits a warning if the --detach argument is used. --detach shouldn't be used anymore, as it has several bugs related to it that aren't easily fixed. Instead, use something like start-stop-daemon, :pypi:`supervisor` or :command:`launchd` (macOS). * Make sure the logger class is process aware, even if running Python >= 2.6. * Error emails are no longer sent when the task is retried. .. _version-0.8.3: 0.8.3 ===== :release-date: 2009-12-22 09:43 a.m. CEST :release-by: Ask Solem * Fixed a possible race condition that could happen when storing/querying task results using the database backend. * Now has console script entry points in the :file:`setup.py` file, so tools like :pypi:`zc.buildout` will correctly install the programs ``celeryd`` and ``celeryinit``. .. _version-0.8.2: 0.8.2 ===== :release-date: 2009-11-20 03:40 p.m. CEST :release-by: Ask Solem * The QoS prefetch count wasn't applied properly, as it was set for every message received (which apparently behaves like "receive one more"), instead of only being set when the value we wanted changed. .. _version-0.8.1: 0.8.1 ===== :release-date: 2009-11-16 05:21 p.m. CEST :release-by: Ask Solem .. _v081-very-important: Very important note ------------------- This release (with carrot 0.8.0) enables AMQP QoS (quality of service), which means the workers will only receive as many messages as they can handle at a time. As with any release, you should test this version upgrade on your development servers before rolling it out to production! .. _v081-important: Important changes ----------------- * If you're using Python < 2.6 and you use the multiprocessing backport, then multiprocessing version 2.6.2.1 is required. * All AMQP_* settings have been renamed to BROKER_*, and in addition AMQP_SERVER has been renamed to BROKER_HOST, so before where you had:: AMQP_SERVER = 'localhost' AMQP_PORT = 5678 AMQP_USER = 'myuser' AMQP_PASSWORD = 'mypassword' AMQP_VHOST = 'celery' You need to change that to:: BROKER_HOST = 'localhost' BROKER_PORT = 5678 BROKER_USER = 'myuser' BROKER_PASSWORD = 'mypassword' BROKER_VHOST = 'celery' * Custom carrot backends now need to include the backend class name, so before where you had:: CARROT_BACKEND = 'mycustom.backend.module' you need to change it to:: CARROT_BACKEND = 'mycustom.backend.module.Backend' where `Backend` is the class name.
This is probably `"Backend"`, as that was the previously implied name. * New version requirement for carrot: 0.8.0. .. _v081-changes: Changes ------- * Incorporated the multiprocessing backport patch that fixes the `processName` error. * Ignore the result of PeriodicTasks by default. * Added a Redis result store backend. * Allow :file:`/etc/default/celeryd` to define additional options for the ``celeryd`` init-script. * Fixed an issue with MongoDB periodic tasks when using a time zone other than UTC. * Windows specific: Negate test for available ``os.fork`` (thanks :github_user:`miracle2k`). * Now tries to handle broken PID files. * Added a Django test runner to contrib that sets `CELERY_ALWAYS_EAGER = True` for testing with the database backend. * Added a :setting:`CELERY_CACHE_BACKEND` setting for using something other than the Django-global cache backend. * Use custom implementation of ``functools.partial`` for Python 2.4 support (there are probably still problems with running on 2.4, but it will eventually be supported). * Exceptions are now prepared for pickling when saving the :state:`RETRY` status, for all backends. * SQLite's lack of concurrency should only be a limiting factor if the database backend is used. .. _version-0.8.0: 0.8.0 ===== :release-date: 2009-09-22 03:06 p.m. CEST :release-by: Ask Solem .. _v080-incompatible: Backward incompatible changes ----------------------------- * Add traceback to result value on failure. .. note:: If you use the database backend you have to re-create the database table `celery_taskmeta`. Contact the :ref:`mailing-list` or :ref:`irc-channel` channel for help doing this. * Database tables are now only created if the database backend is used, so if you change back to the database backend at some point, be sure to initialize tables (django: `syncdb`, python: `celeryinit`). .. note:: This only applies if you're using Django version 1.1 or higher. * Now depends on `carrot` version 0.6.0. * Now depends on python-daemon 1.4.8. .. _v080-important: Important changes ----------------- * Celery can now be used in pure Python (outside of a Django project). This means Celery is no longer Django specific. For more information see the FAQ entry :ref:`faq-is-celery-for-django-only`. * Celery now supports task retries. See :ref:`task-retry` for more information. * We now have an AMQP result store backend. It uses messages to publish task return value and status. And it's incredibly fast! See issue #6 for more info! * AMQP QoS (prefetch count) implemented: This ensures we don't receive more messages than we can handle. * Now redirects stdout/stderr to the worker's log file when detached. * Now uses `inspect.getargspec` to only pass default arguments the task supports. * Added Task.on_success, .on_retry, .on_failure handlers. See :meth:`celery.task.base.Task.on_success`, :meth:`celery.task.base.Task.on_retry`, :meth:`celery.task.base.Task.on_failure`. * `celery.utils.gen_unique_id`: Workaround for http://bugs.python.org/issue4607 * You can now customize what happens at worker start, at process init, etc., by creating your own loaders (see :mod:`celery.loaders.default`, :mod:`celery.loaders.djangoapp`, :mod:`celery.loaders`). * Support for multiple AMQP exchanges and queues. This feature lacks documentation and tests, so anyone interested is encouraged to improve this situation. * The worker now survives a restart of the AMQP server! It automatically re-establishes the AMQP broker connection if it's lost. New settings: * AMQP_CONNECTION_RETRY Set to `True` to enable connection retries. * AMQP_CONNECTION_MAX_RETRIES.
Maximum number of restarts before we give up. Default: `100`. .. _v080-news: News ---- * Fixed an incompatibility between python-daemon and multiprocessing, which resulted in the `[Errno 10] No child processes` problem when detaching. * Fixed a possible DjangoUnicodeDecodeError being raised when saving pickled data to Django's Memcached cache backend. * Better Windows compatibility. * New version of the pickled field (taken from http://www.djangosnippets.org/snippets/513/). * New signals introduced: `task_sent`, `task_prerun` and `task_postrun`, see :mod:`celery.signals` for more information. * `TaskSetResult.join` caused `TypeError` when `timeout=None`. Thanks Jerzy Kozera. Closes #31 * `views.apply` should return an `HttpResponse` instance. Thanks to Jerzy Kozera. Closes #32 * `PeriodicTask`: Save conversion of `run_every` from `int` to `timedelta` to the class attribute instead of on the instance. * Exceptions have been moved to `celery.exceptions`, but are still available in the previous module. * Try to roll back the transaction and retry saving the result if an error happens while setting task status with the database backend. * jail() refactored into :class:`celery.execute.ExecuteWrapper`. * `views.apply` now correctly sets the mime-type to "application/json". * `views.task_status` now returns the exception if the state is :state:`RETRY`. * `views.task_status` now returns the traceback if the state is :state:`FAILURE` or :state:`RETRY`. * Documented default task arguments. * Added a sensible __repr__ to ExceptionInfo for easier debugging. * Fixed documentation typo `.. import map` -> `.. import dmap`. Thanks to :github_user:`mikedizon`. .. _version-0.6.0: 0.6.0 ===== :release-date: 2009-08-07 06:54 a.m. CET :release-by: Ask Solem .. _v060-important: Important changes ----------------- * Fixed a bug where tasks raising unpickleable exceptions crashed pool workers. So if you've had pool workers mysteriously disappearing, or problems with the worker stopping working, this has been fixed in this version. * Fixed a race condition with periodic tasks. * The task pool is now supervised, so if a pool worker crashes, goes away or stops responding, it is automatically replaced with a new one. * Task.name is now automatically generated out of class module+name, for example `"djangotwitter.tasks.UpdateStatusesTask"`. Very convenient. No idea why we didn't do this before. Some documentation is updated to not manually specify a task name. .. _v060-news: News ---- * Tested with Django 1.1 * New Tutorial: Creating a click counter using Carrot and Celery * Database entries for periodic tasks are now created at the worker's start-up instead of for each check (which had been a forgotten TODO/XXX in the code for a long time) * New settings variable: :setting:`CELERY_TASK_RESULT_EXPIRES` Time (in seconds, or a `datetime.timedelta` object) after which stored task results are deleted. For the moment this only works for the database backend (see the configuration sketch below). * The worker now emits a debug log message for which periodic tasks have been launched. * The periodic task table is now locked for reading while getting periodic task status (MySQL only so far; seeking patches for other engines). * A lot more debugging information is now available by turning on the `DEBUG` log level (`--loglevel=DEBUG`). * Functions/methods with a timeout argument now work correctly. * New: `celery.strategy.even_time_distribution`: With an iterator yielding task (args, kwargs) tuples, evenly distribute the processing of its tasks throughout the time window available.
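The :setting:`CELERY_TASK_RESULT_EXPIRES` setting mentioned above accepts either seconds or a `datetime.timedelta`; a minimal configuration sketch (the one-day value is just an example):

.. code-block:: python

    from datetime import timedelta

    # Delete stored task results one day after they're written.
    CELERY_TASK_RESULT_EXPIRES = timedelta(days=1)
    # Equivalent, in seconds:
    # CELERY_TASK_RESULT_EXPIRES = 24 * 60 * 60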
* Log message `Unknown task ignored...` now has log level `ERROR`. * The log message emitted when a task is received is now emitted for all tasks, even if the task has an ETA (estimated time of arrival). Also the log message now includes the ETA for the task (if any). * Acknowledgment now happens in the pool callback. It can't be done in the job target, as it's not pickleable (can't share AMQP connection, etc.). * Added a note about `.delay` hanging in the README. * Tests now pass in Django 1.1. * Fixed discovery to make sure the app is in INSTALLED_APPS. * Previously overridden pool behavior (process reap, wait until pool worker available, etc.) is now handled by `multiprocessing.Pool` itself. * Convert statistics data to Unicode for use as kwargs. Thanks Lucy! .. _version-0.4.1: 0.4.1 ===== :release-date: 2009-07-02 01:42 p.m. CET :release-by: Ask Solem * Fixed a bug with parsing the message options (`mandatory`, `routing_key`, `priority`, `immediate`). .. _version-0.4.0: 0.4.0 ===== :release-date: 2009-07-01 07:29 p.m. CET :release-by: Ask Solem * Adds eager execution. `celery.execute.apply`|`Task.apply` executes the function blocking until the task is done; for API compatibility it returns a `celery.result.EagerResult` instance. You can configure Celery to always run tasks locally by setting the :setting:`CELERY_ALWAYS_EAGER` setting to `True`. * Now depends on `anyjson`. * 99% coverage using Python `coverage` 3.0. .. _version-0.3.20: 0.3.20 ====== :release-date: 2009-06-25 08:42 p.m. CET :release-by: Ask Solem * New arguments to `apply_async` (the advanced version of `delay_task`): `countdown` and `eta`. >>> # Run 10 seconds into the future. >>> res = apply_async(MyTask, countdown=10) >>> # Run 1 day from now >>> res = apply_async(MyTask, ... eta=datetime.now() + timedelta(days=1)) * Now unlinks stale PID files. * Lots more tests. * Now compatible with carrot >= 0.5.0. * **IMPORTANT** The `subtask_ids` attribute on the `TaskSetResult` instance has been removed. To get this information instead use: >>> subtask_ids = [subtask.id for subtask in ts_res.subtasks] * `Taskset.run()` now respects extra message options from the task class. * Task: Added attribute `ignore_result`: Don't store the status and return value. This means you can't use the `celery.result.AsyncResult` to check if the task is done, or get its return value. Only use this if you need the performance and are able to live without these features. Any exceptions raised will store the return value/status as usual. * Task: Added attribute `disable_error_emails` to disable sending error emails for that task. * Should now work on Windows (although running in the background won't work, so using the `--detach` argument results in an exception being raised). * Added support for statistics for profiling and monitoring. To start sending statistics start the worker with the `--statistics` option. Then after a while you can dump the results by running `python manage.py celerystats`. See `celery.monitoring` for more information. * The Celery daemon can now be supervised (i.e., it is automatically restarted if it crashes). To use this start the worker with the `--supervised` option (or alternatively `-S`). * views.apply: View calling a task. Example: .. code-block:: text http://e.com/celery/apply/task_name/arg1/arg2//?kwarg1=a&kwarg2=b .. warning:: Use with caution! Don't expose this URL to the public without first ensuring that your code is safe! * Refactored `celery.task`.
It's now split into three modules: * ``celery.task`` Contains `apply_async`, `delay_task`, `discard_all`, and task shortcuts, plus imports objects from `celery.task.base` and `celery.task.builtins` * ``celery.task.base`` Contains task base classes: `Task`, `PeriodicTask`, `TaskSet`, `AsynchronousMapTask`, `ExecuteRemoteTask`. * ``celery.task.builtins`` Built-in tasks: `PingTask`, `DeleteExpiredTaskMetaTask`. .. _version-0.3.7: 0.3.7 ===== :release-date: 2008-06-16 11:41 p.m. CET :release-by: Ask Solem * **IMPORTANT** Now uses AMQP's `basic.consume` instead of `basic.get`. This means we're no longer polling the broker for new messages. * **IMPORTANT** Default concurrency limit is now set to the number of CPUs available on the system. * **IMPORTANT** `tasks.register`: Renamed `task_name` argument to `name`, so:: >>> tasks.register(func, task_name='mytask') has to be replaced with:: >>> tasks.register(func, name='mytask') * The daemon now correctly runs if the pidfile is stale. * Now compatible with carrot 0.4.5. * Default AMQP connection timeout is now 4 seconds. * `AsyncResult.read()` was always returning `True`. * Only use README as long_description if the file exists, so easy_install doesn't break. * `celery.view`: JSON responses now properly set their mime-type. * `apply_async` now has a `connection` keyword argument so you can re-use the same AMQP connection if you want to execute more than one task. * Handle failures in the task_status view so that it won't throw 500s. * Fixed typo `AMQP_SERVER` in documentation to `AMQP_HOST`. * Worker exception emails sent to administrators now work properly. * No longer depends on `django`, so installing `celery` won't affect the preferred Django version installed. * Now works with PostgreSQL (:pypi:`psycopg2`) again by registering the `PickledObject` field. * Worker: Added `--detach` option as an alias to `--daemon`; it's the term used in the documentation from now on. * Make sure the pool and the periodic task worker thread are terminated properly at exit (so :kbd:`Control-c` works again). * Now depends on `python-daemon`. * Removed dependency on `simplejson`. * Cache Backend: Re-establishes connection for every task process if the Django cache backend is :pypi:`python-memcached`/:pypi:`libmemcached`. * Tyrant Backend: Now re-establishes the connection for every task executed. .. _version-0.3.3: 0.3.3 ===== :release-date: 2009-06-08 01:07 p.m. CET :release-by: Ask Solem * The `PeriodicWorkController` now sleeps for 1 second between checking for periodic tasks to execute. .. _version-0.3.2: 0.3.2 ===== :release-date: 2009-06-08 01:07 p.m. CET :release-by: Ask Solem * worker: Added option `--discard`: Discard (delete!) all waiting messages in the queue. * Worker: The `--wakeup-after` option wasn't handled as a float. .. _version-0.3.1: 0.3.1 ===== :release-date: 2009-06-08 01:07 p.m. CET :release-by: Ask Solem * The `PeriodicTask` worker is now running in its own thread instead of blocking the `TaskController` loop. * Default `QUEUE_WAKEUP_AFTER` has been lowered to `0.1` (was `0.3`). .. _version-0.3.0: 0.3.0 ===== :release-date: 2009-06-08 12:41 p.m. CET :release-by: Ask Solem .. warning:: This is a development version; for the stable release, please see versions 0.2.x. **VERY IMPORTANT:** Pickle is now the encoder used for serializing task arguments, so be sure to flush your task queue before you upgrade.
* **IMPORTANT** TaskSet.run() now returns a ``celery.result.TaskSetResult`` instance, which lets you inspect the status and return values of a taskset as if it were a single entity. * **IMPORTANT** Celery now depends on carrot >= 0.4.1. * The Celery daemon now sends task errors to the registered admin emails. To turn off this feature, set `SEND_CELERY_TASK_ERROR_EMAILS` to `False` in your `settings.py`. Thanks to Grégoire Cachet. * You can now run the Celery daemon by using `manage.py`: .. code-block:: console $ python manage.py celeryd Thanks to Grégoire Cachet. * Added support for message priorities, topic exchanges, and custom routing keys for tasks. This means we've introduced `celery.task.apply_async`, a new way of executing tasks. You can use `celery.task.delay` and `celery.Task.delay` like usual, but if you want greater control over the message sent, you want `celery.task.apply_async` and `celery.Task.apply_async`. This also means the AMQP configuration has changed. Some settings have been renamed, while others are new: - ``CELERY_AMQP_EXCHANGE`` - ``CELERY_AMQP_PUBLISHER_ROUTING_KEY`` - ``CELERY_AMQP_CONSUMER_ROUTING_KEY`` - ``CELERY_AMQP_CONSUMER_QUEUE`` - ``CELERY_AMQP_EXCHANGE_TYPE`` See the entry :ref:`faq-task-routing` in the :ref:`FAQ <faq>` for more information. * Task errors are now logged using log level `ERROR` instead of `INFO`, and stack-traces are dumped. Thanks to Grégoire Cachet. * Make every new worker process re-establish its Django DB connection, solving the "MySQL connection died?" exceptions. Thanks to Vitaly Babiy and Jirka Vejrazka. * **IMPORTANT** Now using pickle to encode task arguments. This means you now can pass complex Python objects to tasks as arguments. * Removed dependency on `yadayada`. * Added a FAQ, see `docs/faq.rst`. * Now converts any Unicode keys in task `kwargs` to regular strings. Thanks Vitaly Babiy. * Renamed the `TaskDaemon` to `WorkController`. * `celery.datastructures.TaskProcessQueue` is now renamed to `celery.pool.TaskPool`. * The pool algorithm has been refactored for greater performance and stability. .. _version-0.2.0: 0.2.0 ===== :release-date: 2009-05-20 05:14 p.m. CET :release-by: Ask Solem * Final release of 0.2.0 * Compatible with carrot version 0.4.0. * Fixes some syntax errors related to fetching results from the database backend. .. _version-0.2.0-pre3: 0.2.0-pre3 ========== :release-date: 2009-05-20 05:14 p.m. CET :release-by: Ask Solem * *Internal release*. Improved handling of unpickleable exceptions; `get_result` now tries to recreate something looking like the original exception. .. _version-0.2.0-pre2: 0.2.0-pre2 ========== :release-date: 2009-05-20 01:56 p.m. CET :release-by: Ask Solem * Now handles unpickleable exceptions (like the dynamically generated subclasses of `django.core.exception.MultipleObjectsReturned`). .. _version-0.2.0-pre1: 0.2.0-pre1 ========== :release-date: 2009-05-20 12:33 p.m. CET :release-by: Ask Solem * It's getting quite stable, with a lot of new features, so bump version to 0.2. This is a pre-release. * `celery.task.mark_as_read()` and `celery.task.mark_as_failure()` have been removed. Use `celery.backends.default_backend.mark_as_read()` and `celery.backends.default_backend.mark_as_failure()` instead. .. _version-0.1.15: 0.1.15 ====== :release-date: 2009-05-19 04:13 p.m. CET :release-by: Ask Solem * The Celery daemon was leaking AMQP connections; this should be fixed. If you have any problems with too many open files (like `emfile` errors in `rabbit.log`), please contact us! ..
_version-0.1.14: 0.1.14 ====== :release-date: 2009-05-19 01:08 p.m. CET :release-by: Ask Solem * Fixed a syntax error in the `TaskSet` class (no such variable `TimeOutError`). .. _version-0.1.13: 0.1.13 ====== :release-date: 2009-05-19 12:36 p.m. CET :release-by: Ask Solem * Forgot to add `yadayada` to install requirements. * Now deletes all expired task results, not just those marked as done. * Able to load the Tokyo Tyrant backend class without Django configuration; Tyrant settings can be specified directly in the class constructor. * Improved API documentation. * Now using the Sphinx documentation system; you can build the HTML documentation by doing: .. code-block:: console $ cd docs $ make html and the result will be in `docs/_build/html`. .. _version-0.1.12: 0.1.12 ====== :release-date: 2009-05-18 04:38 p.m. CET :release-by: Ask Solem * `delay_task()` etc. now returns a `celery.task.AsyncResult` object, which lets you check the result and any failure that might've happened. It kind of works like the `multiprocessing.AsyncResult` class returned by `multiprocessing.Pool.map_async`. * Added ``dmap()`` and ``dmap_async()``. These work like the `multiprocessing.Pool` versions, except the tasks are distributed to the Celery server. Example: .. code-block:: pycon >>> from celery.task import dmap >>> import operator >>> dmap(operator.add, [[2, 2], [4, 4], [8, 8]]) [4, 8, 16] >>> from celery.task import dmap_async >>> import operator >>> result = dmap_async(operator.add, [[2, 2], [4, 4], [8, 8]]) >>> result.ready() False >>> time.sleep(1) >>> result.ready() True >>> result.result [4, 8, 16] * Refactored the task meta-data cache and database backends, and added a new backend for Tokyo Tyrant. You can set the backend in your Django settings file. Example: .. code-block:: python CELERY_RESULT_BACKEND = 'database'  # Uses the database CELERY_RESULT_BACKEND = 'cache'  # Uses the django cache framework CELERY_RESULT_BACKEND = 'tyrant'  # Uses Tokyo Tyrant TT_HOST = 'localhost'  # Hostname for the Tokyo Tyrant server. TT_PORT = 6657  # Port of the Tokyo Tyrant server. .. _version-0.1.11: 0.1.11 ====== :release-date: 2009-05-12 02:08 p.m. CET :release-by: Ask Solem * The logging system was leaking file descriptors, resulting in servers stopping with the EMFILE (too many open files) error (fixed). .. _version-0.1.10: 0.1.10 ====== :release-date: 2009-05-11 12:46 p.m. CET :release-by: Ask Solem * Tasks now support both positional arguments and keyword arguments. * Requires carrot 0.3.8. * The daemon now tries to reconnect if the connection is lost. .. _version-0.1.8: 0.1.8 ===== :release-date: 2009-05-07 12:27 p.m. CET :release-by: Ask Solem * Better test coverage * More documentation * The worker doesn't emit the `Queue is empty` message if `settings.CELERYD_EMPTY_MSG_EMIT_EVERY` is 0. .. _version-0.1.7: 0.1.7 ===== :release-date: 2009-04-30 01:50 p.m. CET :release-by: Ask Solem * Added some unit tests. * Can now use the database for task meta-data (like if the task has been executed or not); set `settings.CELERY_TASK_META`. * Can now run `python setup.py test` to run the unit tests from within the `tests` project. * Can set the AMQP exchange/routing key/queue using `settings.CELERY_AMQP_EXCHANGE`, `settings.CELERY_AMQP_ROUTING_KEY`, and `settings.CELERY_AMQP_CONSUMER_QUEUE`. .. _version-0.1.6: 0.1.6 ===== :release-date: 2009-04-28 02:13 p.m. CET :release-by: Ask Solem * Introducing `TaskSet`.
A set of subtasks is executed, and you can find out how many, or if all of them, are done (excellent for progress bars and such). * Now catches all exceptions when running `Task.__call__`, so the daemon doesn't die. This doesn't happen for pure functions yet, only `Task` classes. * `autodiscover()` now works with zipped eggs. * Worker: Now adds the current working directory to `sys.path` for convenience. * The `run_every` attribute of `PeriodicTask` classes can now be a `datetime.timedelta()` object. * Worker: You can now set the `DJANGO_PROJECT_DIR` variable for the worker and it will add that to `sys.path` for easy launching. * Can now check if a task has been executed or not via HTTP. You can do this by including the Celery `urls.py` into your project, >>> url(r'^celery/$', include('celery.urls')) then visiting the following URL: .. code-block:: text http://mysite/celery/$task_id/done/ This will return a JSON dictionary, for example: .. code-block:: json {"task": {"id": "TASK_ID", "executed": true}} * `delay_task` now returns a string id, not a `uuid.UUID` instance. * Now has `PeriodicTasks`, to have `cron`-like functionality. * Project changed name from `crunchy` to `celery`. The details of the name change request are in `docs/name_change_request.txt`. .. _version-0.1.0: 0.1.0 ===== :release-date: 2009-04-24 11:28 a.m. CET :release-by: Ask Solem * Initial release Sphinx started sucking by removing images from _static, so we need to add them here into actual content to ensure they are included :-( .. image:: ../images/celery-banner.png .. image:: ../images/celery-banner-small.png celery-4.1.0/docs/history/changelog-2.1.rst0000644000175000017500000005577613130607475020357 0ustar omeromer00000000000000.. _changelog-2.1: =============================== Change history for Celery 2.1 =============================== .. contents:: :local: .. _version-2.1.4: 2.1.4 ===== :release-date: 2010-12-03 12:00 p.m. CEST :release-by: Ask Solem .. _v214-fixes: Fixes ----- * Execution options to `apply_async` now take precedence over options returned by active routers. This was a regression introduced recently (Issue #244). * curses monitor: Long arguments are now truncated so curses doesn't crash with out-of-bounds errors (Issue #235). * multi: Channel errors occurring while handling control commands no longer crash the worker but are instead logged with severity error. * SQLAlchemy database backend: Fixed a race condition occurring when the client wrote the pending state. Just like the Django database backend, it no longer saves the pending state (Issue #261 + Issue #262). * Error email body now uses `repr(exception)` instead of `str(exception)`, as the latter could result in Unicode decode errors (Issue #245). * Error email timeout value is now configurable by using the :setting:`EMAIL_TIMEOUT` setting. * `celeryev`: Now works on Windows (but the curses monitor won't work without having curses). * Unit test output no longer emits non-standard characters. * worker: The broadcast consumer is now closed if the connection is reset. * worker: Now properly handles errors occurring while trying to acknowledge the message. * `TaskRequest.on_failure` now encodes the traceback using the current file-system encoding (Issue #286). * `EagerResult` can now be pickled (Issue #288). .. _v214-documentation: Documentation ------------- * Adding :ref:`contributing`. * Added :ref:`guide-optimizing`. * Added :ref:`faq-security` section to the FAQ. .. _version-2.1.3: 2.1.3 ===== :release-date: 2010-11-09 05:00 p.m.
CEST :release-by: Ask Solem .. _v213-fixes: Fixes ----- * Fixed deadlocks in `timer2` which could lead to `djcelerymon`/`celeryev -c` hanging. * `EventReceiver`: now sends a heartbeat request to find workers. This means :program:`celeryev` and friends find workers immediately at start-up. * ``celeryev`` curses monitor: Set screen_delay to 10ms, so the screen refreshes more often. * Fixed pickling errors when pickling :class:`AsyncResult` on older Python versions. * worker: The prefetch count was decremented by ETA tasks even if there were no active prefetch limits. .. _version-2.1.2: 2.1.2 ===== :release-date: TBA .. _v212-fixes: Fixes ----- * worker: Now sends the :event:`task-retried` event for retried tasks. * worker: Now honors ignore result for :exc:`~@WorkerLostError` and timeout errors. * ``celerybeat``: Fixed :exc:`UnboundLocalError` in ``celerybeat`` logging when using logging setup signals. * worker: All log messages now include `exc_info`. .. _version-2.1.1: 2.1.1 ===== :release-date: 2010-10-14 02:00 p.m. CEST :release-by: Ask Solem .. _v211-fixes: Fixes ----- * Now working on Windows again. Removed dependency on the :mod:`pwd`/:mod:`grp` modules. * snapshots: Fixed race condition leading to loss of events. * worker: Reject tasks with an ETA that cannot be converted to a time stamp. See issue #209. * concurrency.processes.pool: The semaphore was released twice for each task (both at ACK and result ready). This has been fixed, and it is now released only once per task. * docs/configuration: Fixed typo `CELERYD_TASK_SOFT_TIME_LIMIT` -> :setting:`CELERYD_TASK_SOFT_TIME_LIMIT`. See issue #214. * control command `dump_scheduled`: was using the old .info attribute. * multi: Fixed `set changed size during iteration` bug occurring in the restart command. * worker: Accidentally tried to use additional command-line arguments. This would lead to an error like: `got multiple values for keyword argument 'concurrency'`. Additional command-line arguments are now ignored, and no longer produce this error. However -- we do reserve the right to use positional arguments in the future, so please don't depend on this behavior. * ``celerybeat``: Now respects routers and task execution options again. * ``celerybeat``: Now reuses the publisher instead of the connection. * Cache result backend: Using :class:`float` as the expires argument to `cache.set` is deprecated by the Memcached libraries, so we now automatically cast to :class:`int`. * unit tests: No longer emits logging and warnings in test output. .. _v211-news: News ---- * Now depends on carrot version 0.10.7. * Added :setting:`CELERY_REDIRECT_STDOUTS`, and :setting:`CELERYD_REDIRECT_STDOUTS_LEVEL` settings. :setting:`CELERY_REDIRECT_STDOUTS` is used by the worker and beat. All output to `stdout` and `stderr` will be redirected to the current logger if enabled. :setting:`CELERY_REDIRECT_STDOUTS_LEVEL` decides the log level used and is :const:`WARNING` by default. * Added the :setting:`CELERYBEAT_SCHEDULER` setting. This setting is used to define the default for the -S option to :program:`celerybeat`. Example: .. code-block:: python CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler' * Added Task.expires: Used to set the default expiry time for tasks. * New remote control commands: `add_consumer` and `cancel_consumer`. .. method:: add_consumer(queue, exchange, exchange_type, routing_key, \*\*options) :module: Tells the worker to declare and consume from the specified declaration. ..
method:: cancel_consumer(queue_name) :module: Tells the worker to stop consuming from the queue (by queue name). Commands also added to :program:`celeryctl` and :class:`~celery.task.control.inspect`. Example using ``celeryctl`` to start consuming from queue "queue", in exchange "exchange", of type "direct" using binding key "key": .. code-block:: console $ celeryctl inspect add_consumer queue exchange direct key $ celeryctl inspect cancel_consumer queue See :ref:`monitoring-control` for more information about the :program:`celeryctl` program. Another example using :class:`~celery.task.control.inspect`: .. code-block:: pycon >>> from celery.task.control import inspect >>> inspect.add_consumer(queue='queue', exchange='exchange', ... exchange_type='direct', ... routing_key='key', ... durable=False, ... auto_delete=True) >>> inspect.cancel_consumer('queue') * ``celerybeat``: Now logs the traceback if a message can't be sent. * ``celerybeat``: Now enables a default socket timeout of 30 seconds. * ``README``/introduction/homepage: Added link to `Flask-Celery`_. .. _`Flask-Celery`: https://github.com/ask/flask-celery .. _version-2.1.0: 2.1.0 ===== :release-date: 2010-10-08 12:00 p.m. CEST :release-by: Ask Solem .. _v210-important: Important Notes --------------- * Celery is now following the versioning semantics defined by `semver`_. This means we're no longer allowed to use odd/even versioning semantics. By our previous versioning scheme this stable release should've been version 2.2. .. _`semver`: http://semver.org * Now depends on Carrot 0.10.7. * No longer depends on SQLAlchemy; this needs to be installed separately if the database result backend is used. * :pypi:`django-celery` now comes with a monitor for the Django Admin interface. This can also be used if you're not a Django user. (Update: Django-Admin monitor has been replaced with Flower, see the Monitoring guide). * If you get an error after upgrading saying: `AttributeError: 'module' object has no attribute 'system'`, then this is because the `celery.platform` module has been renamed to `celery.platforms` to not collide with the built-in :mod:`platform` module. You have to remove the old :file:`platform.py` (and maybe :file:`platform.pyc`) file from your previous Celery installation. To do this use :program:`python` to find the location of this module: .. code-block:: console $ python >>> import celery.platform >>> celery.platform <module 'celery.platform' from '/opt/devel/celery/celery/platform.pyc'> Here the compiled module is in :file:`/opt/devel/celery/celery/`, to remove the offending files do: .. code-block:: console $ rm -f /opt/devel/celery/celery/platform.py* .. _v210-news: News ---- * Added support for expiration of AMQP results (requires RabbitMQ 2.1.0) The new configuration option :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` sets the expiry time in seconds (can be int or float): .. code-block:: python CELERY_AMQP_TASK_RESULT_EXPIRES = 30 * 60 # 30 minutes. CELERY_AMQP_TASK_RESULT_EXPIRES = 0.80 # 800 ms. * ``celeryev``: Event Snapshots If enabled, the worker sends messages about what the worker is doing. These messages are called "events". The events are used by real-time monitors to show what the cluster is doing, but they're not very useful for monitoring over a longer period of time. Snapshots let you take "pictures" of the cluster's state at regular intervals. These can then be stored in a database to generate statistics, or even used for monitoring over longer time periods. :pypi:`django-celery` now comes with a Celery monitor for the Django Admin interface.
To use this you need to run the :pypi:`django-celery` snapshot camera, which stores snapshots to the database at configurable intervals. To use the Django admin monitor you need to do the following: 1. Create the new database tables: .. code-block:: console $ python manage.py syncdb 2. Start the :pypi:`django-celery` snapshot camera: .. code-block:: console $ python manage.py celerycam 3. Open up the django admin to monitor your cluster. The admin interface shows tasks, worker nodes, and even lets you perform some actions, like revoking and rate limiting tasks, and shutting down worker nodes. There's also a Debian init.d script for :mod:`~celery.bin.events` available, see :ref:`daemonizing` for more information. New command-line arguments to ``celeryev``: * :option:`celery events --camera`: Snapshot camera class to use. * :option:`celery events --logfile`: Log file * :option:`celery events --loglevel`: Log level * :option:`celery events --maxrate`: Shutter rate limit. * :option:`celery events --freq`: Shutter frequency The :option:`--camera <celery events --camera>` argument is the name of a class used to take snapshots with. It must support the interface defined by :class:`celery.events.snapshot.Polaroid`. Shutter frequency controls how often the camera thread wakes up, while the rate limit controls how often it will actually take a snapshot. The rate limit can be an integer (snapshots/s), or a rate limit string which has the same syntax as the task rate limit strings (`"200/m"`, `"10/s"`, `"1/h"`, etc). For the Django camera case, this rate limit can be used to control how often the snapshots are written to the database, and the frequency used to control how often the thread wakes up to check if there's anything new. The rate limit is off by default, which means it will take a snapshot for every :option:`--frequency <celery events --frequency>` seconds. * :func:`~celery.task.control.broadcast`: Added callback argument; this can be used to process replies immediately as they arrive. * ``celeryctl``: New command-line utility to manage and inspect worker nodes, apply tasks and inspect the results of tasks. .. seealso:: The :ref:`monitoring-control` section in the :ref:`guide`. Some examples: .. code-block:: console $ celeryctl apply tasks.add -a '[2, 2]' --countdown=10 $ celeryctl inspect active $ celeryctl inspect registered_tasks $ celeryctl inspect scheduled $ celeryctl inspect --help $ celeryctl apply --help * Added the ability to set an expiry date and time for tasks. Example:: >>> # Task expires after one minute from now. >>> task.apply_async(args, kwargs, expires=60) >>> # Also supports datetime >>> task.apply_async(args, kwargs, ... expires=datetime.now() + timedelta(days=1)) When a worker receives a task that has expired it will be marked as revoked (:exc:`~@TaskRevokedError`). * Changed the way logging is configured. We now configure the root logger instead of only configuring our custom logger. In addition we don't hijack the multiprocessing logger anymore, but instead use a custom logger name for different applications: ===================================== ===================================== **Application** **Logger Name** ===================================== ===================================== ``celeryd`` ``"celery"`` ``celerybeat`` ``"celery.beat"`` ``celeryev`` ``"celery.ev"`` ===================================== ===================================== This means that the `loglevel` and `logfile` arguments will affect all registered loggers (even those from third-party libraries).
Unless you configure the loggers manually as shown below, that is. *Users can choose to configure logging by subscribing to the :signal:`~celery.signals.setup_logging` signal:* .. code-block:: python from logging.config import fileConfig from celery import signals @signals.setup_logging.connect def setup_logging(**kwargs): fileConfig('logging.conf') If there are no receivers for this signal, the logging subsystem will be configured using the :option:`--loglevel <celery worker --loglevel>`/ :option:`--logfile <celery worker --logfile>` arguments; this will be used for *all defined loggers*. Remember that the worker also redirects stdout and stderr to the Celery logger; if you configure logging manually you also need to redirect the standard outs manually: .. code-block:: python from logging.config import fileConfig from celery import log def setup_logging(**kwargs): import logging fileConfig('logging.conf') stdouts = logging.getLogger('mystdoutslogger') log.redirect_stdouts_to_logger(stdouts, loglevel=logging.WARNING) * worker: Added command-line option :option:`--include <celery worker --include>`: A comma-separated list of (task) modules to be imported. Example: .. code-block:: console $ celeryd -I app1.tasks,app2.tasks * worker: now emits a warning if running as the root user (euid is 0). * :func:`celery.messaging.establish_connection`: Ability to override the defaults used, using the keyword argument "defaults". * worker: Now uses `multiprocessing.freeze_support()` so that it should work with **py2exe**, **PyInstaller**, **cx_Freeze**, etc. * worker: Now includes more meta-data for the :state:`STARTED` state: the PID and host name of the worker that started the task. See issue #181. * subtask: Merge additional keyword arguments to `subtask()` into task keyword arguments. For example: .. code-block:: pycon >>> s = subtask((1, 2), {'foo': 'bar'}, baz=1) >>> s.args (1, 2) >>> s.kwargs {'foo': 'bar', 'baz': 1} See issue #182. * worker: Now emits a warning if there's already a worker node using the same name running on the same virtual host. * AMQP result backend: Sending of results is now retried if the connection is down. * AMQP result backend: `result.get()`: Wait for next state if state isn't in :data:`~celery.states.READY_STATES`. * TaskSetResult now supports subscription. :: >>> res = TaskSet(tasks).apply_async() >>> res[0].get() * Added `Task.send_error_emails` + `Task.error_whitelist`, so these can be configured per task instead of just by the global setting. * Added `Task.store_errors_even_if_ignored`, so it can be changed per Task, not just by the global setting. * The Crontab scheduler no longer wakes up every second, but implements `remaining_estimate` (*Optimization*). * worker: Store :state:`FAILURE` result if the :exc:`~@WorkerLostError` exception occurs (worker process disappeared). * worker: Store :state:`FAILURE` result if one of the `*TimeLimitExceeded` exceptions occurs. * Refactored the periodic task responsible for cleaning up results. * The backend cleanup task is now only added to the schedule if :setting:`CELERY_TASK_RESULT_EXPIRES` is set. * If the schedule already contains a periodic task named "celery.backend_cleanup" it won't change it, so the behavior of the backend cleanup task can be easily changed (see the sketch below).
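A minimal sketch of overriding the cleanup task's schedule entry (assuming the beat schedule dictionary of this era, and that `crontab` is importable from `celery.schedules`; the chosen time is arbitrary):

.. code-block:: python

    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        # An entry with this exact name keeps Celery from adding
        # its own default entry for the backend cleanup task.
        'celery.backend_cleanup': {
            'task': 'celery.backend_cleanup',
            'schedule': crontab(hour=2, minute=30),  # hypothetical time
        },
    }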
* The task is now run every day at 4:00 AM, rather than every day counted from the first time it was run (using a Crontab schedule instead of `run_every`). * Renamed `celery.task.builtins.DeleteExpiredTaskMetaTask` -> :class:`celery.task.builtins.backend_cleanup`. * The task itself has been renamed from "celery.delete_expired_task_meta" to "celery.backend_cleanup". See issue #134. * Implemented `AsyncResult.forget` for SQLAlchemy/Memcached/Redis/Tokyo Tyrant backends (forget and remove task result). See issue #184. * :meth:`TaskSetResult.join <celery.result.TaskSetResult.join>`: Added 'propagate=True' argument. When set to :const:`False`, exceptions occurring in subtasks will not be re-raised. * Added `Task.update_state(task_id, state, meta)` as a shortcut to `task.backend.store_result(task_id, meta, state)`. The backend interface is "private" and the terminology outdated, so better to move this to :class:`~celery.task.base.Task` so it can be used. * timer2: Set `self.running=False` in :meth:`~celery.utils.timer2.Timer.stop` so it won't try to join again on subsequent calls to `stop()`. * Log colors are now disabled by default on Windows. * `celery.platform` renamed to :mod:`celery.platforms`, so it doesn't collide with the built-in :mod:`platform` module. * Exceptions occurring in Mediator+Pool callbacks are now caught and logged instead of taking down the worker. * Redis result backend: Now supports result expiration using the Redis `EXPIRE` command. * unit tests: Don't leave threads running at tear down. * worker: Task results shown in logs are now truncated to 46 chars. * `Task.__name__` is now an alias to `self.__class__.__name__`. This way tasks introspect more like regular functions. * `Task.retry`: Now raises :exc:`TypeError` if the kwargs argument is empty. See issue #164. * ``timedelta_seconds``: Use ``timedelta.total_seconds`` if running on Python 2.7. * :class:`~kombu.utils.limits.TokenBucket`: Generic Token Bucket algorithm. * :mod:`celery.events.state`: Recording of cluster state can now be paused and resumed, including support for buffering. .. method:: State.freeze(buffer=True) Pauses recording of the stream. If `buffer` is true, events received while being frozen will be buffered, and may be replayed later. .. method:: State.thaw(replay=True) Resumes recording of the stream. If `replay` is true, then the recorded buffer will be applied. .. method:: State.freeze_while(fun) With a function to apply, freezes the stream before, and replays the buffer after the function returns. * :meth:`EventReceiver.capture <celery.events.EventReceiver.capture>` Now supports a timeout keyword argument. * worker: The mediator thread is now disabled if :setting:`CELERY_RATE_LIMITS` is enabled, and tasks are directly sent to the pool without going through the ready queue (*Optimization*). .. _v210-fixes: Fixes ----- * Pool: A process timed out by `TimeoutHandler` must be joined by the Supervisor, so don't remove it from the internal process list. See issue #192. * `TaskPublisher.delay_task` now supports an exchange argument, so the exchange can be overridden when sending tasks in bulk using the same publisher. See issue #187. * The worker no longer marks tasks as revoked if :setting:`CELERY_IGNORE_RESULT` is enabled. See issue #207. * AMQP Result backend: Fixed bug with `result.get()` if :setting:`CELERY_TRACK_STARTED` enabled. `result.get()` would stop consuming after receiving the :state:`STARTED` state. * Fixed bug where new processes created by the pool supervisor become stuck while reading from the task Queue.
.. _v210-fixes: Fixes ----- * Pool: Process timed out by `TimeoutHandler` must be joined by the Supervisor, so don't remove it from the internal process list. See issue #192. * `TaskPublisher.delay_task` now supports exchange argument, so exchange can be overridden when sending tasks in bulk using the same publisher. See issue #187. * The worker no longer marks tasks as revoked if :setting:`CELERY_IGNORE_RESULT` is enabled. See issue #207. * AMQP Result backend: Fixed bug with `result.get()` if :setting:`CELERY_TRACK_STARTED` is enabled. `result.get()` would stop consuming after receiving the :state:`STARTED` state. * Fixed bug where new processes created by the pool supervisor become stuck while reading from the task Queue. See http://bugs.python.org/issue10037 * Fixed timing issue when declaring the remote control command reply queue. This issue could result in replies being lost, but has now been fixed. * Backward compatible `LoggerAdapter` implementation: Now works for Python 2.4. Also added support for several new methods: `fatal`, `makeRecord`, `_log`, `log`, `isEnabledFor`, `addHandler`, `removeHandler`. .. _v210-experimental: Experimental ------------ * multi: Added daemonization support. multi can now be used to start, stop and restart worker nodes: .. code-block:: console $ celeryd-multi start jerry elaine george kramer This also creates PID files and log files (:file:`celeryd@jerry.pid`, ..., :file:`celeryd@jerry.log`). To specify a location for these files use the `--pidfile` and `--logfile` arguments with the `%n` format: .. code-block:: console $ celeryd-multi start jerry elaine george kramer \ --logfile=/var/log/celeryd@%n.log \ --pidfile=/var/run/celeryd@%n.pid Stopping: .. code-block:: console $ celeryd-multi stop jerry elaine george kramer Restarting. The nodes will be restarted one by one as the old ones are shut down: .. code-block:: console $ celeryd-multi restart jerry elaine george kramer Killing the nodes (**WARNING**: Will discard currently executing tasks): .. code-block:: console $ celeryd-multi kill jerry elaine george kramer See `celeryd-multi help` for help. * multi: `start` command renamed to `show`. `celeryd-multi start` will now actually start and detach worker nodes. To just generate the commands you have to use `celeryd-multi show`. * worker: Added `--pidfile` argument. The worker will write its pid to this file when it starts. The worker will not be started if this file exists and the pid contained is still alive. * Added generic init.d script using `celeryd-multi` https://github.com/celery/celery/tree/master/extra/generic-init.d/celeryd .. _v210-documentation: Documentation ------------- * Added User guide section: Monitoring * Added user guide section: Periodic Tasks Moved from `getting-started/periodic-tasks` and updated. * tutorials/external moved to new section: "community". * References have been added to all sections in the documentation. This makes it easier to link between documents. celery-4.1.0/docs/history/changelog-3.1.rst0000644000175000017500000014571713130607475020363 0ustar omeromer00000000000000.. _changelog-3.1: ================ Change history ================ This document contains change notes for bugfix releases in the 3.1.x series (Cipater); please see :ref:`whatsnew-3.1` for an overview of what's new in Celery 3.1. .. _version-3.1.25: 3.1.25 ====== :release-date: 2016-10-10 12:00 PM PDT :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.37 ` - Fixed problem with chords in group introduced in 3.1.24 (Issue #3504). .. _version-3.1.24: 3.1.24 ====== :release-date: 2016-09-30 04:21 PM PDT :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.36 `. - Now supports Task protocol 2 from the future 4.0 release. Workers running 3.1.24 are now able to process messages sent using the `new task message protocol`_ to be introduced in Celery 4.0. Users upgrading to Celery 4.0 when this is released are encouraged to upgrade to this version as an intermediate step, as this means workers not yet upgraded will be able to process messages from clients/workers running 4.0.
.. _`new task message protocol`: http://docs.celeryproject.org/en/master/internals/protocol.html#version-2 - ``Task.send_events`` can now be set to disable sending of events for that task only. Example when defining the task: .. code-block:: python @app.task(send_events=False) def add(x, y): return x + y - **Utils**: Fixed compatibility with recent :pypi:`psutil` versions (Issue #3262). - **Canvas**: Chord now forwards partial arguments to its subtasks. Fix contributed by Tayfun Sen. - **App**: Arguments to app such as ``backend``, ``broker``, etc. are now pickled and sent to the child processes on Windows. Fix contributed by Jeremy Zafran. - **Deployment**: Generic init scripts now support being symlinked in runlevel directories (Issue #3208). - **Deployment**: Updated CentOS scripts to work with CentOS 7. Contributed by Joe Sanford. - **Events**: The curses monitor no longer crashes when the result of a task is empty. Fix contributed by Dongweiming. - **Worker**: ``repr(worker)`` would crash when called early in the startup process (Issue #2514). - **Tasks**: GroupResult now defines __bool__ and __nonzero__. This is to fix an issue where a ResultSet or GroupResult with an empty result list isn't properly tupled with the as_tuple() method when it is a parent result. This is due to the as_tuple() method performing a logical and operation on the ResultSet. Fix contributed by Colin McIntosh. - **Worker**: Fixed wrong values in autoscale related logging message. Fix contributed by ``@raducc``. - Documentation improvements by: * Alexandru Chirila * Michael Aquilina * Mikko Ekström * Mitchel Humpherys * Thomas A. Neil * Tiago Moreira Vieira * Yuriy Syrovetskiy * ``@dessant`` .. _version-3.1.23: 3.1.23 ====== :release-date: 2016-03-09 06:00 P.M. PST :release-by: Ask Solem - **Programs**: Last release broke support for the ``--hostname`` argument to :program:`celery multi` and :program:`celery worker --detach` (Issue #3103). - **Results**: MongoDB result backend could crash the worker at startup if not configured using a URL. .. _version-3.1.22: 3.1.22 ====== :release-date: 2016-03-07 01:30 P.M. PST :release-by: Ask Solem - **Programs**: The worker would crash immediately on startup on ``backend.as_uri()`` when using some result backends (Issue #3094). - **Programs**: :program:`celery multi`/:program:`celery worker --detach` would create an extraneous logfile including literal formats (e.g. ``%I``) in the filename (Issue #3096). .. _version-3.1.21: 3.1.21 ====== :release-date: 2016-03-04 11:16 a.m. PST :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.34 `. - Now depends on :mod:`billiard` 3.3.0.23. - **Prefork pool**: Fixes 100% CPU loop on Linux :manpage:`epoll` (Issue #1845). Also potential fix for: Issue #2142, Issue #2606 - **Prefork pool**: Fixes memory leak related to processes exiting (Issue #2927). - **Worker**: Fixes crash at start-up when trying to censor passwords in MongoDB and Cache result backend URLs (Issue #3079, Issue #3045, Issue #3049, Issue #3068, Issue #3073). Fix contributed by Maxime Verger. - **Task**: An exception is now raised if countdown/expires is less than -2147483648 (Issue #3078). - **Programs**: :program:`celery shell --ipython` now compatible with newer :pypi:`IPython` versions. - **Programs**: The DuplicateNodeName warning emitted by inspect/control now includes a list of the node names returned. Contributed by Sebastian Kalinowski.
- **Utils**: The ``.discard(item)`` method of :class:`~celery.utils.collections.LimitedSet` didn't actually remove the item (Issue #3087). Fix contributed by Dave Smith. - **Worker**: Node name formatting now emits a less confusing error message for unmatched format keys (Issue #3016). - **Results**: RPC/AMQP backends: Fixed deserialization of JSON exceptions (Issue #2518). Fix contributed by Allard Hoeve. - **Prefork pool**: The `process inqueue damaged` error message now includes the original exception raised. - **Documentation**: Includes improvements by: - Jeff Widman. .. _version-3.1.20: 3.1.20 ====== :release-date: 2016-01-22 06:50 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.33 `. - Now depends on :mod:`billiard` 3.3.0.22. Includes binary wheels for Microsoft Windows x86 and x86_64! - **Task**: Error emails now use the ``utf-8`` character set by default (Issue #2737). - **Task**: Retry now forwards original message headers (Issue #3017). - **Worker**: Bootsteps can now hook into ``on_node_join``/``leave``/``lost``. See :ref:`extending-consumer-attributes` for an example. - **Events**: Fixed handling of DST timezones (Issue #2983). - **Results**: Redis backend stopped respecting certain settings. Contributed by Jeremy Llewellyn. - **Results**: Database backend now properly supports JSON exceptions (Issue #2441). - **Results**: Redis ``new_join`` didn't properly call task errbacks on chord error (Issue #2796). - **Results**: Restores Redis compatibility with Python :pypi:`redis` < 2.10.0 (Issue #2903). - **Results**: Fixed rare issue with chord error handling (Issue #2409). - **Tasks**: Using queue-name values in :setting:`CELERY_ROUTES` now works again (Issue #2987). - **General**: Result backend password now sanitized in report output (Issue #2812, Issue #2004). - **Configuration**: Now gives helpful error message when the result backend configuration points to a module, and not a class (Issue #2945). - **Results**: Exceptions sent by JSON serialized workers are now properly handled by pickle configured workers. - **Programs**: ``celery control autoscale`` now works (Issue #2950). - **Programs**: ``celery beat --detached`` now runs after fork callbacks. - **General**: Fix for LRU cache implementation on Python 3.5 (Issue #2897). Contributed by Dennis Brakhane. Python 3.5's ``OrderedDict`` doesn't allow mutation while it is being iterated over. This breaks "update" if it is called with a dict larger than the maximum size. This commit changes the code to a version that doesn't iterate over the dict, and should also be a little bit faster. - **Init-scripts**: The beat init-script now properly reports service as down when no pid file can be found. Fix contributed by Eric Zarowny. - **Beat**: Added cleaning of corrupted scheduler files for some storage backend errors (Issue #2985). Fix contributed by Aleksandr Kuznetsov. - **Beat**: Now syncs the schedule even if the schedule is empty. Fix contributed by Colin McIntosh. - **Supervisord**: Set higher process priority in the :pypi:`supervisord` example. Contributed by George Tantiras. - **Documentation**: Includes improvements by: :github_user:`Bryson` Caleb Mingle Christopher Martin Dieter Adriaenssens Jason Veatch Jeremy Cline Juan Rossi Kevin Harvey Kevin McCarthy Kirill Pavlov Marco Buttu :github_user:`Mayflower` Mher Movsisyan Michael Floering :github_user:`michael-k` Nathaniel Varona Rudy Attias Ryan Luckie Steven Parker :github_user:`squfrans` Tadej Janež TakesxiSximada Tom S
.. _version-3.1.19: 3.1.19 ====== :release-date: 2015-10-26 01:00 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.29 `. - Now depends on :mod:`billiard` 3.3.0.21. - **Results**: Fixed MongoDB result backend URL parsing problem (Issue celery/kombu#375). - **Worker**: Task request now properly sets ``priority`` in delivery_info. Fix contributed by Gerald Manipon. - **Beat**: PyPy shelve may raise ``KeyError`` when setting keys (Issue #2862). - **Programs**: :program:`celery beat --detached` now working on PyPy. Fix contributed by Krzysztof Bujniewicz. - **Results**: Redis result backend now ensures all pipelines are cleaned up. Contributed by Justin Patrin. - **Results**: Redis result backend now allows for timeout to be set in the query portion of the result backend URL. For example ``CELERY_RESULT_BACKEND = 'redis://?timeout=10'`` Contributed by Justin Patrin. - **Results**: ``result.get`` now properly handles failures where the exception value is set to :const:`None` (Issue #2560). - **Prefork pool**: Fixed attribute error ``proc.dead``. - **Worker**: Fixed worker hanging when gossip/heartbeat disabled (Issue #1847). Fix contributed by Aaron Webber and Bryan Helmig. - **Results**: MongoDB result backend now supports pymongo 3.x (Issue #2744). Fix contributed by Sukrit Khera. - **Results**: RPC/AMQP backends didn't deserialize exceptions properly (Issue #2691). Fix contributed by Sukrit Khera. - **Programs**: Fixed problem with :program:`celery amqp`'s ``basic_publish`` (Issue #2013). - **Worker**: Embedded beat now properly sets app for thread/process (Issue #2594). - **Documentation**: Many improvements and typos fixed. Contributions by: Carlos Garcia-Dubus D. Yu :github_user:`jerry` Jocelyn Delalande Josh Kupershmidt Juan Rossi :github_user:`kanemra` Paul Pearce Pavel Savchenko Sean Wang Seungha Kim Zhaorong Ma .. _version-3.1.18: 3.1.18 ====== :release-date: 2015-04-22 05:30 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.25 `. - Now depends on :mod:`billiard` 3.3.0.20. - **Django**: Now supports Django 1.8 (Issue #2536). Fix contributed by Bence Tamas and Mickaël Penhard. - **Results**: MongoDB result backend now compatible with pymongo 3.0. Fix contributed by Fatih Sucu. - **Tasks**: Fixed bug only happening when a task has multiple callbacks (Issue #2515). Fix contributed by NotSqrt. - **Commands**: Preload options now support ``--arg value`` syntax. Fix contributed by John Anderson. - **Compat**: A typo caused ``celery.log.setup_logging_subsystem`` to be undefined. Fix contributed by Gunnlaugur Thor Briem. - **init-scripts**: The beat generic init-script now uses :file:`/bin/sh` instead of :command:`bash` (Issue #2496). Fix contributed by Jelle Verstraaten. - **Django**: Fixed a :exc:`TypeError` sometimes occurring in logging when validating models. Fix contributed by Alexander. - **Commands**: Worker now supports new :option:`--executable ` argument that can be used with :option:`celery worker --detach`. Contributed by Bert Vanderbauwhede. - **Canvas**: Fixed crash in chord unlock fallback task (Issue #2404). - **Worker**: Fixed rare crash occurring with :option:`--autoscale ` enabled (Issue #2411). - **Django**: Properly recycle worker Django database connections when the Django ``CONN_MAX_AGE`` setting is enabled (Issue #2453). Fix contributed by Luke Burden. .. _version-3.1.17: 3.1.17 ====== :release-date: 2014-11-19 03:30 p.m. UTC :release-by: Ask Solem
.. admonition:: Don't enable the `CELERYD_FORCE_EXECV` setting! Please review your configuration and disable this option if you're using the RabbitMQ or Redis transport. Keeping this option enabled after 3.1 means the async based prefork pool will be disabled, which can easily cause instability. - **Requirements** - Now depends on :ref:`Kombu 3.0.24 `. Includes the new Qpid transport coming in Celery 3.2, backported to support those who may still require Python 2.6 compatibility. - Now depends on :mod:`billiard` 3.3.0.19. - ``celery[librabbitmq]`` now depends on librabbitmq 1.6.1. - **Task**: The timing of ETA/countdown tasks was off after the example ``LocalTimezone`` implementation in the Python documentation stopped working in Python 3.4 (Issue #2306). - **Task**: Raising :exc:`~celery.exceptions.Ignore` no longer sends ``task-failed`` event (Issue #2365). - **Redis result backend**: Fixed unbound local errors. Fix contributed by Thomas French. - **Task**: Callbacks weren't called properly if ``link`` was a list of signatures (Issue #2350). - **Canvas**: chain and group now handle json serialized signatures (Issue #2076). - **Results**: ``.join_native()`` would accidentally treat the ``STARTED`` state as being ready (Issue #2326). This could lead to the chord callback being called with invalid arguments when using chords with the :setting:`CELERY_TRACK_STARTED` setting enabled. - **Canvas**: The ``chord_size`` attribute is now set for all canvas primitives, making sure more combinations will work with the ``new_join`` optimization for Redis (Issue #2339). - **Task**: Fixed problem with app not being properly propagated to ``trace_task`` in all cases. Fix contributed by :github_user:`kristaps`. - **Worker**: Expires from task message is now associated with a timezone. Fix contributed by Albert Wang. - **Cassandra result backend**: Fixed problems when using detailed mode. When using the Cassandra backend in detailed mode, a regression caused errors when attempting to retrieve results. Fix contributed by Gino Ledesma. - **Mongodb Result backend**: Pickling the backend instance will now include the original URL (Issue #2347). Fix contributed by Sukrit Khera. - **Task**: Exception info wasn't properly set for tasks raising :exc:`~celery.exceptions.Reject` (Issue #2043). - **Worker**: Duplicates are now removed when loading the set of revoked tasks from the worker state database (Issue #2336). - **celery.contrib.rdb**: Fixed problems with ``rdb.set_trace`` calling stop from the wrong frame. Fix contributed by :github_user:`llllllllll`. - **Canvas**: ``chain`` and ``chord`` can now be immutable. - **Canvas**: ``chord.apply_async`` will now keep partial args set in ``self.args`` (Issue #2299). - **Results**: Small refactoring so that results are decoded the same way in all result backends. - **Logging**: The ``processName`` format was introduced in Python 2.6.2 so for compatibility this format is now excluded when using earlier versions (Issue #1644). .. _version-3.1.16: 3.1.16 ====== :release-date: 2014-10-03 06:00 p.m. UTC :release-by: Ask Solem - **Worker**: 3.1.15 broke :option:`-Ofair ` behavior (Issue #2286). This regression could result in all tasks executing in a single child process if ``-Ofair`` was enabled. - **Canvas**: ``celery.signature`` now properly forwards app argument in all cases. - **Task**: ``.retry()`` didn't raise the exception correctly when called without a current exception. Fix contributed by Andrea Rabbaglietti.
- **Worker**: The ``enable_events`` remote control command disabled worker-related events by mistake (Issue #2272). Fix contributed by Konstantinos Koukopoulos. - **Django**: Adds support for Django 1.7 class names in INSTALLED_APPS when using ``app.autodiscover_tasks()`` (Issue #2248). - **Sphinx**: ``celery.contrib.sphinx`` now uses ``getfullargspec`` on Python 3 (Issue #2302). - **Redis/Cache Backends**: Chords will now run at most once if one or more tasks in the chord are executed multiple times for some reason. .. _version-3.1.15: 3.1.15 ====== :release-date: 2014-09-14 11:00 p.m. UTC :release-by: Ask Solem - **Django**: Now makes sure ``django.setup()`` is called before importing any task modules (Django 1.7 compatibility, Issue #2227). - **Results**: ``result.get()`` was misbehaving by calling ``backend.get_task_meta`` in a :keyword:`finally` call, leading to AMQP result backend queues not being properly cleaned up (Issue #2245). .. _version-3.1.14: 3.1.14 ====== :release-date: 2014-09-08 03:00 p.m. UTC :release-by: Ask Solem - **Requirements** - Now depends on :ref:`Kombu 3.0.22 `. - **Init-scripts**: The generic worker init-scripts' ``status`` command now gets an accurate pidfile list (Issue #1942). - **Init-scripts**: The generic beat script now implements the ``status`` command. Contributed by John Whitlock. - **Commands**: Multi now writes informational output to stdout instead of stderr. - **Worker**: Now ignores not implemented error for ``pool.restart`` (Issue #2153). - **Task**: Retry no longer raises the retry exception when executed in eager mode (Issue #2164). - **AMQP Result backend**: Now ensures ``on_interval`` is called at least every second for blocking calls to properly propagate parent errors. - **Django**: Compatibility with Django 1.7 on Windows (Issue #2126). - **Programs**: :option:`--umask ` argument can now be specified in either octal (if starting with 0) or decimal.
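For example, an illustrative invocation of the ``--umask`` argument described above (``0022`` octal and ``18`` decimal denote the same mask):

.. code-block:: console

    $ celery worker --detach --umask=0022
    $ celery worker --detach --umask=18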
.. _version-3.1.13: 3.1.13 ====== Security Fixes -------------- * [Security: `CELERYSA-0002`_] Insecure default umask. The built-in utility used to daemonize the Celery worker service sets an insecure umask by default (umask 0). This means that any files or directories created by the worker will end up having world-writable permissions. Special thanks to Red Hat for originally discovering and reporting the issue! This version no longer sets a umask by default, so if unset the umask of the parent process will be used. .. _`CELERYSA-0002`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0002.txt News ---- - **Requirements** - Now depends on :ref:`Kombu 3.0.21 `. - Now depends on :mod:`billiard` 3.3.0.18. - **App**: ``backend`` argument now also sets the :setting:`CELERY_RESULT_BACKEND` setting. - **Task**: ``signature_from_request`` now propagates ``reply_to`` so that the RPC backend works with retried tasks (Issue #2113). - **Task**: ``retry`` will no longer attempt to re-queue the task if sending the retry message fails. Unrelated exceptions being raised could cause a message loop, so it was better to remove this behavior. - **Beat**: Accounts for standard 1ms drift by always waking up 0.010s earlier. This will adjust the latency so that the periodic tasks won't move 1ms after every invocation. - Documentation fixes contributed by Yuval Greenfield, Lucas Wiman, :github_user:`nicholsonjf`. - **Worker**: Removed an outdated assert statement that could lead to errors being masked (Issue #2086). .. _version-3.1.12: 3.1.12 ====== :release-date: 2014-06-09 10:12 p.m. UTC :release-by: Ask Solem - **Requirements** Now depends on :ref:`Kombu 3.0.19 `. - **App**: Connections weren't being closed after fork due to an error in the after fork handler (Issue #2055). This could manifest itself by causing framing errors when using RabbitMQ (``Unexpected frame``). - **Django**: ``django.setup()`` was being called too late when using Django 1.7 (Issue #1802). - **Django**: Fixed problems with event timezones when using Django (``Substantial drift``). Celery didn't take into account that Django modifies the ``time.timezone`` attributes and friends. - **Canvas**: ``Signature.link`` now works when the link option is a scalar value (Issue #2019). - **Prefork pool**: Fixed race conditions occurring when file descriptors were removed from the event loop. Fix contributed by Roger Hu. - **Prefork pool**: Improved solution for dividing tasks between child processes. This change should improve performance when there are many child processes, and also decrease the chance that two subsequent tasks are written to the same child process. - **Worker**: Now ignores unknown event types, instead of crashing. Fix contributed by Illes Solt. - **Programs**: :program:`celery worker --detach` no longer closes open file descriptors when :envvar:`C_FAKEFORK` is used so that the worker's output can be seen. - **Programs**: The default working directory for :program:`celery worker --detach` is now the current working directory, not ``/``. - **Canvas**: ``signature(s, app=app)`` didn't upgrade serialized signatures to their original class (``subtask_type``) when the ``app`` keyword argument was used. - **Control**: The ``duplicate nodename`` warning emitted by control commands now shows the duplicate node name. - **Tasks**: Can now call ``ResultSet.get()`` on a result set without members. Fix contributed by Alexey Kotlyarov. - **App**: Fixed strange traceback mangling issue for ``app.connection_or_acquire``. - **Programs**: The :program:`celery multi stopwait` command is now documented in usage. - **Other**: Fixed cleanup problem with ``PromiseProxy`` when an error is raised while trying to evaluate the promise. - **Other**: The utility used to censor configuration values now handles non-string keys. Fix contributed by Luke Pomfrey. - **Other**: The ``inspect conf`` command didn't handle non-string keys well. Fix contributed by Jay Farrimond. - **Programs**: Fixed argument handling problem in :program:`celery worker --detach`. Fix contributed by Dmitry Malinovsky. - **Programs**: :program:`celery worker --detach` didn't forward working directory option (Issue #2003). - **Programs**: :program:`celery inspect registered` no longer includes the list of built-in tasks. - **Worker**: The ``requires`` attribute for boot steps wasn't being handled correctly (Issue #2002). - **Eventlet**: The eventlet pool now supports the ``pool_grow`` and ``pool_shrink`` remote control commands (see the example after this list). Contributed by Mher Movsisyan. - **Eventlet**: The eventlet pool now implements statistics for :program:`celery inspect stats`. Contributed by Mher Movsisyan. - **Documentation**: Clarified ``Task.rate_limit`` behavior. Contributed by Jonas Haag. - **Documentation**: ``AbortableTask`` examples now updated to use the new API (Issue #1993). - **Documentation**: The security documentation examples used an out of date import. Fix contributed by Ian Dees. - **Init-scripts**: The CentOS init-scripts didn't quote :envvar:`CELERY_CHDIR`. Fix contributed by :github_user:`ffeast`.
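A hypothetical session using the new eventlet pool remote control commands mentioned above (the ``proj`` app name is an assumption):

.. code-block:: console

    $ celery -A proj control pool_grow 2
    $ celery -A proj control pool_shrink 2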
.. _version-3.1.11: 3.1.11 ====== :release-date: 2014-04-16 11:00 p.m. UTC :release-by: Ask Solem - **Now compatible with RabbitMQ 3.3.0** You need to run Celery 3.1.11 or later when using RabbitMQ 3.3, and if you use the ``librabbitmq`` module you also have to upgrade to librabbitmq 1.5.0: .. code-block:: bash $ pip install -U librabbitmq - **Requirements**: - Now depends on :ref:`Kombu 3.0.15 `. - Now depends on `billiard 3.3.0.17`_. - Bundle ``celery[librabbitmq]`` now depends on :mod:`librabbitmq` 1.5.0. .. _`billiard 3.3.0.17`: https://github.com/celery/billiard/blob/master/CHANGES.txt - **Tasks**: The :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting was being ignored (Issue #1953). - **Worker**: New :option:`celery worker --heartbeat-interval` can be used to change the time (in seconds) between sending event heartbeats. Contributed by Matthew Duggan and Craig Northway. - **App**: Fixed memory leaks occurring when creating lots of temporary app instances (Issue #1949). - **MongoDB**: SSL configuration with non-MongoDB transport breaks MongoDB results backend (Issue #1973). Fix contributed by Brian Bouterse. - **Logging**: The color formatter accidentally modified ``record.msg`` (Issue #1939). - **Results**: Fixed problem with task trails being stored multiple times, causing ``result.collect()`` to hang (Issue #1936, Issue #1943). - **Results**: ``ResultSet`` now implements a ``.backend`` attribute for compatibility with ``AsyncResult``. - **Results**: ``.forget()`` now also clears the local cache. - **Results**: Fixed problem with multiple calls to ``result._set_cache`` (Issue #1940). - **Results**: ``join_native`` populated result cache even if disabled. - **Results**: The YAML result serializer should now be able to handle storing exceptions. - **Worker**: No longer sends task error emails for expected errors (in ``@task(throws=(..., ))``). - **Canvas**: Fixed problem with exception deserialization when using the JSON serializer (Issue #1987). - **Eventlet**: Fixes crash when ``celery.contrib.batches`` attempted to cancel a non-existing timer (Issue #1984). - Can now import ``celery.version_info_t`` and ``celery.five`` (Issue #1968). .. _version-3.1.10: 3.1.10 ====== :release-date: 2014-03-22 09:40 p.m. UTC :release-by: Ask Solem - **Requirements**: - Now depends on :ref:`Kombu 3.0.14 `. - **Results**: Reliability improvements to the SQLAlchemy database backend. Previously the connection from the MainProcess was improperly shared with the workers. (Issue #1786) - **Redis:** Important note about events (Issue #1882). There's a new transport option for Redis that enables monitors to filter out unwanted events. Enabling this option in the workers will increase performance considerably: .. code-block:: python BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True} Enabling this option means that your workers won't be able to see workers with the option disabled (or running an older version of Celery), so if you do enable it then make sure you do so on all nodes. See :ref:`redis-caveats`. This will be the default in Celery 3.2. - **Results**: The :class:`@AsyncResult` object now keeps a local cache of the final state of the task. This means that the global result cache can finally be disabled, and you can do so by setting :setting:`CELERY_MAX_CACHED_RESULTS` to :const:`-1`. The lifetime of the cache will then be bound to the lifetime of the result object, which will be the default behavior in Celery 3.2.
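A minimal configuration sketch for disabling the global result cache as described above:

.. code-block:: python

    # Rely on the per-result local cache only.
    CELERY_MAX_CACHED_RESULTS = -1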
- **Events**: The "Substantial drift" warning message is now logged once per node name only (Issue #1802). - **Worker**: Ability to use one log file per child process when using the prefork pool. This can be enabled by using the new ``%i`` and ``%I`` format specifiers for the log file name. See :ref:`worker-files-process-index`. - **Redis**: New experimental chord join implementation. This is an optimization for chords when using the Redis result backend, where the join operation is now considerably faster and using less resources than the previous strategy. The new option can be set in the result backend URL: .. code-block:: python CELERY_RESULT_BACKEND = 'redis://localhost?new_join=1' This must be enabled manually as it's incompatible with workers and clients not using it, so be sure to enable the option in all clients and workers if you decide to use it. - **Multi**: With ``-opt:index`` (e.g., ``-c:1``) the index now always refers to the position of a node in the argument list. This means that referring to a number will work when specifying a list of node names and not just for a number range: .. code-block:: bash celery multi start A B C D -c:1 4 -c:2-4 8 In this example ``1`` refers to node A (as it's the first node in the list). - **Signals**: The sender argument to ``Signal.connect`` can now be a proxy object, which means that it can be used with the task decorator (Issue #1873). - **Task**: A regression caused the ``queue`` argument to ``Task.retry`` to be ignored (Issue #1892). - **App**: Fixed error message for :meth:`~@Celery.config_from_envvar`. Fix contributed by Dmitry Malinovsky. - **Canvas**: Chords can now contain a group of other chords (Issue #1921). - **Canvas**: Chords can now be combined when using the amqp result backend (a chord where the callback is also a chord). - **Canvas**: Calling ``result.get()`` for a chain task will now complete even if one of the tasks in the chain is ``ignore_result=True`` (Issue #1905). - **Canvas**: Worker now also logs chord errors. - **Canvas**: A chord task raising an exception will now result in any errbacks (``link_error``) to the chord callback to also be called. - **Results**: Reliability improvements to the SQLAlchemy database backend (Issue #1786). Previously the connection from the ``MainProcess`` was improperly inherited by child processes. Fix contributed by Ionel Cristian MărieÈ™. - **Task**: Task callbacks and errbacks are now called using the group primitive. - **Task**: ``Task.apply`` now properly sets ``request.headers`` (Issue #1874). - **Worker**: Fixed :exc:`UnicodeEncodeError` occurring when worker is started by :pypi:`supervisor`. Fix contributed by Codeb Fan. - **Beat**: No longer attempts to upgrade a newly created database file (Issue #1923). - **Beat**: New setting :setting:``CELERYBEAT_SYNC_EVERY`` can be be used to control file sync by specifying the number of tasks to send between each sync. Contributed by Chris Clark. - **Commands**: :program:`celery inspect memdump` no longer crashes if the :mod:`psutil` module isn't installed (Issue #1914). - **Worker**: Remote control commands now always accepts json serialized messages (Issue #1870). - **Worker**: Gossip will now drop any task related events it receives by mistake (Issue #1882). .. _version-3.1.9: 3.1.9 ===== :release-date: 2014-02-10 06:43 p.m. UTC :release-by: Ask Solem - **Requirements**: - Now depends on :ref:`Kombu 3.0.12 `. - **Prefork pool**: Better handling of exiting child processes. Fix contributed by Ionel Cristian MărieÈ™. 
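As an illustration of the new beat setting mentioned above (the value ``100`` is arbitrary):

.. code-block:: python

    # Sync the schedule file after every 100 task sends.
    CELERYBEAT_SYNC_EVERY = 100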
- **Commands**: :program:`celery inspect memdump` no longer crashes if the :mod:`psutil` module isn't installed (Issue #1914). - **Worker**: Remote control commands now always accept json serialized messages (Issue #1870). - **Worker**: Gossip will now drop any task related events it receives by mistake (Issue #1882). .. _version-3.1.9: 3.1.9 ===== :release-date: 2014-02-10 06:43 p.m. UTC :release-by: Ask Solem - **Requirements**: - Now depends on :ref:`Kombu 3.0.12 `. - **Prefork pool**: Better handling of exiting child processes. Fix contributed by Ionel Cristian Mărieș. - **Prefork pool**: Now makes sure all file descriptors are removed from the hub when a process is cleaned up. Fix contributed by Ionel Cristian Mărieș. - **New Sphinx extension**: for autodoc documentation of tasks: :mod:`celery.contrib.sphinx` (Issue #1833). - **Django**: Now works with Django 1.7a1. - **Task**: Task.backend is now a property that forwards to ``app.backend`` if no custom backend has been specified for the task (Issue #1821). - **Generic init-scripts**: Fixed bug in stop command. Fix contributed by Rinat Shigapov. - **Generic init-scripts**: Fixed compatibility with GNU :manpage:`stat`. Fix contributed by Paul Kilgo. - **Generic init-scripts**: Fixed compatibility with the minimal :program:`dash` shell (Issue #1815). - **Commands**: The :program:`celery amqp basic.publish` command wasn't working properly. Fix contributed by Andrey Voronov. - **Commands**: Fixed bug where no error message was emitted if the pidfile exists and the process is still alive (Issue #1855). - **Commands**: Better error message for missing arguments to preload options (Issue #1860). - **Commands**: :program:`celery -h` didn't work because of a bug in the argument parser (Issue #1849). - **Worker**: Improved error message for message decoding errors. - **Time**: Now properly parses the `Z` timezone specifier in ISO 8601 date strings. Fix contributed by Martin Davidsson. - **Worker**: Now uses the *negotiated* heartbeat value to calculate how often to run the heartbeat checks. - **Beat**: Fixed problem with beat hanging after the first schedule iteration (Issue #1822). Fix contributed by Roger Hu. - **Signals**: The header argument to :signal:`before_task_publish` is now always a dictionary instance so that signal handlers can add headers (see the sketch after this list). - **Worker**: A list of message headers is now included in message-related errors.
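A sketch of a :signal:`before_task_publish` handler taking advantage of this guarantee (the header name is made up for illustration):

.. code-block:: python

    from celery.signals import before_task_publish

    @before_task_publish.connect
    def add_trace_header(headers=None, **kwargs):
        # headers is now always a dict, so handlers can safely add entries.
        headers['x-trace-id'] = 'my-trace-id'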
.. _version-3.1.8: 3.1.8 ===== :release-date: 2014-01-17 10:45 p.m. UTC :release-by: Ask Solem - **Requirements**: - Now depends on :ref:`Kombu 3.0.10 `. - Now depends on `billiard 3.3.0.14`_. .. _`billiard 3.3.0.14`: https://github.com/celery/billiard/blob/master/CHANGES.txt - **Worker**: The event loop wasn't properly reinitialized at consumer restart which would force the worker to continue with a closed ``epoll`` instance on Linux, resulting in a crash. - **Events:** Fixed issue with both heartbeats and task events that could result in the data not being kept in sorted order. As a result this would force the worker to log "heartbeat missed" events even though the remote node was sending heartbeats in a timely manner. - **Results:** The pickle serializer no longer converts group results to tuples, and will keep the original type (*Issue #1750*). - **Results:** ``ResultSet.iterate`` is now pending deprecation. The method will be deprecated in version 3.2 and removed in version 3.3. Use ``result.get(callback=)`` (or ``result.iter_native()`` where available) instead. - **Worker**\|eventlet/gevent: A regression caused :kbd:`Control-c` to be ineffective for shutdown. - **Redis result backend:** Now using a pipeline to store state changes for improved performance. Contributed by Pepijn de Vos. - **Redis result backend:** Will now retry storing the result if disconnected. - **Worker**\|gossip: Fixed attribute error occurring when another node leaves. Fix contributed by Brodie Rao. - **Generic init-scripts:** Now runs a check at start-up to verify that any configuration scripts are owned by root and that they aren't world/group writable. The init-script configuration is a shell script executed by root, so this is a preventive measure to ensure that users don't leave this file vulnerable to changes by unprivileged users. .. note:: Note that upgrading Celery won't update the init-scripts; instead you need to manually copy the improved versions from the source distribution: https://github.com/celery/celery/tree/3.1/extra/generic-init.d - **Commands**: The :program:`celery purge` command now warns that the operation will delete all tasks and prompts the user for confirmation. A new :option:`-f ` argument was added that can be used to disable interactive mode. - **Task**: ``.retry()`` didn't raise the value provided in the ``exc`` argument when called outside of an error context (*Issue #1755*). - **Commands:** The :program:`celery multi` command didn't forward command line configuration to the target workers. The change means that multi will forward the special ``--`` argument and configuration content at the end of the arguments line to the specified workers. Example using command-line configuration to set a broker heartbeat from :program:`celery multi`: .. code-block:: bash $ celery multi start 1 -c3 -- broker.heartbeat=30 Fix contributed by Antoine Legrand. - **Canvas:** ``chain.apply_async()`` now properly forwards execution options. Fix contributed by Konstantin Podshumok. - **Redis result backend:** Now takes ``connection_pool`` argument that can be used to change the connection pool class/constructor. - **Worker:** Now truncates very long arguments and keyword arguments logged by the pool at debug severity. - **Worker:** The worker now closes all open files on :sig:`SIGHUP` (regression) (*Issue #1768*). Fix contributed by Brodie Rao. - **Worker:** Will no longer accept remote control commands while the worker start-up phase is incomplete (*Issue #1741*). - **Commands:** The output of the event dump utility (:program:`celery events -d`) can now be piped into other commands. - **Documentation:** The RabbitMQ installation instructions for macOS were updated to use modern Homebrew practices. Contributed by Jon Chen. - **Commands:** The :program:`celery inspect conf` utility now works. - **Commands:** The :option:`--no-color ` argument was not respected by all commands (*Issue #1799*). - **App:** Fixed rare bug with ``autodiscover_tasks()`` (*Issue #1797*). - **Distribution:** The sphinx docs will now always add the parent directory to path so that the current Celery source code is used as a basis for API documentation (*Issue #1782*). - **Documentation:** :pypi:`supervisor` examples contained an extraneous '-' in a :option:`--logfile ` argument example. Fix contributed by Mohammad Almeer. .. _version-3.1.7: 3.1.7 ===== :release-date: 2013-12-17 06:00 p.m. UTC :release-by: Ask Solem .. _v317-important: Important Notes --------------- Init-script security improvements --------------------------------- The generic init-scripts (for ``celeryd``, and ``celerybeat``) previously delegated the responsibility of dropping privileges to the target application; they now use ``su`` instead, so that the Python program isn't trusted with superuser privileges. This isn't in reaction to any known exploit, but it will limit the possibility of a privilege escalation bug being abused in the future.
You have to upgrade the init-scripts manually from this directory: https://github.com/celery/celery/tree/3.1/extra/generic-init.d AMQP result backend ~~~~~~~~~~~~~~~~~~~ The 3.1 release accidentally left the amqp backend configured to be non-persistent by default. Upgrading from 3.0 would give a "not equivalent" error when attempting to set or retrieve results for a task. That's unless you manually set the persistence setting:: CELERY_RESULT_PERSISTENT = True This version restores the previous value, so if you already forced the upgrade by removing the existing exchange, you must either keep the configuration by setting ``CELERY_RESULT_PERSISTENT = False`` or delete the ``celeryresults`` exchange again. Synchronous subtasks ~~~~~~~~~~~~~~~~~~~~ Tasks waiting for the result of a subtask will now emit a :exc:`RuntimeWarning` warning when using the prefork pool, and in 3.2 this will result in an exception being raised. It's not legal for tasks to block by waiting for subtasks as this is likely to lead to resource starvation and eventually deadlock when using the prefork pool (see also :ref:`task-synchronous-subtasks`). If you really know what you're doing you can avoid the warning (and the future exception being raised) by moving the operation into a white-list block: .. code-block:: python from celery.result import allow_join_result @app.task def misbehaving(): result = other_task.delay() with allow_join_result(): result.get() Note also that if you wait for the result of a subtask in any form when using the prefork pool you must also disable the pool prefetching behavior with the worker :ref:`-Ofair option `. .. _v317-fixes: Fixes ----- - Now depends on :ref:`Kombu 3.0.8 `. - Now depends on :mod:`billiard` 3.3.0.13. - Events: Fixed compatibility with non-standard json libraries that send floats as :class:`decimal.Decimal` (Issue #1731). - Events: State worker objects now always define attributes: ``active``, ``processed``, ``loadavg``, ``sw_ident``, ``sw_ver`` and ``sw_sys``. - Worker: Now keeps count of the total number of tasks processed, not just by type (``all_active_count``). - Init-scripts: Fixed problem with reading configuration file when the init-script is symlinked to a runlevel (e.g., ``S02celeryd``) (Issue #1740). This also removed a rarely used feature where you can symlink the script to provide alternative configurations. You instead copy the script and give it a new name, but perhaps a better solution is to provide arguments to ``CELERYD_OPTS`` to separate them: .. code-block:: bash CELERYD_NODES="X1 X2 Y1 Y2" CELERYD_OPTS="-A:X1 x -A:X2 x -A:Y1 y -A:Y2 y" - Fallback chord unlock task is now always called after the chord header (Issue #1700). This means that the unlock task won't be started if there's an error sending the header. - Celery command: Fixed problem with arguments for some control commands. Fix contributed by Konstantin Podshumok. - Fixed bug in ``utcoffset`` where the offset when in DST would be completely wrong (Issue #1743). - Worker: Errors occurring while attempting to serialize the result of a task will now cause the task to be marked with failure and a :class:`kombu.exceptions.EncodingError` error. Fix contributed by Ionel Cristian Mărieș. - Worker with :option:`-B ` argument didn't properly shut down the beat instance. - Worker: The ``%n`` and ``%h`` formats are now also supported by the :option:`--logfile `, :option:`--pidfile ` and :option:`--statedb ` arguments. Example:
.. code-block:: bash $ celery -A proj worker -n foo@%h --logfile=%n.log --statedb=%n.db - Redis/Cache result backends: Will now timeout if keys are evicted while trying to join a chord. - The fallback unlock chord task now raises :exc:`Retry` so that the retry event is properly logged by the worker. - Multi: Will no longer apply Eventlet/gevent monkey patches (Issue #1717). - Redis result backend: Now supports UNIX sockets. Like the Redis broker transport the result backend now also supports using ``redis+socket:///tmp/redis.sock`` URLs. Contributed by Alcides Viamontes Esquivel. - Events: Events sent by clients were mistaken for worker-related events (Issue #1714). For ``events.State`` the tasks now have a ``Task.client`` attribute that's set when a ``task-sent`` event is being received. Also, a client's logical clock isn't in sync with the cluster so they live in a "time bubble." So for this reason monitors will no longer attempt to merge with the clock of an event sent by a client; instead they fake the value by using the current clock with a skew of -1. - Prefork pool: The method used to find terminated processes was flawed in that it didn't also take into account missing ``popen`` objects. - Canvas: ``group`` and ``chord`` now work with anon signatures as long as the group/chord object is associated with an app instance (Issue #1744). You can pass the app by using ``group(..., app=app)``. .. _version-3.1.6: 3.1.6 ===== :release-date: 2013-12-02 06:00 p.m. UTC :release-by: Ask Solem - Now depends on :mod:`billiard` 3.3.0.10. - Now depends on :ref:`Kombu 3.0.7 `. - Fixed problem where Mingle caused the worker to hang at start-up (Issue #1686). - Beat: Would attempt to drop privileges twice (Issue #1708). - Windows: Fixed error with ``geteuid`` not being available (Issue #1676). - Tasks can now provide a list of expected error classes (Issue #1682). The list should only include errors that the task is expected to raise during normal operation:: @task(throws=(KeyError, HttpNotFound)) What happens when an exception is raised depends on the type of error: - Expected errors (included in ``Task.throws``) Will be logged using severity ``INFO``, and traceback is excluded. - Unexpected errors Will be logged using severity ``ERROR``, with traceback included. - Cache result backend now compatible with Python 3 (Issue #1697). - CentOS init-script: Now compatible with SysV style init symlinks. Fix contributed by Jonathan Jordan. - Events: Fixed problem when task name isn't defined (Issue #1710). Fix contributed by Mher Movsisyan. - Task: Fixed unbound local errors (Issue #1684). Fix contributed by Markus Ullmann. - Canvas: Now unrolls groups with only one task (optimization) (Issue #1656). - Task: Fixed problem with ETA and timezones. Fix contributed by Alexander Koval. - Django: Worker now performs model validation (Issue #1681). - Task decorator now emits less confusing errors when used with incorrect arguments (Issue #1692). - Task: New method ``Task.send_event`` can be used to send custom events to Flower and other monitors (a sketch follows after this list). - Fixed a compatibility issue with non-abstract task classes. - Events from clients now use the new node name format (``gen@``). - Fixed rare bug with Callable not being defined at interpreter shutdown (Issue #1678). Fix contributed by Nick Johnson. - Fixed Python 2.6 compatibility (Issue #1679).
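A minimal sketch of ``Task.send_event`` called from a bound task; the ``app`` instance, event type and fields here are assumptions for illustration, not from the changelog:

.. code-block:: python

    @app.task(bind=True)
    def long_running(self):
        # Emit a custom event that monitors such as Flower can receive.
        self.send_event('task-progress', current=50, total=100)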
.. _version-3.1.5: 3.1.5 ===== :release-date: 2013-11-21 06:20 p.m. UTC :release-by: Ask Solem - Now depends on :ref:`Kombu 3.0.6 `. - Now depends on :mod:`billiard` 3.3.0.8. - App: ``config_from_object`` is now lazy (Issue #1665). - App: ``autodiscover_tasks`` is now lazy. Django users should now wrap access to the settings object in a lambda:: app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) This ensures that the settings object isn't prepared prematurely. - Fixed regression for :option:`--app ` argument experienced by some users (Issue #1653). - Worker: Now respects the :option:`--uid ` and :option:`--gid ` arguments even if :option:`--detach ` isn't enabled. - Beat: Now respects the :option:`--uid ` and :option:`--gid ` arguments even if :option:`--detach ` isn't enabled. - Python 3: Fixed unorderable error occurring with the worker :option:`-B ` argument enabled. - ``celery.VERSION`` is now a named tuple. - ``maybe_signature(list)`` is now applied recursively (Issue #1645). - ``celery shell`` command: Fixed ``IPython.frontend`` deprecation warning. - The default app no longer includes the built-in fix-ups. This fixes a bug where ``celery multi`` would attempt to load the Django settings module before entering the target working directory. - The Django daemonization tutorial was changed. Users no longer have to explicitly export ``DJANGO_SETTINGS_MODULE`` in :file:`/etc/default/celeryd` when the new project layout is used. - Redis result backend: expiry value can now be 0 (Issue #1661). - Censoring settings now accounts for non-string keys (Issue #1663). - App: New ``autofinalize`` option. Apps are automatically finalized when the task registry is accessed. You can now disable this behavior so that an exception is raised instead. Example: .. code-block:: python app = Celery(autofinalize=False) # raises RuntimeError tasks = app.tasks @app.task def add(x, y): return x + y # raises RuntimeError add.delay(2, 2) app.finalize() # no longer raises: tasks = app.tasks add.delay(2, 2) - The worker didn't send monitoring events during shutdown. - Worker: Mingle and gossip are now automatically disabled when used with an unsupported transport (Issue #1664). - ``celery`` command: Preload options now support the rare ``--opt value`` format (Issue #1668). - ``celery`` command: Accidentally removed options appearing before the sub-command; these are now moved to the end instead. - Worker now properly responds to ``inspect stats`` commands even if received before start-up is complete (Issue #1659). - :signal:`task_postrun` is now sent within a :keyword:`finally` block, to make sure the signal is always sent. - Beat: Fixed syntax error in string formatting. Contributed by :github_user:`nadad`. - Fixed typos in the documentation. Fixes contributed by Loic Bistuer, :github_user:`sunfinite`. - Nested chains now work properly when constructed using the ``chain`` type instead of the ``|`` operator (Issue #1656). .. _version-3.1.4: 3.1.4 ===== :release-date: 2013-11-15 11:40 p.m. UTC :release-by: Ask Solem - Now depends on :ref:`Kombu 3.0.5 `. - Now depends on :mod:`billiard` 3.3.0.7. - Worker accidentally set a default socket timeout of 5 seconds. - Django: Fix-up now sets the default app so that threads will use the same app instance (e.g., for :command:`manage.py runserver`). - Worker: Fixed Unicode error crash at start-up experienced by some users. - Calling ``.apply_async`` on an empty chain now works again (Issue #1650). - The ``celery multi show`` command now generates the same arguments as the start command does.
- The :option:`--app ` argument could end up using a module object instead of an app instance (with a resulting crash). - Fixed a syntax error problem in the beat init-script. Fix contributed by Vsevolod. - Tests now passing on PyPy 2.1 and 2.2. .. _version-3.1.3: 3.1.3 ===== :release-date: 2013-11-13 00:55 a.m. UTC :release-by: Ask Solem - Fixed compatibility problem with Python 2.7.0 - 2.7.5 (Issue #1637) ``unpack_from`` started supporting ``memoryview`` arguments in Python 2.7.6. - Worker: :option:`-B ` argument accidentally closed files used for logging. - Task decorated tasks now keep their docstring (Issue #1636) .. _version-3.1.2: 3.1.2 ===== :release-date: 2013-11-12 08:00 p.m. UTC :release-by: Ask Solem - Now depends on :mod:`billiard` 3.3.0.6 - No longer needs the billiard C extension to be installed. - The worker silently ignored task errors. - Django: Fixed ``ImproperlyConfigured`` error raised when no database backend specified. Fix contributed by :github_user:`j0hnsmith`. - Prefork pool: Now using ``_multiprocessing.read`` with ``memoryview`` if available. - ``close_open_fds`` now uses ``os.closerange`` if available. - ``get_fdmax`` now takes value from ``sysconfig`` if possible. .. _version-3.1.1: 3.1.1 ===== :release-date: 2013-11-11 06:30 p.m. UTC :release-by: Ask Solem - Now depends on :mod:`billiard` 3.3.0.4. - Python 3: Fixed compatibility issues. - Windows: Accidentally showed warning that the billiard C extension wasn't installed (Issue #1630). - Django: Tutorial updated with a solution that sets a default :envvar:`DJANGO_SETTINGS_MODULE` so that it doesn't have to be typed in with the :program:`celery` command. Also fixed typos in the tutorial, and added the settings required to use the Django database backend. Thanks to Chris Ward, :github_user:`orarbel`. - Django: Fixed a problem when using the Django settings in Django 1.6. - Django: Fix-up shouldn't be applied if the django loader is active. - Worker: Fixed attribute error for ``human_write_stats`` when using the compatibility prefork pool implementation. - Worker: Fixed compatibility with billiard without C extension. - Inspect.conf: Now supports a ``with_defaults`` argument. - Group.restore: The backend argument wasn't respected. .. _version-3.1.0: 3.1.0 ======= :release-date: 2013-11-09 11:00 p.m. UTC :release-by: Ask Solem See :ref:`whatsnew-3.1`. celery-4.1.0/docs/history/changelog-2.2.rst0000644000175000017500000010151013130607475020342 0ustar omeromer00000000000000.. _changelog-2.2: =============================== Change history for Celery 2.2 =============================== .. contents:: :local: .. _version-2.2.8: 2.2.8 ===== :release-date: 2011-11-25 04:00 p.m. GMT :release-by: Ask Solem .. _v228-security-fixes: Security Fixes -------------- * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than real id's when the :option:`--uid `/ :option:`--gid ` arguments to :program:`celery multi`, :program:`celeryd_detach`, :program:`celery beat` and :program:`celery events` were used. This means privileges weren't properly dropped, and that it would be possible to regain supervisor privileges later. .. _`CELERYSA-0001`: https://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt .. _version-2.2.7: 2.2.7 ===== :release-date: 2011-06-13 04:00 p.m. BST :release-by: Ask Solem * New signals: :signal:`after_setup_logger` and :signal:`after_setup_task_logger` These signals can be used to augment logging configuration after Celery has set up logging. 
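For example, a sketch that attaches an extra handler once Celery has configured its logger; the handler and file name are illustrative only:

.. code-block:: python

    import logging

    from celery.signals import after_setup_logger

    def augment_logging(sender=None, logger=None, **kwargs):
        # Add an additional handler to the logger Celery just configured.
        logger.addHandler(logging.FileHandler('extra.log'))

    after_setup_logger.connect(augment_logging)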
* Redis result backend now works with Redis 2.4.4. * multi: The :option:`--gid ` option now works correctly. * worker: Retry wrongfully used the repr of the traceback instead of the string representation. * App.config_from_object: Now loads module, not attribute of module. * Fixed issue where logging of objects would give "" .. _version-2.2.6: 2.2.6 ===== :release-date: 2011-04-15 04:00 p.m. CEST :release-by: Ask Solem .. _v226-important: Important Notes --------------- * Now depends on :pypi:`Kombu` 1.1.2. * Dependency lists now explicitly specify that we don't want :pypi:`python-dateutil` 2.x, as this version only supports Python 3. If you have installed dateutil 2.0 by accident you should downgrade to the 1.5.0 version: .. code-block:: console $ pip install -U python-dateutil==1.5.0 or by ``easy_install``: .. code-block:: console $ easy_install -U python-dateutil==1.5.0 .. _v226-fixes: Fixes ----- * The new ``WatchedFileHandler`` broke Python 2.5 support (Issue #367). * Task: Don't use ``app.main`` if the task name is set explicitly. * Sending emails didn't work on Python 2.5, due to a bug in the version detection code (Issue #378). * Beat: Adds method ``ScheduleEntry._default_now``. This method can be overridden to change the default value of ``last_run_at``. * An error occurring in process cleanup could mask task errors. We no longer propagate errors happening at process cleanup, but log them instead. This way they won't interfere with publishing the task result (Issue #365). * Defining tasks didn't work properly when using the Django ``shell_plus`` utility (Issue #366). * ``AsyncResult.get`` didn't accept the ``interval`` and ``propagate`` arguments. * worker: Fixed a bug where the worker wouldn't shutdown if a :exc:`socket.error` was raised. .. _version-2.2.5: 2.2.5 ===== :release-date: 2011-03-28 06:00 p.m. CEST :release-by: Ask Solem .. _v225-important: Important Notes --------------- * Now depends on Kombu 1.0.7 .. _v225-news: News ---- * Our documentation is now hosted by Read The Docs (http://docs.celeryproject.org), and all links have been changed to point to the new URL. * Logging: Now supports log rotation using external tools like `logrotate.d`_ (Issue #321) This is accomplished by using the ``WatchedFileHandler``, which re-opens the file if it's renamed or deleted. .. _`logrotate.d`: http://www.ducea.com/2006/06/06/rotating-linux-log-files-part-2-logrotate/ * ``otherqueues`` tutorial now documents how to configure Redis/Database result backends. * gevent: Now supports ETA tasks. But gevent still needs ``CELERY_DISABLE_RATE_LIMITS=True`` to work. * TaskSet User Guide: now contains TaskSet callback recipes. * Eventlet: New signals: * ``eventlet_pool_started`` * ``eventlet_pool_preshutdown`` * ``eventlet_pool_postshutdown`` * ``eventlet_pool_apply`` See :mod:`celery.signals` for more information. * New :setting:`BROKER_TRANSPORT_OPTIONS` setting can be used to pass additional arguments to a particular broker transport (see the sketch after this list). * worker: ``worker_pid`` is now part of the request info as returned by broadcast commands. * TaskSet.apply/TaskSet.apply_async now accept an optional ``taskset_id`` argument. * The taskset_id (if any) is now available in the Task request context. * SQLAlchemy result backend: task_id and taskset_id columns now have a unique constraint (tables need to be recreated for this to take effect). * Task user guide: Added section about choosing a result backend. * Removed unused attribute ``AsyncResult.uuid``.
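A configuration sketch for the new setting mentioned in the list above; which option names are understood depends entirely on the transport in use, so ``visibility_timeout`` here is only an illustrative key:

.. code-block:: python

    BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 3600}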
.. _v225-fixes: Fixes ----- * multiprocessing.Pool: Fixes race condition when marking job with ``WorkerLostError`` (Issue #268). The process may have published a result before it was terminated, but we have no reliable way to detect that this is the case. So we have to wait for 10 seconds before marking the result with WorkerLostError. This gives the result handler a chance to retrieve the result. * multiprocessing.Pool: Shutdown could hang if rate limits were disabled. There was a race condition when the MainThread was waiting for the pool semaphore to be released. The ResultHandler now terminates after 5 seconds if there are unacked jobs, but no worker processes left to start them (it needs to timeout because there could still be an ack+result that we haven't consumed from the result queue. It is unlikely we'll receive any after 5 seconds with no worker processes). * ``celerybeat``: Now creates pidfile even if the ``--detach`` option isn't set. * eventlet/gevent: The broadcast command consumer is now running in a separate green-thread. This ensures broadcast commands will take priority even if there are many active tasks. * Internal module ``celery.worker.controllers`` renamed to ``celery.worker.mediator``. * worker: Threads now terminate the program by calling ``os._exit``, as it is the only way to ensure exit in the case of syntax errors, or other unrecoverable errors. * Fixed typo in ``maybe_timedelta`` (Issue #352). * worker: Broadcast commands now log with loglevel debug instead of warning. * AMQP Result Backend: Now resets cached channel if the connection is lost. * Polling results with the AMQP result backend wasn't working properly. * Rate limits: No longer sleeps if there are no tasks, but rather waits for the task received condition (Performance improvement). * ConfigurationView: ``iter(dict)`` should return keys, not items (Issue #362). * ``celerybeat``: PersistentScheduler now automatically removes a corrupted schedule file (Issue #346). * Programs that don't support positional command-line arguments now provide a user-friendly error message. * Programs no longer try to load the configuration file when showing ``--version`` (Issue #347). * Autoscaler: The "all processes busy" log message is now severity debug instead of error. * worker: If the message body can't be decoded, it's now passed through ``safe_str`` when logging. This is to ensure we don't get additional decoding errors when trying to log the failure. * ``app.config_from_object``/``app.config_from_envvar`` now work for all loaders. * Now emits a user-friendly error message if the result backend name is unknown (Issue #349). * ``celery.contrib.batches``: Now sets loglevel and logfile in the task request so ``task.get_logger`` works with batch tasks (Issue #357). * worker: An exception was raised if using the amqp transport and the prefetch count value exceeded 65535 (Issue #359). The prefetch count is incremented for every received task with an ETA/countdown defined. The prefetch count is a short, so can only support a maximum value of 65535. If the value exceeds the maximum value we now disable the prefetch count; it's re-enabled as soon as the value is below the limit again. * ``cursesmon``: Fixed unbound local error (Issue #303). * eventlet/gevent is now imported on demand so autodoc can import the modules without having eventlet/gevent installed. * worker: Ack callback now properly handles ``AttributeError``. * ``Task.after_return`` is now always called *after* the result has been written.
* Cassandra Result Backend: Should now work with the latest ``pycassa`` version. * multiprocessing.Pool: No longer cares if the ``putlock`` semaphore is released too many times (this can happen if one or more worker processes are killed). * SQLAlchemy Result Backend: Now returns accidentally removed ``date_done`` again (Issue #325). * Task.request context is now always initialized to ensure calling the task function directly works even if it actively uses the request context. * Fixed exception occurring when iterating over the result from ``TaskSet.apply``. * eventlet: Now properly schedules tasks with an ETA in the past. .. _version-2.2.4: 2.2.4 ===== :release-date: 2011-02-19 00:00 AM CET :release-by: Ask Solem .. _v224-fixes: Fixes ----- * worker: 2.2.3 broke error logging, resulting in tracebacks not being logged. * AMQP result backend: Polling task states didn't work properly if there was more than one result message in the queue. * ``TaskSet.apply_async()`` and ``TaskSet.apply()`` now support an optional ``taskset_id`` keyword argument (Issue #331). * The current taskset id (if any) is now available in the task context as ``request.taskset`` (Issue #329). * SQLAlchemy result backend: `date_done` was no longer part of the results as it had been accidentally removed. It's now available again (Issue #325). * SQLAlchemy result backend: Added unique constraint on `Task.id` and `TaskSet.taskset_id`. Tables need to be recreated for this to take effect. * Fixed exception raised when iterating on the result of ``TaskSet.apply()``. * Tasks user guide: Added section on choosing a result backend. .. _version-2.2.3: 2.2.3 ===== :release-date: 2011-02-12 04:00 p.m. CET :release-by: Ask Solem .. _v223-fixes: Fixes ----- * Now depends on :pypi:`Kombu` 1.0.3 * Task.retry now supports a ``max_retries`` argument, used to change the default value. * `multiprocessing.cpu_count` may raise :exc:`NotImplementedError` on platforms where this isn't supported (Issue #320). * Coloring of log messages broke if the logged object wasn't a string. * Fixed several typos in the init-script documentation. * A regression caused `Task.exchange` and `Task.routing_key` to no longer have any effect. This is now fixed. * Routing user guide: Fixes typo, routers in :setting:`CELERY_ROUTES` must be instances, not classes. * :program:`celeryev` didn't create pidfile even though the :option:`--pidfile ` argument was set. * Task logger format was no longer used (Issue #317). The id and name of the task are now part of the log message again. * A safe version of ``repr()`` is now used in strategic places to ensure objects with a broken ``__repr__`` don't crash the worker, or otherwise make errors hard to understand (Issue #298). * Remote control command :control:`active_queues`: didn't account for queues added at runtime. In addition the dictionary replied by this command now has a different structure: the exchange key is now a dictionary containing the exchange declaration in full. * The :option:`celery worker -Q` option removed unused queue declarations, so routing of tasks could fail. Queues are no longer removed, but rather `app.amqp.queues.consume_from()` is used as the list of queues to consume from. This ensures all queues are available for routing purposes. * ``celeryctl``: Now supports the `inspect active_queues` command. ..
_v222-fixes: Fixes ----- * ``celerybeat`` couldn't read the schedule properly, so entries in :setting:`CELERYBEAT_SCHEDULE` wouldn't be scheduled. * Task error log message now includes `exc_info` again. * The `eta` argument can now be used with `task.retry`. Previously it was overwritten by the countdown argument. * ``celery multi``/``celeryd_detach``: Now logs errors occurring when executing the `celery worker` command. * daemonizing tutorial: Fixed typo ``--time-limit 300`` -> ``--time-limit=300`` * Colors in logging broke non-string objects in log messages. * ``setup_task_logger`` no longer makes assumptions about magic task kwargs. .. _version-2.2.1: 2.2.1 ===== :release-date: 2011-02-02 04:00 p.m. CET :release-by: Ask Solem .. _v221-fixes: Fixes ----- * Eventlet pool was leaking memory (Issue #308). * Deprecated function ``celery.execute.delay_task`` was accidentally removed, now available again. * ``BasePool.on_terminate`` stub didn't exist. * ``celeryd_detach``: Adds readable error messages if user/group name doesn't exist. * Smarter handling of unicode decode errors when logging errors. .. _version-2.2.0: 2.2.0 ===== :release-date: 2011-02-01 10:00 AM CET :release-by: Ask Solem .. _v220-important: Important Notes --------------- * Carrot has been replaced with :pypi:`Kombu` Kombu is the next-generation messaging library for Python, fixing several flaws present in Carrot that were hard to fix without breaking backwards compatibility. Also it adds: * First-class support for virtual transports; Redis, Django ORM, SQLAlchemy, Beanstalk, MongoDB, CouchDB and in-memory. * Consistent error handling with introspection, * The ability to ensure that an operation is performed by gracefully handling connection and channel errors, * Message compression (:mod:`zlib`, :mod:`bz2`, or custom compression schemes). This means that `ghettoq` is no longer needed as the functionality it provided is already available in Celery by default. The virtual transports are also more feature complete with support for exchanges (direct and topic). The Redis transport even supports fanout exchanges so it's able to perform worker remote control commands. * Magic keyword arguments pending deprecation. The magic keyword arguments were responsible for many problems and quirks: notably issues with tasks and decorators, and name collisions in keyword arguments for the unaware. It wasn't easy to find a way to deprecate the magic keyword arguments, but we think this is a solution that makes sense and it won't have any adverse effects for existing code. The path to a magic keyword argument free world is: * the `celery.decorators` module is deprecated and the decorators can now be found in `celery.task`. * The decorators in `celery.task` disable keyword arguments by default * All examples in the documentation have been changed to use `celery.task`. This means that the following will have magic keyword arguments enabled (old style): .. code-block:: python from celery.decorators import task @task() def add(x, y, **kwargs): print('In task %s' % kwargs['task_id']) return x + y And this won't use magic keyword arguments (new style): .. code-block:: python from celery.task import task @task() def add(x, y): print('In task %s' % add.request.id) return x + y In addition, tasks can choose not to accept magic keyword arguments by setting the `task.accept_magic_kwargs` attribute. ..
admonition:: Deprecation Using the decorators in :mod:`celery.decorators` emits a :class:`PendingDeprecationWarning` with a helpful message urging you to change your code; in version 2.4 this will be replaced with a :class:`DeprecationWarning`, and in version 4.0 the :mod:`celery.decorators` module will be removed and no longer exist. Similarly, the `task.accept_magic_kwargs` attribute will no longer have any effect starting from version 4.0. * The magic keyword arguments are now available as `task.request` This is called *the context*. Using thread-local storage the context contains state that's related to the current request. It's mutable and you can add custom attributes that'll only be seen by the current task request. The following context attributes are always available: ===================================== =================================== **Magic Keyword Argument** **Replace with** ===================================== =================================== `kwargs['task_id']` `self.request.id` `kwargs['delivery_info']` `self.request.delivery_info` `kwargs['task_retries']` `self.request.retries` `kwargs['logfile']` `self.request.logfile` `kwargs['loglevel']` `self.request.loglevel` `kwargs['task_is_eager']` `self.request.is_eager` **NEW** `self.request.args` **NEW** `self.request.kwargs` ===================================== =================================== In addition, the following methods now automatically use the current context, so you don't have to pass `kwargs` manually anymore: * `task.retry` * `task.get_logger` * `task.update_state` * `Eventlet`_ support. This is great news for I/O-bound tasks! To change pool implementations you use the :option:`celery worker --pool` argument, or globally using the :setting:`CELERYD_POOL` setting. This can be the full name of a class, or one of the following aliases: `processes`, `eventlet`, `gevent`. For more information please see the :ref:`concurrency-eventlet` section in the User Guide. .. admonition:: Why not gevent? For our first alternative concurrency implementation we've focused on `Eventlet`_, but there's also an experimental `gevent`_ pool available. This is missing some features, notably the ability to schedule ETA tasks. Hopefully the `gevent`_ support will be feature complete by version 2.3, but this depends on user demand (and contributions). .. _`Eventlet`: http://eventlet.net .. _`gevent`: http://gevent.org * Python 2.4 support deprecated! We're happy^H^H^H^H^Hsad to announce that this is the last version to support Python 2.4. You're urged to make some noise if you're currently stuck with Python 2.4. Complain to your package maintainers, sysadmins and bosses: tell them it's time to move on! Apart from wanting to take advantage of :keyword:`with` statements, coroutines, conditional expressions and enhanced :keyword:`try` blocks, the code base now contains so many 2.4 related hacks and workarounds it's no longer just a compromise, but a sacrifice. If it really isn't your choice, and you don't have the option to upgrade to a newer version of Python, you can just continue to use Celery 2.2. Important fixes can be backported for as long as there's interest. * worker: Now supports Autoscaling of child worker processes. The :option:`--autoscale ` option can be used to configure the minimum and maximum number of child worker processes: .. code-block:: text --autoscale=AUTOSCALE Enable autoscaling by providing max_concurrency,min_concurrency. Example: --autoscale=10,3 (always keep 3 processes, but grow to 10 if necessary).
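For example, using the values from the help text above, the full worker invocation would be:

.. code-block:: console

    $ celeryd --autoscale=10,3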
* Remote Debugging of Tasks ``celery.contrib.rdb`` is an extended version of :mod:`pdb` that enables remote debugging of processes that don't have terminal access. Example usage: .. code-block:: python from celery.contrib import rdb from celery.task import task @task() def add(x, y): result = x + y # set breakpoint rdb.set_trace() return result :func:`~celery.contrib.rdb.set_trace` sets a breakpoint at the current location and creates a socket you can telnet into to remotely debug your task. The debugger may be started by multiple processes at the same time, so rather than using a fixed port the debugger will search for an available port, starting from the base port (6900 by default). The base port can be changed using the environment variable :envvar:`CELERY_RDB_PORT`. By default the debugger will only be available from the local host, to enable access from the outside you have to set the environment variable :envvar:`CELERY_RDB_HOST`. When the worker encounters your breakpoint it will log the following information:: [INFO/MainProcess] Received task: tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] [WARNING/PoolWorker-1] Remote Debugger:6900: Please telnet 127.0.0.1 6900. Type `exit` in session to continue. [2011-01-18 14:25:44,119: WARNING/PoolWorker-1] Remote Debugger:6900: Waiting for client... If you telnet to the port specified you'll be presented with a ``pdb`` shell: .. code-block:: console $ telnet localhost 6900 Connected to localhost. Escape character is '^]'. > /opt/devel/demoapp/tasks.py(128)add() -> return result (Pdb) Enter ``help`` to get a list of available commands. It may be a good idea to read the `Python Debugger Manual`_ if you have never used `pdb` before. .. _`Python Debugger Manual`: http://docs.python.org/library/pdb.html * Events are now transient and use a topic exchange (instead of direct). The `CELERYD_EVENT_EXCHANGE`, `CELERYD_EVENT_ROUTING_KEY`, `CELERYD_EVENT_EXCHANGE_TYPE` settings are no longer in use. This means events won't be stored until there's a consumer, and the events will be gone as soon as the consumer stops. Also it means there can be multiple monitors running at the same time. The routing key of an event is the type of event (e.g., `worker.started`, `worker.heartbeat`, `task.succeeded`, etc.). This means a consumer can filter on specific types, to only be alerted of the events it cares about. Each consumer will create a unique queue, meaning it's in effect a broadcast exchange. This opens up a lot of possibilities, for example the workers could listen for worker events to know what workers are in the neighborhood, and even restart workers when they go down (or use this information to optimize tasks/autoscaling). .. note:: The event exchange has been renamed from ``"celeryevent"`` to ``"celeryev"`` so it doesn't collide with older versions. If you'd like to remove the old exchange you can do so by executing the following command: .. code-block:: console $ camqadm exchange.delete celeryevent * The worker now starts without configuration, and configuration can be specified directly on the command-line. Configuration options must appear after the last argument, separated by two dashes: .. code-block:: console $ celery worker -l info -I tasks -- broker.host=localhost broker.vhost=/app * Configuration is now an alias to the original configuration, so changes to the original will be reflected in Celery at runtime. * `celery.conf` has been deprecated, and modifying `celery.conf.ALWAYS_EAGER` will no longer have any effect.
The default configuration is now available in the :mod:`celery.app.defaults` module. The available configuration options and their types can now be introspected. * Remote control commands are now provided by `kombu.pidbox`, the generic process mailbox. * Internal module `celery.worker.listener` has been renamed to `celery.worker.consumer`, and `.CarrotListener` is now `.Consumer`. * Previously deprecated modules `celery.models` and `celery.management.commands` have now been removed as per the deprecation time-line. * [Security: Low severity] Removed `celery.task.RemoteExecuteTask` and accompanying functions: `dmap`, `dmap_async`, and `execute_remote`. Executing arbitrary code using pickle is a potential security issue if someone gains unrestricted access to the message broker. If you really need this functionality, then you'd've to add this to your own project. * [Security: Low severity] The `stats` command no longer transmits the broker password. One would've needed an authenticated broker connection to receive this password in the first place, but sniffing the password at the wire level would've been possible if using unencrypted communication. .. _v220-news: News ---- * The internal module `celery.task.builtins` has been removed. * The module `celery.task.schedules` is deprecated, and `celery.schedules` should be used instead. For example if you have:: from celery.task.schedules import crontab You should replace that with:: from celery.schedules import crontab The module needs to be renamed because it must be possible to import schedules without importing the `celery.task` module. * The following functions have been deprecated and is scheduled for removal in version 2.3: * `celery.execute.apply_async` Use `task.apply_async()` instead. * `celery.execute.apply` Use `task.apply()` instead. * `celery.execute.delay_task` Use `registry.tasks[name].delay()` instead. * Importing `TaskSet` from `celery.task.base` is now deprecated. You should use:: >>> from celery.task import TaskSet instead. * New remote control commands: * `active_queues` Returns the queue declarations a worker is currently consuming from. * Added the ability to retry publishing the task message in the event of connection loss or failure. This is disabled by default but can be enabled using the :setting:`CELERY_TASK_PUBLISH_RETRY` setting, and tweaked by the :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` setting. In addition `retry`, and `retry_policy` keyword arguments have been added to `Task.apply_async`. .. note:: Using the `retry` argument to `apply_async` requires you to handle the publisher/connection manually. * Periodic Task classes (`@periodic_task`/`PeriodicTask`) will *not* be deprecated as previously indicated in the source code. But you're encouraged to use the more flexible :setting:`CELERYBEAT_SCHEDULE` setting. * Built-in daemonization support of the worker using `celery multi` is no longer experimental and is considered production quality. See :ref:`daemon-generic` if you want to use the new generic init scripts. * Added support for message compression using the :setting:`CELERY_MESSAGE_COMPRESSION` setting, or the `compression` argument to `apply_async`. This can also be set using routers. * worker: Now logs stack-trace of all threads when receiving the `SIGUSR1` signal (doesn't work on CPython 2.4, Windows or Jython). Inspired by https://gist.github.com/737056 * Can now remotely terminate/kill the worker process currently processing a task. 
The `revoke` remote control command now supports a `terminate` argument Default signal is `TERM`, but can be specified using the `signal` argument. Signal can be the uppercase name of any signal defined in the :mod:`signal` module in the Python Standard Library. Terminating a task also revokes it. Example:: >>> from celery.task.control import revoke >>> revoke(task_id, terminate=True) >>> revoke(task_id, terminate=True, signal='KILL') >>> revoke(task_id, terminate=True, signal='SIGKILL') * `TaskSetResult.join_native`: Backend-optimized version of `join()`. If available, this version uses the backend's ability to retrieve multiple results at once, unlike `join()` which fetches the results one by one. So far only supported by the AMQP result backend. Support for Memcached and Redis may be added later. * Improved implementations of `TaskSetResult.join` and `AsyncResult.wait`. An `interval` keyword argument has been added to both so the polling interval can be specified (default interval is 0.5 seconds). A `propagate` keyword argument has been added to `result.wait()`; errors will be returned instead of raised if this is set to False. .. warning:: You should decrease the polling interval when using the database result backend, as frequent polling can result in high database load. * The PID of the child worker process accepting a task is now sent as a field with the :event:`task-started` event. * The following fields have been added to all events in the worker class: * `sw_ident`: Name of worker software (e.g., ``"py-celery"``). * `sw_ver`: Software version (e.g., 2.2.0). * `sw_sys`: Operating System (e.g., Linux, Windows, Darwin). * For better accuracy the start time reported by the multiprocessing worker process is used when calculating task duration. Previously the time reported by the accept callback was used. * `celerybeat`: New built-in daemonization support using the `--detach` option. * `celeryev`: New built-in daemonization support using the `--detach` option. * `TaskSet.apply_async`: Now supports custom publishers by using the `publisher` argument. * Added :setting:`CELERY_SEND_TASK_SENT_EVENT` setting. If enabled an event will be sent with every task, so monitors can track tasks before the workers receive them. * `celerybeat`: Now reuses the broker connection when calling scheduled tasks. * The configuration module and loader to use can now be specified on the command-line. For example: .. code-block:: console $ celery worker --config=celeryconfig.py --loader=myloader.Loader * Added signals: `beat_init` and `beat_embedded_init` * :signal:`celery.signals.beat_init` Dispatched when :program:`celerybeat` starts (either standalone or embedded). Sender is the :class:`celery.beat.Service` instance. * :signal:`celery.signals.beat_embedded_init` Dispatched in addition to the :signal:`beat_init` signal when :program:`celerybeat` is started as an embedded process. Sender is the :class:`celery.beat.Service` instance. * Redis result backend: Removed deprecated settings `REDIS_TIMEOUT` and `REDIS_CONNECT_RETRY`. * CentOS init-script for :program:`celery worker` now available in `extra/centos`. * Now depends on :pypi:`pyparsing` version 1.5.0 or higher. There have been reported issues using Celery with :pypi:`pyparsing` 1.4.x, so please upgrade to the latest version. * Lots of new unit tests written, now with a total coverage of 95%. ..
_v220-fixes: Fixes ----- * `celeryev` Curses Monitor: Improved resize handling and UI layout (Issue #274 + Issue #276) * AMQP Backend: Exceptions occurring while sending task results are now propagated instead of silenced. the worker will then show the full traceback of these errors in the log. * AMQP Backend: No longer deletes the result queue after successful poll, as this should be handled by the :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting instead. * AMQP Backend: Now ensures queues are declared before polling results. * Windows: worker: Show error if running with `-B` option. Running ``celerybeat`` embedded is known not to work on Windows, so users are encouraged to run ``celerybeat`` as a separate service instead. * Windows: Utilities no longer output ANSI color codes on Windows * ``camqadm``: Now properly handles :kbd:`Control-c` by simply exiting instead of showing confusing traceback. * Windows: All tests are now passing on Windows. * Remove bin/ directory, and `scripts` section from :file:`setup.py`. This means we now rely completely on setuptools entry-points. .. _v220-experimental: Experimental ------------ * Jython: worker now runs on Jython using the threaded pool. All tests pass, but there may still be bugs lurking around the corners. * PyPy: worker now runs on PyPy. It runs without any pool, so to get parallel execution you must start multiple instances (e.g., using :program:`multi`). Sadly an initial benchmark seems to show a 30% performance decrease on ``pypy-1.4.1`` + JIT. We would like to find out why this is, so stay tuned. * :class:`PublisherPool`: Experimental pool of task publishers and connections to be used with the `retry` argument to `apply_async`. The example code below will re-use connections and channels, and retry sending of the task message if the connection is lost. .. code-block:: python from celery import current_app # Global pool pool = current_app().amqp.PublisherPool(limit=10) def my_view(request): with pool.acquire() as publisher: add.apply_async((2, 2), publisher=publisher, retry=True) celery-4.1.0/docs/history/whatsnew-2.5.rst0000644000175000017500000003723213130607475020267 0ustar omeromer00000000000000.. _whatsnew-2.5: ========================== What's new in Celery 2.5 ========================== Celery aims to be a flexible and reliable, best-of-breed solution to process vast amounts of messages in a distributed fashion, while providing operations with the tools to maintain such a system. Celery has a large and diverse community of users and contributors, you should come join us :ref:`on IRC ` or :ref:`our mailing-list `. To read more about Celery you should visit our `website`_. While this version is backward compatible with previous versions it's important that you read the following section. If you use Celery in combination with Django you must also read the `django-celery changelog ` and upgrade to :pypi:`django-celery 2.5 `. This version is officially supported on CPython 2.5, 2.6, 2.7, 3.2 and 3.3, as well as PyPy and Jython. .. _`website`: http://celeryproject.org/ .. contents:: :local: .. _v250-important: Important Notes =============== Broker connection pool now enabled by default --------------------------------------------- The default limit is 10 connections, if you have many threads/green-threads using connections at the same time you may want to tweak this limit to avoid contention. See the :setting:`BROKER_POOL_LIMIT` setting for more information. 
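For example, a minimal configuration sketch (the value shown here is illustrative, not a recommendation):

.. code-block:: python

    # celeryconfig.py
    # Allow up to 20 simultaneous broker connections; size this to roughly
    # match the number of threads/green-threads publishing at the same time.
    BROKER_POOL_LIMIT = 20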
Also note that publishing tasks will be retried by default, to change this default or the default retry policy see :setting:`CELERY_TASK_PUBLISH_RETRY` and :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY`. Rabbit Result Backend: Exchange is no longer *auto delete* ---------------------------------------------------------- The exchange used for results in the Rabbit (AMQP) result backend used to have the *auto_delete* flag set, which could result in a race condition leading to an annoying warning. .. admonition:: For RabbitMQ users Old exchanges created with the *auto_delete* flag enabled have to be removed. The :program:`camqadm` command can be used to delete the previous exchange: .. code-block:: console $ camqadm exchange.delete celeryresults As an alternative to deleting the old exchange you can configure a new name for the exchange:: CELERY_RESULT_EXCHANGE = 'celeryresults2' But you have to make sure that all clients and workers use this new setting, so they're updated to use the same exchange name. Solution for hanging workers (but must be manually enabled) ----------------------------------------------------------- The `CELERYD_FORCE_EXECV` setting has been added to solve a problem with deadlocks that originate when threads and fork are mixed together: .. code-block:: python CELERYD_FORCE_EXECV = True This setting is recommended for all users using the prefork pool, but especially users also using time limits or a max tasks per child setting. - See `Python Issue 6721`_ to read more about this issue, and why resorting to :func:`~os.execv` is the only safe solution. Enabling this option will result in a slight performance penalty when new child worker processes are started, and it will also increase memory usage (but many platforms are optimized, so the impact may be minimal). Considering that it ensures reliability when replacing lost worker processes, it should be worth it. - It's already the default behavior on Windows. - It will be the default behavior for all platforms in a future version. .. _`Python Issue 6721`: http://bugs.python.org/issue6721#msg140215 .. _v250-optimizations: Optimization ============ - The code path used when the worker executes a task has been heavily optimized, meaning the worker is able to process a great deal more tasks/second compared to previous versions. As an example the solo pool can now process up to 15000 tasks/second on a 4 core MacBook Pro when using the :pypi:`pylibrabbitmq` transport, where it previously could only do 5000 tasks/second. - The task error tracebacks are now much shorter. - Fixed a noticeable delay in task processing when rate limits are enabled. .. _v250-deprecations: Deprecation Time-line Changes ============================= Removals -------- * The old :class:`TaskSet` signature of ``(task_name, list_of_tasks)`` can no longer be used (originally scheduled for removal in 2.4). The deprecated ``.task_name`` and ``.task`` attributes have also been removed. * The functions ``celery.execute.delay_task``, ``celery.execute.apply``, and ``celery.execute.apply_async`` have been removed (originally scheduled for removal in 2.3). * The built-in ``ping`` task has been removed (originally scheduled for removal in 2.3). Please use the ping broadcast command instead. * It's no longer possible to import ``subtask`` and ``TaskSet`` from :mod:`celery.task.base`, please import them from :mod:`celery.task` instead (originally scheduled for removal in 2.4).
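As a minimal sketch, the required import change for the last item above looks like this:

.. code-block:: python

    # No longer possible:
    #   from celery.task.base import subtask, TaskSet

    # Import from celery.task instead:
    from celery.task import subtask, TaskSet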
Deprecated modules ------------------ * The :mod:`celery.decorators` module has changed status from pending deprecation to deprecated, and is scheduled for removal in version 4.0. The ``celery.task`` module must be used instead. .. _v250-news: News ==== Timezone support ---------------- Celery can now be configured to treat all incoming and outgoing dates as UTC, and the local timezone can be configured. This isn't yet enabled by default, since enabling time zone support means workers running versions pre-2.5 will be out of sync with upgraded workers. To enable UTC you have to set :setting:`CELERY_ENABLE_UTC`:: CELERY_ENABLE_UTC = True When UTC is enabled, dates and times in task messages will be converted to UTC, and then converted back to the local timezone when received by a worker. You can change the local timezone using the :setting:`CELERY_TIMEZONE` setting. Installing the :pypi:`pytz` library is recommended when using a custom timezone, to keep timezone definitions up-to-date, but it will fall back to a system definition of the timezone if available. UTC will be enabled by default in version 3.0. .. note:: :pypi:`django-celery` will use the local timezone as specified by the ``TIME_ZONE`` setting; it will also honor the new `USE_TZ`_ setting introduced in Django 1.4. .. _`USE_TZ`: https://docs.djangoproject.com/en/dev/topics/i18n/timezones/ New security serializer using cryptographic signing --------------------------------------------------- A new serializer has been added that signs and verifies the signature of messages. The name of the new serializer is ``auth``, and needs additional configuration to work (see :ref:`conf-security`). .. seealso:: :ref:`guide-security` Contributed by Mher Movsisyan. New :setting:`CELERY_ANNOTATIONS` setting ----------------------------------------- This new setting enables you to modify task classes and their attributes from the configuration. The setting can be a dict, or a list of annotation objects that filter for tasks and return a map of attributes to change. As an example, this is an annotation to change the ``rate_limit`` attribute for the ``tasks.add`` task: .. code-block:: python CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}} or change the same for all tasks: .. code-block:: python CELERY_ANNOTATIONS = {'*': {'rate_limit': '10/s'}} You can change methods too, for example the ``on_failure`` handler: .. code-block:: python def my_on_failure(self, exc, task_id, args, kwargs, einfo): print('Oh no! Task failed: %r' % (exc,)) CELERY_ANNOTATIONS = {'*': {'on_failure': my_on_failure}} If you need more flexibility then you can also create objects that filter for tasks to annotate: .. code-block:: python class MyAnnotate(object): def annotate(self, task): if task.name.startswith('tasks.'): return {'rate_limit': '10/s'} CELERY_ANNOTATIONS = (MyAnnotate(), {other_annotations,}) ``current`` provides the currently executing task ------------------------------------------------- The new :data:`celery.task.current` proxy will always give the currently executing task. **Example**: .. code-block:: python from celery.task import current, task @task def update_twitter_status(auth, message): twitter = Twitter(auth) try: twitter.update_status(message) except twitter.FailWhale, exc: # retry in 10 seconds. current.retry(countdown=10, exc=exc) Previously you'd've to type ``update_twitter_status.retry(…)`` here, which can be annoying for long task names. .. note:: This won't work if the task function is called directly (i.e., ``update_twitter_status(a, b)``).
For that to work ``apply`` must be used: ``update_twitter_status.apply((a, b))``. In Other News ------------- - Now depends on Kombu 2.1.0. - Efficient Chord support for the Memcached backend (Issue #533) This means Memcached joins Redis in the ability to do non-polling chords. Contributed by Dan McGee. - Adds Chord support for the Rabbit result backend (amqp) The Rabbit result backend can now use the fallback chord solution. - Sending :sig:`QUIT` to ``celeryd`` will now cause it to cold terminate. That is, it won't finish executing the tasks it's currently working on. Contributed by Alec Clowes. - New "detailed" mode for the Cassandra backend. Basically the idea is to keep all states using Cassandra wide columns. New states are then appended to the row as new columns, the last state being the last column. See the :setting:`CASSANDRA_DETAILED_MODE` setting. Contributed by Steeve Morin. - The Crontab parser now matches Vixie Cron behavior when parsing ranges with steps (e.g., 1-59/2). Contributed by Daniel Hepper. - ``celerybeat`` can now be configured on the command-line like ``celeryd``. Additional configuration must be added at the end of the argument list followed by ``--``, for example: .. code-block:: console $ celerybeat -l info -- celerybeat.max_loop_interval=10.0 - Now limits the number of frames in a traceback so that ``celeryd`` doesn't crash on maximum recursion limit exceeded exceptions (Issue #615). The limit is set to the current recursion limit divided by 8 (which is 125 by default). To get or set the current recursion limit use :func:`sys.getrecursionlimit` and :func:`sys.setrecursionlimit`. - More information is now preserved in the pickleable traceback. This has been added so that Sentry can show more details. Contributed by Sean O'Connor. - CentOS init-script has been updated and should be more flexible. Contributed by Andrew McFague. - MongoDB result backend now supports ``forget()``. Contributed by Andrew McFague. - ``task.retry()`` now re-raises the original exception keeping the original stack trace. Suggested by :github_user:`ojii`. - The `--uid` argument to daemons now uses ``initgroups()`` to set groups to all the groups the user is a member of. Contributed by Łukasz Oleś. - ``celeryctl``: Added ``shell`` command. The shell will have the current_app (``celery``) and all tasks automatically added to locals. - ``celeryctl``: Added ``migrate`` command. The migrate command moves all tasks from one broker to another. Note that this is experimental and you should have a backup of the data before proceeding. **Examples**: .. code-block:: console $ celeryctl migrate redis://localhost amqp://localhost $ celeryctl migrate amqp://localhost//v1 amqp://localhost//v2 $ python manage.py celeryctl migrate django:// redis:// * Routers can now override the ``exchange`` and ``routing_key`` used to create missing queues (Issue #577). By default this will always use the name of the queue, but you can now have a router return exchange and routing_key keys to set them. This is useful when using routing classes which decide a destination at run-time. Contributed by Akira Matsuzaki. - Redis result backend: Adds support for a ``max_connections`` parameter. It's now possible to configure the maximum number of simultaneous connections in the Redis connection pool used for results.
The default max connections setting can be configured using the :setting:`CELERY_REDIS_MAX_CONNECTIONS` setting, or it can be changed individually by ``RedisBackend(max_connections=int)``. Contributed by Steeve Morin. - Redis result backend: Adds the ability to wait for results without polling. Contributed by Steeve Morin. - MongoDB result backend: Now supports save and restore ``taskset``. Contributed by Julien Poissonnier. - There's a new :ref:`guide-security` guide in the documentation. - The init-scripts have been updated, and many bugs fixed. Contributed by Chris Streeter. - User (tilde) is now expanded in command-line arguments. - Can now configure :envvar:`CELERYCTL` environment variable in :file:`/etc/default/celeryd`. While not necessary for operation, :program:`celeryctl` is used for the ``celeryd status`` command, and the path to :program:`celeryctl` must be configured for that to work. The daemonization cookbook contains examples. Contributed by Jude Nagurney. - The MongoDB result backend can now use Replica Sets. Contributed by Ivan Metzlar. - gevent: Now supports autoscaling (Issue #599). Contributed by Mark Lavin. - multiprocessing: Mediator thread is now always enabled, even when rate limits are disabled, as the pool semaphore is known to block the main thread, causing broadcast commands and shutdown to depend on the semaphore being released. Fixes ===== - Exceptions that are re-raised with a new exception object now keep the original stack trace. - Windows: Fixed the ``no handlers found for multiprocessing`` warning. - Windows: The ``celeryd`` program can now be used. Previously Windows users had to launch ``celeryd`` using ``python -m celery.bin.celeryd``. - Redis result backend: Now uses ``SETEX`` command to set result key, and expiry atomically. Suggested by :github_user:`yaniv-aknin`. - ``celeryd``: Fixed a problem where shutdown hung when :kbd:`Control-c` was used to terminate. - ``celeryd``: No longer crashes when channel errors occur. Fix contributed by Roger Hu. - Fixed memory leak in the eventlet pool, caused by the use of ``greenlet.getcurrent``. Fix contributed by Ignas Mikalajūnas. - Cassandra backend: No longer uses :func:`pycassa.connect` which is deprecated since :pypi:`pycassa` 1.4. Fix contributed by Jeff Terrace. - Fixed unicode decode errors that could occur while sending error emails. Fix contributed by Seong Wun Mun. - ``celery.bin`` programs now always define ``__package__`` as recommended by PEP-366. - ``send_task`` now emits a warning when used in combination with :setting:`CELERY_ALWAYS_EAGER` (Issue #581). Contributed by Mher Movsisyan. - ``apply_async`` now forwards the original keyword arguments to ``apply`` when :setting:`CELERY_ALWAYS_EAGER` is enabled. - ``celeryev`` now tries to re-establish the connection if the connection to the broker is lost (Issue #574). - ``celeryev``: Fixed a crash occurring if a task has no associated worker information. Fix contributed by Matt Williamson. - The current date and time is now consistently taken from the current loader's ``now`` method. - Now shows helpful error message when given a configuration module ending in ``.py`` that can't be imported. - ``celeryctl``: The :option:`--expires ` and :option:`--eta ` arguments to the apply command can now be an ISO-8601 formatted string (see the example below). - ``celeryctl`` now exits with exit status ``EX_UNAVAILABLE`` (69) if no replies have been received.
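For example, a sketch of the apply command with an ISO-8601 expiry (the task name and argument values here are illustrative):

.. code-block:: console

    $ celeryctl apply tasks.add --args='[2, 2]' --expires='2012-06-01T12:00:00'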
celery-4.1.0/docs/history/changelog-2.0.rst0000644000175000017500000010233513130607475020346 0ustar omeromer00000000000000.. _changelog-2.0: =============================== Change history for Celery 2.0 =============================== .. contents:: :local: .. _version-2.0.3: 2.0.3 ===== :release-date: 2010-08-27 12:00 p.m. CEST :release-by: Ask Solem .. _v203-fixes: Fixes ----- * Worker: Properly handle connection errors happening while closing consumers. * Worker: Events are now buffered if the connection is down, then sent when the connection is re-established. * No longer depends on the :pypi:`mailer` package. This package had a name space collision with `django-mailer`, so its functionality was replaced. * Redis result backend: Documentation typos: Redis doesn't have database names, but database numbers. The default database is now 0. * :class:`~celery.task.control.inspect`: `registered_tasks` was requesting an invalid command because of a typo. See issue #170. * :setting:`CELERY_ROUTES`: Values defined in the route should now have precedence over values defined in :setting:`CELERY_QUEUES` when merging the two. With the follow settings: .. code-block:: python CELERY_QUEUES = {'cpubound': {'exchange': 'cpubound', 'routing_key': 'cpubound'}} CELERY_ROUTES = {'tasks.add': {'queue': 'cpubound', 'routing_key': 'tasks.add', 'serializer': 'json'}} The final routing options for `tasks.add` will become: .. code-block:: python {'exchange': 'cpubound', 'routing_key': 'tasks.add', 'serializer': 'json'} This wasn't the case before: the values in :setting:`CELERY_QUEUES` would take precedence. * Worker crashed if the value of :setting:`CELERY_TASK_ERROR_WHITELIST` was not an iterable * :func:`~celery.execute.apply`: Make sure `kwargs['task_id']` is always set. * `AsyncResult.traceback`: Now returns :const:`None`, instead of raising :exc:`KeyError` if traceback is missing. * :class:`~celery.task.control.inspect`: Replies didn't work correctly if no destination was specified. * Can now store result/meta-data for custom states. * Worker: A warning is now emitted if the sending of task error emails fails. * ``celeryev``: Curses monitor no longer crashes if the terminal window is resized. See issue #160. * Worker: On macOS it isn't possible to run `os.exec*` in a process that's threaded. This breaks the SIGHUP restart handler, and is now disabled on macOS, emitting a warning instead. See issue #152. * :mod:`celery.execute.trace`: Properly handle `raise(str)`, which is still allowed in Python 2.4. See issue #175. * Using urllib2 in a periodic task on macOS crashed because of the proxy auto detection used in macOS. This is now fixed by using a workaround. See issue #143. * Debian init-scripts: Commands shouldn't run in a sub shell See issue #163. * Debian init-scripts: Use the absolute path of ``celeryd`` program to allow stat See issue #162. .. _v203-documentation: Documentation ------------- * getting-started/broker-installation: Fixed typo `set_permissions ""` -> `set_permissions ".*"`. * Tasks User Guide: Added section on database transactions. See issue #169. * Routing User Guide: Fixed typo `"feed": -> {"queue": "feeds"}`. See issue #169. * Documented the default values for the :setting:`CELERYD_CONCURRENCY` and :setting:`CELERYD_PREFETCH_MULTIPLIER` settings. * Tasks User Guide: Fixed typos in the subtask example * celery.signals: Documented worker_process_init. * Daemonization cookbook: Need to export DJANGO_SETTINGS_MODULE in `/etc/default/celeryd`. 
* Added some more FAQs from stack overflow * Daemonization cookbook: Fixed typo `CELERYD_LOGFILE/CELERYD_PIDFILE` to `CELERYD_LOG_FILE` / `CELERYD_PID_FILE` Also added troubleshooting section for the init-scripts. .. _version-2.0.2: 2.0.2 ===== :release-date: 2010-07-22 11:31 a.m. CEST :release-by: Ask Solem * Routes: When using the dict route syntax, the exchange for a task could disappear making the task unroutable. See issue #158. * Test suite now passing on Python 2.4 * No longer have to type `PYTHONPATH=.` to use ``celeryconfig`` in the current directory. This is accomplished by the default loader ensuring that the current directory is in `sys.path` when loading the config module. `sys.path` is reset to its original state after loading. Adding the current working directory to `sys.path` without the user knowing may be a security issue, as this means someone can drop a Python module in the users directory that executes arbitrary commands. This was the original reason not to do this, but if done *only when loading the config module*, this means that the behavior will only apply to the modules imported in the config module, which I think is a good compromise (certainly better than just explicitly setting `PYTHONPATH=.` anyway) * Experimental Cassandra backend added. * Worker: SIGHUP handler accidentally propagated to worker pool processes. In combination with :sha:`7a7c44e39344789f11b5346e9cc8340f5fe4846c` this would make each child process start a new worker instance when the terminal window was closed :/ * Worker: Don't install SIGHUP handler if running from a terminal. This fixes the problem where the worker is launched in the background when closing the terminal. * Worker: Now joins threads at shutdown. See issue #152. * Test tear down: Don't use `atexit` but nose's `teardown()` functionality instead. See issue #154. * Debian worker init-script: Stop now works correctly. * Task logger: `warn` method added (synonym for `warning`) * Can now define a white list of errors to send error emails for. Example: .. code-block:: python CELERY_TASK_ERROR_WHITELIST = ('myapp.MalformedInputError',) See issue #153. * Worker: Now handles overflow exceptions in `time.mktime` while parsing the ETA field. * LoggerWrapper: Try to detect loggers logging back to stderr/stdout making an infinite loop. * Added :class:`celery.task.control.inspect`: Inspects a running worker. Examples: .. code-block:: pycon # Inspect a single worker >>> i = inspect('myworker.example.com') # Inspect several workers >>> i = inspect(['myworker.example.com', 'myworker2.example.com']) # Inspect all workers consuming on this vhost. >>> i = inspect() ### Methods # Get currently executing tasks >>> i.active() # Get currently reserved tasks >>> i.reserved() # Get the current ETA schedule >>> i.scheduled() # Worker statistics and info >>> i.stats() # List of currently revoked tasks >>> i.revoked() # List of registered tasks >>> i.registered_tasks() * Remote control commands `dump_active`/`dump_reserved`/`dump_schedule` now replies with detailed task requests. Containing the original arguments and fields of the task requested. In addition the remote control command `set_loglevel` has been added, this only changes the log level for the main process. * Worker control command execution now catches errors and returns their string representation in the reply. * Functional test suite added :mod:`celery.tests.functional.case` contains utilities to start and stop an embedded worker process, for use in functional testing. .. 
_version-2.0.1: 2.0.1 ===== :release-date: 2010-07-09 03:02 p.m. CEST :release-by: Ask Solem * multiprocessing.pool: Now handles encoding errors, so that pickling errors don't crash the worker processes. * The remote control command replies weren't working with RabbitMQ 1.8.0's stricter equivalence checks. If you've already hit this problem you may have to delete the declaration: .. code-block:: console $ camqadm exchange.delete celerycrq or: .. code-block:: console $ python manage.py camqadm exchange.delete celerycrq * A bug sneaked in the ETA scheduler that made it only able to execute one task per second(!) The scheduler sleeps between iterations so it doesn't consume too much CPU. It keeps a list of the scheduled items sorted by time; at each iteration it sleeps for the remaining time of the item with the nearest deadline. If there are no ETA tasks it will sleep for a minimum amount of time, one second by default. A bug sneaked in here, making it sleep for one second for every task that was scheduled. This has been fixed, so now it should move tasks like a hot knife through butter. In addition a new setting has been added to control the minimum sleep interval; :setting:`CELERYD_ETA_SCHEDULER_PRECISION`. A good value for this would be a float between 0 and 1, depending on the needed precision. A value of 0.8 means that when the ETA of a task is met, it will take at most 0.8 seconds for the task to be moved to the ready queue. * Pool: Supervisor didn't release the semaphore. This would lead to a deadlock if all workers terminated prematurely. * Added Python version trove classifiers: 2.4, 2.5, 2.6 and 2.7 * Tests now passing on Python 2.7. * Task.__reduce__: Tasks created using the task decorator can now be pickled. * :file:`setup.py`: :pypi:`nose` added to `tests_require`. * Pickle should now work with SQLAlchemy 0.5.x * New homepage design by Jan Henrik Helmers: http://celeryproject.org * New Sphinx theme by Armin Ronacher: http://docs.celeryproject.org/ * Fixed "pending_xref" errors shown in the HTML rendering of the documentation. Apparently this was caused by new changes in Sphinx 1.0b2. * Router classes in :setting:`CELERY_ROUTES` are now imported lazily. Importing a router class in a module that also loads the Celery environment would cause a circular dependency. This is solved by importing it when needed after the environment is set up. * :setting:`CELERY_ROUTES` was broken if set to a single dict. This example in the docs should now work again: .. code-block:: python CELERY_ROUTES = {'feed.tasks.import_feed': 'feeds'} * `CREATE_MISSING_QUEUES` wasn't honored by apply_async. * New remote control command: `stats` Dumps information about the worker, like pool process ids, and total number of tasks executed by type. Example reply: .. code-block:: python [{'worker.local': {'total': {'tasks.sleeptask': 6}, 'pool': {'timeouts': [None, None], 'processes': [60376, 60377], 'max-concurrency': 2, 'max-tasks-per-child': None, 'put-guarded-by-semaphore': True}}}] * New remote control command: `dump_active` Gives a list of tasks currently being executed by the worker. By default arguments are passed through repr in case there are arguments that aren't JSON encodable. If you know the arguments are JSON safe, you can pass the argument `safe=True`. Example reply: ..
code-block:: pycon >>> broadcast('dump_active', arguments={'safe': False}, reply=True) [{'worker.local': [ {'args': '(1,)', 'time_start': 1278580542.6300001, 'name': 'tasks.sleeptask', 'delivery_info': { 'consumer_tag': '30', 'routing_key': 'celery', 'exchange': 'celery'}, 'hostname': 'casper.local', 'acknowledged': True, 'kwargs': '{}', 'id': '802e93e9-e470-47ed-b913-06de8510aca2', } ]}] * Added experimental support for persistent revokes. Use the `-S|--statedb` argument to the worker to enable it: .. code-block:: console $ celeryd --statedb=/var/run/celeryd This will use the file: `/var/run/celeryd.db`, as the `shelve` module automatically adds the `.db` suffix. .. _version-2.0.0: 2.0.0 ===== :release-date: 2010-07-02 02:30 p.m. CEST :release-by: Ask Solem Foreword -------- Celery 2.0 contains backward incompatible changes, the most important being that the Django dependency has been removed so Celery no longer supports Django out of the box, but instead as an add-on package called :pypi:`django-celery`. We're very sorry for breaking backwards compatibility, but there are also many new and exciting features to make up for the time you lose upgrading, so be sure to read the :ref:`News ` section. Quite a lot of potential users have been upset about the Django dependency, so maybe this is a chance to get wider adoption by the Python community as well. Big thanks to all contributors, testers and users! .. _v200-django-upgrade: Upgrading for Django-users -------------------------- Django integration has been moved to a separate package: :pypi:`django-celery`. * To upgrade you need to install the :pypi:`django-celery` module and change: .. code-block:: python INSTALLED_APPS = 'celery' to: .. code-block:: python INSTALLED_APPS = 'djcelery' * If you use `mod_wsgi` you need to add the following line to your `.wsgi` file: .. code-block:: python import os os.environ['CELERY_LOADER'] = 'django' * The following modules have been moved to :pypi:`django-celery`: ===================================== ===================================== **Module name** **Replace with** ===================================== ===================================== `celery.models` `djcelery.models` `celery.managers` `djcelery.managers` `celery.views` `djcelery.views` `celery.urls` `djcelery.urls` `celery.management` `djcelery.management` `celery.loaders.djangoapp` `djcelery.loaders` `celery.backends.database` `djcelery.backends.database` `celery.backends.cache` `djcelery.backends.cache` ===================================== ===================================== Importing :mod:`djcelery` will automatically set up Celery to use the Django loader. It does this by setting the :envvar:`CELERY_LOADER` environment variable to `"django"` (it won't change it if a loader is already set). When the Django loader is used, the "database" and "cache" result backend aliases will point to the :mod:`djcelery` backends instead of the built-in backends, and configuration will be read from the Django settings. .. _v200-upgrade: Upgrading for others -------------------- .. _v200-upgrade-database: Database result backend ~~~~~~~~~~~~~~~~~~~~~~~ The database result backend is now using `SQLAlchemy`_ instead of the Django ORM, see `Supported Databases`_ for a table of supported databases. The `DATABASE_*` settings have been replaced by a single setting: :setting:`CELERY_RESULT_DBURI`. The value here should be an `SQLAlchemy Connection String`_, some examples include: ..
code-block:: python # sqlite (filename) CELERY_RESULT_DBURI = 'sqlite:///celerydb.sqlite' # mysql CELERY_RESULT_DBURI = 'mysql://scott:tiger@localhost/foo' # postgresql CELERY_RESULT_DBURI = 'postgresql://scott:tiger@localhost/mydatabase' # oracle CELERY_RESULT_DBURI = 'oracle://scott:tiger@127.0.0.1:1521/sidname' See `SQLAlchemy Connection Strings`_ for more information about connection strings. To specify additional SQLAlchemy database engine options you can use the :setting:`CELERY_RESULT_ENGINE_OPTIONS` setting: .. code-block:: python # echo enables verbose logging from SQLAlchemy. CELERY_RESULT_ENGINE_OPTIONS = {'echo': True} .. _`SQLAlchemy`: http://www.sqlalchemy.org .. _`Supported Databases`: http://www.sqlalchemy.org/docs/core/engines.html#supported-databases .. _`SQLAlchemy Connection String`: http://www.sqlalchemy.org/docs/core/engines.html#database-urls .. _`SQLAlchemy Connection Strings`: http://www.sqlalchemy.org/docs/core/engines.html#database-urls .. _v200-upgrade-cache: Cache result backend ~~~~~~~~~~~~~~~~~~~~ The cache result backend is no longer using the Django cache framework, but it supports mostly the same configuration syntax: .. code-block:: python CELERY_CACHE_BACKEND = 'memcached://A.example.com:11211;B.example.com' To use the cache backend you must either have the :pypi:`pylibmc` or :pypi:`python-memcached` library installed, of which the former is regarded as the best choice. The support backend types are `memcached://` and `memory://`, we haven't felt the need to support any of the other backends provided by Django. .. _v200-incompatible: Backward incompatible changes ----------------------------- * Default (python) loader now prints warning on missing `celeryconfig.py` instead of raising :exc:`ImportError`. The worker raises :exc:`~@ImproperlyConfigured` if the configuration isn't set up. This makes it possible to use `--help` etc., without having a working configuration. Also this makes it possible to use the client side of Celery without being configured: .. code-block:: pycon >>> from carrot.connection import BrokerConnection >>> conn = BrokerConnection('localhost', 'guest', 'guest', '/') >>> from celery.execute import send_task >>> r = send_task('celery.ping', args=(), kwargs={}, connection=conn) >>> from celery.backends.amqp import AMQPBackend >>> r.backend = AMQPBackend(connection=conn) >>> r.get() 'pong' * The following deprecated settings has been removed (as scheduled by the :ref:`deprecation-timeline`): ===================================== ===================================== **Setting name** **Replace with** ===================================== ===================================== `CELERY_AMQP_CONSUMER_QUEUES` `CELERY_QUEUES` `CELERY_AMQP_EXCHANGE` `CELERY_DEFAULT_EXCHANGE` `CELERY_AMQP_EXCHANGE_TYPE` `CELERY_DEFAULT_EXCHANGE_TYPE` `CELERY_AMQP_CONSUMER_ROUTING_KEY` `CELERY_QUEUES` `CELERY_AMQP_PUBLISHER_ROUTING_KEY` `CELERY_DEFAULT_ROUTING_KEY` ===================================== ===================================== * The `celery.task.rest` module has been removed, use `celery.task.http` instead (as scheduled by the :ref:`deprecation-timeline`). * It's no longer allowed to skip the class name in loader names. (as scheduled by the :ref:`deprecation-timeline`): Assuming the implicit `Loader` class name is no longer supported, for example, if you use: .. code-block:: python CELERY_LOADER = 'myapp.loaders' You need to include the loader class name, like this: .. 
code-block:: python CELERY_LOADER = 'myapp.loaders.Loader' * :setting:`CELERY_TASK_RESULT_EXPIRES` now defaults to 1 day. Previous default setting was to expire in 5 days. * AMQP backend: Don't use different values for `auto_delete`. This bug became visible with RabbitMQ 1.8.0, which no longer allows conflicting declarations for the auto_delete and durable settings. If you've already used Celery with this backend chances are you have to delete the previous declaration: .. code-block:: console $ camqadm exchange.delete celeryresults * Now uses pickle instead of cPickle on Python versions <= 2.5 cPickle is broken in Python <= 2.5. It unsafely and incorrectly uses relative instead of absolute imports, so for example: .. code-block:: python exceptions.KeyError becomes: .. code-block:: python celery.exceptions.KeyError Your best choice is to upgrade to Python 2.6, as while the pure pickle version has worse performance, it is the only safe option for older Python versions. .. _v200-news: News ---- * **celeryev**: Curses Celery Monitor and Event Viewer. This is a simple monitor allowing you to see what tasks are executing in real-time and investigate tracebacks and results of ready tasks. It also enables you to set new rate limits and revoke tasks. Screenshot: .. figure:: ../images/celeryevshotsm.jpg If you run `celeryev` with the `-d` switch it will act as an event dumper, simply dumping the events it receives to standard out: .. code-block:: console $ celeryev -d -> celeryev: starting capture... casper.local [2010-06-04 10:42:07.020000] heartbeat casper.local [2010-06-04 10:42:14.750000] task received: tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={} eta=2010-06-04T10:42:16.669290, retries=0 casper.local [2010-06-04 10:42:17.230000] task started tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={} casper.local [2010-06-04 10:42:17.960000] task succeeded: tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={} result=4, runtime=0.782663106918 The fields here are, in order: *sender hostname*, *timestamp*, *event type* and *additional event fields*. * AMQP result backend: Now supports `.ready()`, `.successful()`, `.result`, `.status`, and even responds to changes in task state * New user guides: * :ref:`guide-workers` * :ref:`guide-canvas` * :ref:`guide-routing` * Worker: Standard out/error is now being redirected to the log file. * :pypi:`billiard` has been moved back to the Celery repository. ===================================== ===================================== **Module name** **celery equivalent** ===================================== ===================================== `billiard.pool` `celery.concurrency.processes.pool` `billiard.serialization` `celery.serialization` `billiard.utils.functional` `celery.utils.functional` ===================================== ===================================== The :pypi:`billiard` distribution may be maintained, depending on interest. * now depends on :pypi:`carrot` >= 0.10.5 * now depends on :pypi:`pyparsing` * Worker: Added `--purge` as an alias to `--discard`. * Worker: :kbd:`Control-c` (SIGINT) once does warm shutdown, hitting :kbd:`Control-c` twice forces termination. * Added support for using complex Crontab-expressions in periodic tasks. For example, you can now use: .. code-block:: pycon >>> crontab(minute='*/15') or even: .. code-block:: pycon >>> crontab(minute='*/30', hour='8-17,1-2', day_of_week='thu-fri') See :ref:`guide-beat`. 
* Worker: Now waits for available pool processes before applying new
  tasks to the pool.

  This means it doesn't have to wait for dozens of tasks to finish at shutdown
  because it has applied prefetched tasks without having any pool
  processes available to immediately accept them.

  See issue #122.

* New built-in way to do task callbacks using :class:`~celery.subtask`.

  See :ref:`guide-canvas` for more information.

* TaskSets can now contain several types of tasks.

  :class:`~celery.task.sets.TaskSet` has been refactored to use a new syntax,
  please see :ref:`guide-canvas` for more information.

  The previous syntax is still supported, but will be deprecated in
  version 1.4.

* TaskSet failed() result was incorrect.

  See issue #132.

* Now creates different loggers per task class.

  See issue #129.

* Missing queue definitions are now created automatically.

  You can disable this using the :setting:`CELERY_CREATE_MISSING_QUEUES`
  setting.

  The missing queues are created with the following options:

  .. code-block:: python

      CELERY_QUEUES[name] = {'exchange': name,
                             'exchange_type': 'direct',
                             'routing_key': name}

  This feature makes it easy to set up routing using the `-Q`
  option to the worker:

  .. code-block:: console

      $ celeryd -Q video, image

  See the new routing section of the User Guide for more information:
  :ref:`guide-routing`.

* New Task option: `Task.queue`

  If set, message options will be taken from the corresponding entry in
  :setting:`CELERY_QUEUES`. `exchange`, `exchange_type` and `routing_key`
  will be ignored.

* Added support for task soft and hard time limits.

  New settings added:

  * :setting:`CELERYD_TASK_TIME_LIMIT`

    Hard time limit. The worker processing the task will be killed and
    replaced with a new one when this is exceeded.

  * :setting:`CELERYD_TASK_SOFT_TIME_LIMIT`

    Soft time limit. The :exc:`~@SoftTimeLimitExceeded` exception will be
    raised when this is exceeded. The task can catch this to, for example,
    clean up before the hard time limit is reached.

  New command-line arguments to ``celeryd`` added:
  `--time-limit` and `--soft-time-limit`.

  What's left?

  This won't work on platforms not supporting signals (and specifically
  the `SIGUSR1` signal) yet. So an alternative, or the ability to disable
  the feature altogether on non-conforming platforms, must be implemented.

  Also, when the hard time limit is exceeded, the task result should
  be a `TimeLimitExceeded` exception.

* Test suite is now passing without a running broker, using the carrot
  in-memory backend.

* Log output is now available in colors.

    ===================================== =====================================
    **Log level**                         **Color**
    ===================================== =====================================
    `DEBUG`                               Blue
    `WARNING`                             Yellow
    `CRITICAL`                            Magenta
    `ERROR`                               Red
    ===================================== =====================================

  This is only enabled when the log output is a tty.
  You can explicitly enable/disable this feature using the
  :setting:`CELERYD_LOG_COLOR` setting.

* Added support for task router classes (like the django multi-db routers).

* New setting: :setting:`CELERY_ROUTES`

  This is a single, or a list of routers to traverse when sending tasks.
  Dictionaries in this list convert to a :class:`celery.routes.MapRoute`
  instance.

  Examples:

      >>> CELERY_ROUTES = {'celery.ping': 'default',
                           'mytasks.add': 'cpu-bound',
                           'video.encode': {
                               'queue': 'video',
                               'exchange': 'media',
                               'routing_key': 'media.video.encode'}}

      >>> CELERY_ROUTES = ('myapp.tasks.Router', {'celery.ping': 'default'})

  Where `myapp.tasks.Router` could be:

  ..
code-block:: python

      class Router(object):

          def route_for_task(self, task, args=None, kwargs=None):
              if task == 'celery.ping':
                  return 'default'

  route_for_task may return a string or a dict. A string means it's a queue
  name in :setting:`CELERY_QUEUES`, and a dict means it's a custom route.

  When sending tasks, the routers are consulted in order. The first
  router that doesn't return `None` is the route to use. The message options
  are then merged with the found route settings, where the router's settings
  have priority.

  For example, if :func:`~celery.execute.apply_async` has these arguments:

  .. code-block:: pycon

      >>> Task.apply_async(immediate=False, exchange='video',
      ...                  routing_key='video.compress')

  and a router returns:

  .. code-block:: python

      {'immediate': True, 'exchange': 'urgent'}

  the final message options will be:

  .. code-block:: pycon

      >>> task.apply_async(
      ...     immediate=True,
      ...     exchange='urgent',
      ...     routing_key='video.compress',
      ... )

  (and any default message options defined in the
  :class:`~celery.task.base.Task` class)

* New Task handler called after the task returns:
  :meth:`~celery.task.base.Task.after_return`.

* :class:`~billiard.einfo.ExceptionInfo` now passed to
  :meth:`~celery.task.base.Task.on_retry`/
  :meth:`~celery.task.base.Task.on_failure` as ``einfo`` keyword argument.

* Worker: Added :setting:`CELERYD_MAX_TASKS_PER_CHILD` /
  ``celery worker --maxtasksperchild``.

  Defines the maximum number of tasks a pool worker can process before
  the process is terminated and replaced by a new one.

* Revoked tasks now marked with state :state:`REVOKED`, and `result.get()`
  will now raise :exc:`~@TaskRevokedError`.

* :func:`celery.task.control.ping` now works as expected.

* `apply(throw=True)` / :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`:
  Makes eager execution re-raise task errors.

* New signal: :signal:`~celery.signals.worker_process_init`: Sent inside the
  pool worker process at init.

* Worker: :option:`celery worker -Q` option: Ability to specify list of queues
  to use, disabling other configured queues.

  For example, if :setting:`CELERY_QUEUES` defines four queues: `image`,
  `video`, `data` and `default`, the following command would make the worker
  only consume from the `image` and `video` queues:

  .. code-block:: console

      $ celeryd -Q image,video

* Worker: New return value for the `revoke` control command:

  Now returns:

  .. code-block:: python

      {'ok': 'task $id revoked'}

  instead of :const:`True`.

* Worker: Can now enable/disable events using remote control.

  Example usage:

      >>> from celery.task.control import broadcast
      >>> broadcast('enable_events')
      >>> broadcast('disable_events')

* Removed top-level tests directory. Test config is now in
  `celery.tests.config`.

  This means running the unit tests doesn't require any special setup.
  `celery/tests/__init__` now configures the :envvar:`CELERY_CONFIG_MODULE`
  and :envvar:`CELERY_LOADER` environment variables, so when `nosetests`
  imports that, the unit test environment is all set up.

  Before you run the tests you need to install the test requirements:

  .. code-block:: console

      $ pip install -r requirements/test.txt

  Running all tests:

  .. code-block:: console

      $ nosetests

  Specifying the tests to run:

  .. code-block:: console

      $ nosetests celery.tests.test_task

  Producing HTML coverage:

  .. code-block:: console

      $ nosetests --with-coverage3

  The coverage output is then located in `celery/tests/cover/index.html`.

* Worker: New option `--version`: Dump version info and exit.

* ``celeryd-multi``: Tool for shell scripts to start multiple workers.
  Some examples:

  - Advanced example with 10 workers:

    * Three of the workers process the images and video queue
    * Two of the workers process the data queue with loglevel DEBUG
    * the rest process the default queue.

    .. code-block:: console

        $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data -Q default -L:4,5 DEBUG

  - Get commands to start 10 workers, with 3 processes each

    .. code-block:: console

        $ celeryd-multi start 3 -c 3
        celeryd -n celeryd1.myhost -c 3
        celeryd -n celeryd2.myhost -c 3
        celeryd -n celeryd3.myhost -c 3

  - Start 3 named workers

    .. code-block:: console

        $ celeryd-multi start image video data -c 3
        celeryd -n image.myhost -c 3
        celeryd -n video.myhost -c 3
        celeryd -n data.myhost -c 3

  - Specify custom hostname

    .. code-block:: console

        $ celeryd-multi start 2 -n worker.example.com -c 3
        celeryd -n celeryd1.worker.example.com -c 3
        celeryd -n celeryd2.worker.example.com -c 3

    Additional options are added to each ``celeryd``, but you can also
    modify the options for ranges of workers, or for single workers.

  - 3 workers: Two with 3 processes, and one with 10 processes.

    .. code-block:: console

        $ celeryd-multi start 3 -c 3 -c:1 10
        celeryd -n celeryd1.myhost -c 10
        celeryd -n celeryd2.myhost -c 3
        celeryd -n celeryd3.myhost -c 3

  - Can also specify options for named workers

    .. code-block:: console

        $ celeryd-multi start image video data -c 3 -c:image 10
        celeryd -n image.myhost -c 10
        celeryd -n video.myhost -c 3
        celeryd -n data.myhost -c 3

  - Ranges and lists of workers in options are also allowed:
    (``-c:1-3`` can also be written as ``-c:1,2,3``)

    .. code-block:: console

        $ celeryd-multi start 5 -c 3 -c:1-3 10
        celeryd-multi -n celeryd1.myhost -c 10
        celeryd-multi -n celeryd2.myhost -c 10
        celeryd-multi -n celeryd3.myhost -c 10
        celeryd-multi -n celeryd4.myhost -c 3
        celeryd-multi -n celeryd5.myhost -c 3

  - Lists also work with named workers:

    .. code-block:: console

        $ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
        celeryd-multi -n foo.myhost -c 10
        celeryd-multi -n bar.myhost -c 10
        celeryd-multi -n baz.myhost -c 10
        celeryd-multi -n xuzzy.myhost -c 3

* The worker now calls the result backend's `process_cleanup` method
  *after* task execution instead of before.

* AMQP result backend now supports Pika.

celery-4.1.0/docs/history/whatsnew-3.0.rst0000644000175000017500000007425413130607475020270 0ustar omeromer00000000000000
.. _whatsnew-3.0:

===========================================
 What's new in Celery 3.0 (Chiastic Slide)
===========================================

Celery is a simple, flexible, and reliable distributed system to
process vast amounts of messages, while providing operations with
the tools required to maintain such a system.

It's a task queue with focus on real-time processing, while also
supporting task scheduling.

Celery has a large and diverse community of users and contributors;
you should come join us :ref:`on IRC <irc-channel>`
or :ref:`our mailing-list <mailing-list>`.

To read more about Celery you should go read the
:ref:`introduction <intro>`.

While this version is backward compatible with previous versions,
it's important that you read the following section.

If you use Celery in combination with Django you must also
read the `django-celery changelog`_ and upgrade to
:pypi:`django-celery 3.0 <django-celery>`.

This version is officially supported on CPython 2.5, 2.6, 2.7, 3.2 and 3.3,
as well as PyPy and Jython.

Highlights
==========

.. topic:: Overview

    - A new and improved API, that's both simpler and more powerful.

      Everyone must read the new :ref:`first-steps` tutorial,
      and the new :ref:`next-steps` tutorial.
      Oh, and why not reread the user guide while you're at it :)

      There are no current plans to deprecate the old API,
      so you don't have to be in a hurry to port your applications.

    - The worker is now thread-less, giving great performance improvements.

    - The new "Canvas" makes it easy to define complex work-flows.

      Ever wanted to chain tasks together? This is possible, but
      not just that, now you can even chain together groups and chords,
      or even combine multiple chains.

      Read more in the :ref:`Canvas <guide-canvas>` user guide.

    - All of Celery's command-line programs are now available from a single
      :program:`celery` umbrella command.

    - This is the last version to support Python 2.5.

      Starting with Celery 3.1, Python 2.6 or later is required.

    - Support for the new :pypi:`librabbitmq` C client.

      Celery will automatically use the :pypi:`librabbitmq` module
      if installed, which is a very fast and memory-optimized
      replacement for the :pypi:`amqp` module.

    - Redis support is more reliable with improved ack emulation.

    - Celery now always uses UTC.

    - Over 600 commits, 30k additions/36k deletions.

      In comparison 1.0 → 2.0 had 18k additions/8k deletions.

.. _`website`: http://celeryproject.org/
.. _`django-celery changelog`:
    https://github.com/celery/django-celery/tree/master/Changelog

.. contents::
    :local:
    :depth: 2

.. _v300-important:

Important Notes
===============

Broadcast exchanges renamed
---------------------------

The worker's remote control command exchanges have been renamed
(a new :term:`pidbox` name); this is because the ``auto_delete`` flag on
the exchanges has been removed, and that makes it incompatible with
earlier versions.

You can manually delete the old exchanges if you want,
using the :program:`celery amqp` command (previously called ``camqadm``):

.. code-block:: console

    $ celery amqp exchange.delete celeryd.pidbox
    $ celery amqp exchange.delete reply.celeryd.pidbox

Event-loop
----------

The worker is now running *without threads* when used with RabbitMQ (AMQP),
or Redis as a broker, resulting in:

- Much better overall performance.
- Fixes several edge case race conditions.
- Sub-millisecond timer precision.
- Faster shutdown times.

The transports supported are: ``py-amqp``, ``librabbitmq``, ``redis``,
and ``amqplib``. Hopefully this can be extended to include
additional broker transports in the future.

For increased reliability the :setting:`CELERY_FORCE_EXECV` setting is enabled
by default if the event-loop isn't used.

New ``celery`` umbrella command
-------------------------------

All Celery's command-line programs are now available from a single
:program:`celery` umbrella command.

You can see a list of sub-commands and options by running:

.. code-block:: console

    $ celery help

Commands include:

- ``celery worker`` (previously ``celeryd``).
- ``celery beat`` (previously ``celerybeat``).
- ``celery amqp`` (previously ``camqadm``).

The old programs are still available (``celeryd``, ``celerybeat``, etc),
but you're discouraged from using them.

Now depends on :pypi:`billiard`
-------------------------------

Billiard is a fork of the Python :mod:`multiprocessing` module containing
the no-execv patch by ``sbt`` (http://bugs.python.org/issue8713),
and also contains the pool improvements previously located in Celery.

This fork was necessary as changes to the C extension code were required
for the no-execv patch to work.
- Issue #625
- Issue #627
- Issue #640
- `django-celery #122 <https://github.com/celery/django-celery/issues/122>`_

Chaining: tasks can now be chained, and the result of each task in the
chain is recorded on the ``parent`` attribute of the following result:

.. code-block:: pycon

    >>> from celery import chain

    # (2 + 2) * 8 / 2
    >>> res = chain(add.subtask((2, 2)),
    ...             mul.subtask((8,)),
    ...             div.subtask((2,))).apply_async()
    >>> res.get() == 16

    >>> res.parent.get() == 32

    >>> res.parent.parent.get() == 4

- Adds :meth:`AsyncResult.get_leaf`

  Waits and returns the result of the leaf subtask.
  That's the last node found when traversing the graph,
  but this means that the graph can be 1-dimensional only (in effect a list).

- Adds ``subtask.link(subtask)`` + ``subtask.link_error(subtask)``

  Shortcut to ``s.options.setdefault('link', []).append(subtask)``

- Adds ``subtask.flatten_links()``

  Returns a flattened list of all dependencies (recursively)

Redis: Priority support
-----------------------

The message's ``priority`` field is now respected by the Redis
transport by having multiple lists for each named queue.
The queues are then consumed in order of priority.

The priority field is a number in the range of 0 - 9, where
0 is the default and highest priority.

The priority range is collapsed into four steps by default, since it is
unlikely that nine steps will yield more benefit than using four steps.
The number of steps can be configured by setting the ``priority_steps``
transport option, which must be a list of numbers in **sorted order**:

.. code-block:: pycon

    >>> BROKER_TRANSPORT_OPTIONS = {
    ...     'priority_steps': [0, 2, 4, 6, 8, 9],
    ... }

Priorities implemented in this way aren't as reliable as
priorities on the server side, which is why
the feature is nicknamed "quasi-priorities";
**Using routing is still the suggested way of ensuring
quality of service**, as client-implemented priorities
fall short in a number of ways, for example if the worker
is busy with long running tasks, has prefetched many messages,
or the queues are congested.

Still, it is possible that using priorities in combination
with routing can be more beneficial than using routing
or priorities alone. Experimentation and monitoring
should be used to prove this.

Contributed by Germán M. Bravo.

Redis: Now cycles queues so that consuming is fair
--------------------------------------------------

This ensures that a very busy queue won't block messages
from other queues, and ensures that all queues have
an equal chance of being consumed from.

This used to be the case before, but the behavior was
accidentally changed while switching to using blocking pop.

`group`/`chord`/`chain` are now subtasks
----------------------------------------

- group is no longer an alias to ``TaskSet``, but something new altogether,
  since it was very difficult to migrate the ``TaskSet`` class to become
  a subtask.

- A new shortcut has been added to tasks:

  .. code-block:: pycon

      >>> task.s(arg1, arg2, kw=1)

  as a shortcut to:

  .. code-block:: pycon

      >>> task.subtask((arg1, arg2), {'kw': 1})

- Tasks can be chained by using the ``|`` operator:

  .. code-block:: pycon

      >>> (add.s(2, 2) | pow.s(2)).apply_async()

- Subtasks can be "evaluated" using the ``~`` operator:

  .. code-block:: pycon

      >>> ~add.s(2, 2)
      4

      >>> ~(add.s(2, 2) | pow.s(2))

  is the same as:

  .. code-block:: pycon

      >>> chain(add.s(2, 2), pow.s(2)).apply_async().get()

- A new subtask_type key has been added to the subtask dictionary.

  This can be the string ``"chord"``, ``"group"``, ``"chain"``, ``"chunks"``,
  ``"xmap"``, or ``"xstarmap"``.

- maybe_subtask now uses subtask_type to reconstruct
  the object, to be used when using non-pickle serializers.
- The logic for these operations has been moved to dedicated tasks
  celery.chord, celery.chain and celery.group.

- subtask no longer inherits from AttributeDict.

  It's now a pure dict subclass with properties for attribute access
  to the relevant keys.

- The repr now outputs how the sequence would look imperatively:

  .. code-block:: pycon

      >>> from celery import chord

      >>> (chord([add.s(i, i) for i in xrange(10)], xsum.s()) | pow.s(2))
      tasks.xsum([tasks.add(0, 0),
                  tasks.add(1, 1),
                  tasks.add(2, 2),
                  tasks.add(3, 3),
                  tasks.add(4, 4),
                  tasks.add(5, 5),
                  tasks.add(6, 6),
                  tasks.add(7, 7),
                  tasks.add(8, 8),
                  tasks.add(9, 9)]) | tasks.pow(2)

New remote control commands
---------------------------

These commands were previously experimental, but they've proven
stable and are now documented as part of the official API.

- :control:`add_consumer`/:control:`cancel_consumer`

  Tells workers to consume from a new queue, or cancel consuming from a
  queue. This command has also been changed so that the worker remembers
  the queues added, so that the change will persist even if
  the connection is re-connected.

  These commands are available programmatically as
  :meth:`@control.add_consumer` / :meth:`@control.cancel_consumer`:

  .. code-block:: pycon

      >>> celery.control.add_consumer(queue_name,
      ...     destination=['w1.example.com'])
      >>> celery.control.cancel_consumer(queue_name,
      ...     destination=['w1.example.com'])

  or using the :program:`celery control` command:

  .. code-block:: console

      $ celery control -d w1.example.com add_consumer queue
      $ celery control -d w1.example.com cancel_consumer queue

  .. note::

      Remember that a control command without *destination* will be
      sent to **all workers**.

- :control:`autoscale`

  Tells workers with ``--autoscale`` enabled to change autoscale
  max/min concurrency settings.

  This command is available programmatically as :meth:`@control.autoscale`:

  .. code-block:: pycon

      >>> celery.control.autoscale(max=10, min=5,
      ...                          destination=['w1.example.com'])

  or using the :program:`celery control` command:

  .. code-block:: console

      $ celery control -d w1.example.com autoscale 10 5

- :control:`pool_grow`/:control:`pool_shrink`

  Tells workers to add or remove pool processes.

  These commands are available programmatically as
  :meth:`@control.pool_grow` / :meth:`@control.pool_shrink`:

  .. code-block:: pycon

      >>> celery.control.pool_grow(2, destination=['w1.example.com'])
      >>> celery.control.pool_shrink(2, destination=['w1.example.com'])

  or using the :program:`celery control` command:

  .. code-block:: console

      $ celery control -d w1.example.com pool_grow 2
      $ celery control -d w1.example.com pool_shrink 2

- :program:`celery control` now supports :control:`rate_limit` and
  :control:`time_limit` commands.

  See ``celery control --help`` for details.

Crontab now supports Day of Month, and Month of Year arguments
--------------------------------------------------------------

See the updated list of examples at :ref:`beat-crontab`.

Immutable subtasks
------------------

Subtasks can now be immutable, which means that the arguments
won't be modified when calling callbacks:

.. code-block:: pycon

    >>> chain(add.s(2, 2), clear_static_electricity.si())

means it'll not receive the arguments of the parent task,
and ``.si()`` is a shortcut to:

.. code-block:: pycon

    >>> clear_static_electricity.subtask(immutable=True)
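A typical use for this (the task names here are illustrative, not from
this document) is a chord callback that should run with only its own
arguments, ignoring the return values of the group:

.. code-block:: pycon

    >>> chord([import_contact.s(c) for c in contacts],
    ...       notify_complete.si(import_id))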
Logging Improvements
--------------------

Logging support now conforms better with best practices.

- Classes used by the worker no longer use app.get_default_logger, but use
  `celery.utils.log.get_logger` which simply gets the logger without
  setting the level, and adds a NullHandler.

- Loggers are no longer passed around, instead every module using logging
  defines a module global logger that's used throughout.

- All loggers inherit from a common logger called "celery".

- Before ``task.get_logger`` would set up a new logger for every task,
  and even set the log level. This is no longer the case.

- Instead all task loggers now inherit from a common "celery.task" logger
  that's set up when programs call `setup_logging_subsystem`.

- Instead of using LoggerAdapter to augment the formatter with
  the task_id and task_name field, the task base logger now uses
  a special formatter adding these values at run-time from the
  currently executing task.

- In fact, ``task.get_logger`` is no longer recommended; it is better
  to add a module-level logger to your tasks module.

  For example, like this:

  .. code-block:: python

      from celery.utils.log import get_task_logger

      logger = get_task_logger(__name__)

      @celery.task
      def add(x, y):
          logger.debug('Adding %r + %r' % (x, y))
          return x + y

  The resulting logger will then inherit from the ``"celery.task"`` logger
  so that the current task name and id are included in logging output.

- Redirected output from stdout/stderr is now logged to a
  "celery.redirected" logger.

- In addition a few warnings.warn have been replaced with logger.warn.

- Now avoids the 'no handlers for logger multiprocessing' warning.

Task registry no longer global
------------------------------

Every Celery instance now has its own task registry.

You can make apps share registries by specifying it:

.. code-block:: pycon

    >>> app1 = Celery()
    >>> app2 = Celery(tasks=app1.tasks)

Note that tasks are shared between registries by default,
so that tasks will be added to every subsequently created task registry.

As an alternative tasks can be private to specific task registries
by setting the ``shared`` argument to the ``@task`` decorator:

.. code-block:: python

    @celery.task(shared=False)
    def add(x, y):
        return x + y

Abstract tasks are now lazily bound
-----------------------------------

The :class:`~celery.task.Task` class is no longer bound to an app
by default; it will first be bound (and configured) when
a concrete subclass is created.

This means that you can safely import and make task base classes,
without also initializing the app environment:

.. code-block:: python

    from celery.task import Task

    class DebugTask(Task):
        abstract = True

        def __call__(self, *args, **kwargs):
            print('CALLING %r' % (self,))
            return self.run(*args, **kwargs)

.. code-block:: pycon

    >>> DebugTask
    <unbound DebugTask>

    >>> @celery1.task(base=DebugTask)
    ... def add(x, y):
    ...     return x + y
    >>> add.__class__
    <class add of <Celery __main__:0x...>>

Lazy task decorators
--------------------

The ``@task`` decorator is now lazy when used with custom apps.

That is, if ``accept_magic_kwargs`` is enabled (hereafter called
"compat mode"), the task decorator executes inline like before, however
for custom apps the @task decorator now returns a special PromiseProxy
object that's only evaluated on access.

All promises will be evaluated when :meth:`@finalize` is called, or implicitly
when the task registry is first used.

Smart `--app` option
--------------------

The ``--app`` option now 'auto-detects':

- If the provided path is a module it tries to get an
  attribute named 'celery'.

- If the provided path is a package it tries
  to import a sub module named 'celery',
  and get the celery attribute from that module.

For example, if you have a project named ``proj``
where the celery app is located in ``from proj.celery import app``,
then the following will be equivalent:

.. code-block:: console

    $ celery worker --app=proj
    $ celery worker --app=proj.celery:
    $ celery worker --app=proj.celery:app
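For reference, a minimal ``proj/celery.py`` matching the commands above
could look like the following sketch (the broker URL is illustrative, and
the ``celery`` alias is only there to satisfy the attribute name used by
the auto-detection described above):

.. code-block:: python

    # proj/celery.py
    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//')

    # ``--app=proj`` looks for an attribute named 'celery',
    # so alias the instance under that name as well.
    celery = app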
In Other News
-------------

- New :setting:`CELERYD_WORKER_LOST_WAIT` to control the timeout in
  seconds before :exc:`billiard.WorkerLostError` is raised
  when a worker can't be signaled (Issue #595).

  Contributed by Brendon Crawford.

- Redis event monitor queues are now automatically deleted (Issue #436).

- App instance factory methods have been converted to be cached
  descriptors that create a new subclass on access.

  For example, this means that ``app.Worker`` is an actual class
  and will work as expected when:

  .. code-block:: python

      class Worker(app.Worker):
          ...

- New signal: :signal:`task_success`.

- Multiprocessing logs are now only emitted if the :envvar:`MP_LOG`
  environment variable is set.

- The Celery instance can now be created with a broker URL:

  .. code-block:: python

      app = Celery(broker='redis://')

- Result backends can now be set using a URL.

  Currently only supported by redis. Example use:

  .. code-block:: python

      CELERY_RESULT_BACKEND = 'redis://localhost/1'

- Heartbeat frequency now every 5s, and frequency sent with event.

  The heartbeat frequency is now available in the worker event messages,
  so that clients can decide when to consider workers offline based on
  this value.

- Module celery.actors has been removed, and will be part of cl instead.

- Introduces new ``celery`` command, which is an entry-point for all other
  commands.

  The main program for this command can be run by calling
  ``celery.start()``.

- Annotations now support decorators if the key starts with '@'.

  For example:

  .. code-block:: python

      from functools import wraps

      def debug_args(fun):

          @wraps(fun)
          def _inner(*args, **kwargs):
              print('ARGS: %r' % (args,))
              return fun(*args, **kwargs)
          return _inner

      CELERY_ANNOTATIONS = {
          'tasks.add': {'@__call__': debug_args},
      }

  Also tasks are now always bound by class so that
  annotated methods end up being bound.

- Bug-report now available as a command and broadcast command.

  - Get it from a Python REPL:

    .. code-block:: pycon

        >>> import celery
        >>> print(celery.bugreport())

  - Using the ``celery`` command line program:

    .. code-block:: console

        $ celery report

  - Get it from remote workers:

    .. code-block:: console

        $ celery inspect report

- Module ``celery.log`` moved to :mod:`celery.app.log`.

- Module ``celery.task.control`` moved to :mod:`celery.app.control`.

- New signal: :signal:`task_revoked`

  Sent in the main process when the task is revoked or terminated.

- ``AsyncResult.task_id`` renamed to ``AsyncResult.id``

- ``TasksetResult.taskset_id`` renamed to ``.id``

- ``xmap(task, sequence)`` and ``xstarmap(task, sequence)``

  Returns a list of the results of applying the task function
  to every item in the sequence.

  Example:

  .. code-block:: pycon

      >>> from celery import xstarmap

      >>> xstarmap(add, zip(range(10), range(10))).apply_async()
      [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

- ``chunks(task, sequence, chunksize)``

- ``group.skew(start=, stop=, step=)``

  Skew will skew the countdown for the individual tasks in a group -- for
  example with this group:

  .. code-block:: pycon

      >>> g = group(add.s(i, i) for i in xrange(10))

  Skewing the tasks from 0 seconds to 10 seconds:

  .. code-block:: pycon

      >>> g.skew(stop=10)

  Will have the first task execute in 0 seconds, the second in 1 second,
  the third in 2 seconds and so on.
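  Assuming ``skew`` returns the modified group (that chaining is my
  assumption, not something stated here), the skewed group can then be
  sent off in a single expression:

  .. code-block:: pycon

      >>> g.skew(stop=10).apply_async()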
- 99% test coverage.

- :setting:`CELERY_QUEUES` can now be a list/tuple of :class:`~kombu.Queue`
  instances.

  Internally :attr:`@amqp.queues` is now a mapping of name/Queue instances,
  instead of converting on the fly.

- Can now specify connection for :class:`@control.inspect`.

  .. code-block:: python

      from kombu import Connection

      i = celery.control.inspect(connection=Connection('redis://'))
      i.active_queues()

- :setting:`CELERY_FORCE_EXECV` is now enabled by default.

  If the old behavior is wanted the setting can be set to False,
  or by using the new `--no-execv` option to :program:`celery worker`.

- Deprecated module ``celery.conf`` has been removed.

- The :setting:`CELERY_TIMEZONE` setting now always requires the
  :pypi:`pytz` library to be installed (except if the timezone is set
  to `UTC`).

- The Tokyo Tyrant backend has been removed and is no longer supported.

- Now uses :func:`~kombu.common.maybe_declare` to cache queue declarations.

- There's no longer a global default for the
  :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` setting; it is instead
  set by individual schedulers.

- Worker: now truncates very long message bodies in error reports.

- No longer deep-copies exceptions when trying to serialize errors.

- :envvar:`CELERY_BENCH` environment variable will now also list
  memory usage statistics at worker shutdown.

- Worker: now only ever uses a single timer for all timing needs,
  and instead sets different priorities.

- An exception's arguments are now safely pickled.

  Contributed by Matt Long.

- Worker/Beat no longer logs the start-up banner.

  Previously it would be logged with severity warning;
  now it's only written to stdout.

- The ``contrib/`` directory in the distribution has been renamed to
  ``extra/``.

- :mod:`celery.contrib.migrate`: Many improvements, including:
  filtering, queue migration, and support for acking messages on the broker
  being migrated from.

  Contributed by John Watson.

- Worker: Prefetch count increments are now optimized and grouped together.

- Worker: No longer calls ``consume`` on the remote control command queue
  twice.

  Probably didn't cause any problems, but was unnecessary.

Internals
---------

- ``app.broker_connection`` is now ``app.connection``.

  Both names still work.

- Compatibility modules are now generated dynamically upon use.

  These modules are ``celery.messaging``, ``celery.log``,
  ``celery.decorators`` and ``celery.registry``.

- :mod:`celery.utils` refactored into multiple modules:

  - :mod:`celery.utils.text`
  - :mod:`celery.utils.imports`
  - :mod:`celery.utils.functional`

- Now using :mod:`kombu.utils.encoding` instead of
  :mod:`celery.utils.encoding`.

- Renamed module ``celery.routes`` -> :mod:`celery.app.routes`.

- Renamed package ``celery.db`` -> :mod:`celery.backends.database`.

- Renamed module ``celery.abstract`` -> :mod:`celery.worker.bootsteps`.

- Command line docs are now parsed from the module docstrings.

- Test suite directory has been reorganized.

- :program:`setup.py` now reads docs from the :file:`requirements/`
  directory.

- Celery commands no longer wrap output (Issue #700).

  Contributed by Thomas Johansson.
.. _v300-experimental:

Experimental
============

:mod:`celery.contrib.methods`: Task decorator for methods
----------------------------------------------------------

This is an experimental module containing a task
decorator, and a task decorator filter, that can be used
to create tasks out of methods::

    from celery.contrib.methods import task_method

    class Counter(object):

        def __init__(self):
            self.value = 1

        @celery.task(name='Counter.increment', filter=task_method)
        def increment(self, n=1):
            self.value += n
            return self.value

See :mod:`celery.contrib.methods` for more information.

.. _v300-unscheduled-removals:

Unscheduled Removals
====================

Usually we don't make backward incompatible removals,
but these removals should have no major effect.

- The following settings have been renamed:

  - ``CELERYD_ETA_SCHEDULER`` -> ``CELERYD_TIMER``
  - ``CELERYD_ETA_SCHEDULER_PRECISION`` -> ``CELERYD_TIMER_PRECISION``

.. _v300-deprecations:

Deprecation Time-line Changes
=============================

See the :ref:`deprecation-timeline`.

- The ``celery.backends.pyredis`` compat module has been removed.

  Use :mod:`celery.backends.redis` instead!

- The following undocumented APIs have been moved:

  - ``control.inspect.add_consumer`` -> :meth:`@control.add_consumer`.
  - ``control.inspect.cancel_consumer`` -> :meth:`@control.cancel_consumer`.
  - ``control.inspect.enable_events`` -> :meth:`@control.enable_events`.
  - ``control.inspect.disable_events`` -> :meth:`@control.disable_events`.

  This way ``inspect()`` is only used for commands that don't
  modify anything, while idempotent control commands that make changes
  are on the control objects.

Fixes
=====

- Retry SQLAlchemy backend operations on DatabaseError/OperationalError
  (Issue #634).

- Tasks that called ``retry`` weren't acknowledged if acks-late was enabled.

  Fix contributed by David Markey.

- The message priority argument wasn't properly propagated to Kombu
  (Issue #708).

  Fix contributed by Eran Rundstein.

celery-4.1.0/docs/history/changelog-2.5.rst0000644000175000017500000001244613130607475020356 0ustar omeromer00000000000000
.. _changelog-2.5:

===============================
 Change history for Celery 2.5
===============================

This document contains change notes for bugfix releases in the 2.5.x series;
please see :ref:`whatsnew-2.5` for an overview of what's new in Celery 2.5.

If you're looking for versions prior to 2.5 you should visit our
:ref:`history` of releases.

.. contents::
    :local:

.. _version-2.5.5:

2.5.5
=====
:release-date: 2012-06-06 04:00 p.m. BST
:release-by: Ask Solem

This is a dummy release performed for the following goals:

- Protect against force upgrading to Kombu 2.2.0
- Version parity with :pypi:`django-celery`

.. _version-2.5.3:

2.5.3
=====
:release-date: 2012-04-16 07:00 p.m. BST
:release-by: Ask Solem

* A bug caused messages to be sent with UTC time-stamps even though
  :setting:`CELERY_ENABLE_UTC` wasn't enabled (Issue #636).

* ``celerybeat``: No longer crashes if an entry's args is set to None
  (Issue #657).

* Auto-reload didn't work if a module's ``__file__`` attribute
  was set to the module's ``.pyc`` file (Issue #647).

* Fixes early 2.5 compatibility where ``__package__`` doesn't exist
  (Issue #638).

.. _version-2.5.2:

2.5.2
=====
:release-date: 2012-04-13 04:30 p.m. GMT
:release-by: Ask Solem

.. _v252-news:

News
----

- Now depends on Kombu 2.1.5.

- Django documentation has been moved to the main Celery docs.

  See :ref:`django`.

- New :signal:`celeryd_init` signal can be used to configure workers
  by hostname.
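  For instance, something like this (the node name and the setting being
  changed are illustrative):

  .. code-block:: python

      from celery.signals import celeryd_init

      @celeryd_init.connect(sender='worker12@example.com')
      def configure_worker12(conf=None, **kwargs):
          # Only applied to the worker with this node name.
          conf.CELERYD_PREFETCH_MULTIPLIER = 10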
- Signal.connect can now be used as a decorator.

  Example:

  .. code-block:: python

      from celery.signals import task_sent

      @task_sent.connect
      def on_task_sent(**kwargs):
          print('sent task: %r' % (kwargs,))

- Invalid task messages are now rejected instead of acked.

  This means that they will be moved to the dead-letter queue
  introduced in the latest RabbitMQ version (but must be enabled
  manually, consult the RabbitMQ documentation).

- Internal logging calls have been cleaned up to work
  better with tools like Sentry.

  Contributed by David Cramer.

- New method ``subtask.clone()`` can be used to clone an existing
  subtask with augmented arguments/options.

  Example:

  .. code-block:: pycon

      >>> s = add.subtask((5,))
      >>> new = s.clone(args=(10,), countdown=5)
      >>> new.args
      (10, 5)

      >>> new.options
      {'countdown': 5}

- Chord callbacks are now triggered in eager mode.

.. _v252-fixes:

Fixes
-----

- Programs now verify that the pidfile is actually written correctly
  (Issue #641).

  Hopefully this will crash the worker immediately if the system
  is out of space to store the complete pidfile.

  In addition, we now verify that existing pidfiles contain a new line
  so that a partially written pidfile is detected as broken,
  as before doing:

  .. code-block:: console

      $ echo -n "1" > celeryd.pid

  would cause the worker to think that an existing instance was already
  running (init has pid 1 after all).

- Fixed 2.5 compatibility issue with use of print_exception.

  Fix contributed by Martin Melin.

- Fixed 2.5 compatibility issue with imports.

  Fix contributed by Iurii Kriachko.

- All programs now fix up ``__package__`` when called as main.

  This fixes compatibility with Python 2.5.

  Fix contributed by Martin Melin.

- [celery control|inspect] can now be configured on the command-line.

  Like with the worker it is now possible to configure Celery settings
  on the command-line for celery control|inspect:

  .. code-block:: console

      $ celery inspect -- broker.pool_limit=30

- Version dependency for :pypi:`python-dateutil` fixed to be strict.

  Fix contributed by Thomas Meson.

- ``Task.__call__`` is now optimized away in the task tracer
  rather than when the task class is created.

  This fixes a bug where a custom __call__ may mysteriously disappear.

- Auto-reload's ``inotify`` support has been improved.

  Contributed by Mher Movsisyan.

- The Django broker documentation has been improved.

- Removed confusing warning at top of routing user guide.

.. _version-2.5.1:

2.5.1
=====
:release-date: 2012-03-01 01:00 p.m. GMT
:release-by: Ask Solem

.. _v251-fixes:

Fixes
-----

* Eventlet/Gevent: A small typo caused the worker to hang when
  eventlet/gevent was used; this was because the environment wasn't
  monkey patched early enough.

* Eventlet/Gevent: Another small typo caused the mediator to be started
  with eventlet/gevent, which would make the worker sometimes hang at
  shutdown.

* :mod:`multiprocessing`: Fixed an error occurring if the pool was stopped
  before it was properly started.

* Proxy objects now redirect ``__doc__`` and ``__name__`` so ``help(obj)``
  works.

* Internal timer (timer2) now logs exceptions instead of swallowing them
  (Issue #626).

* celery shell: can now be started with the ``--eventlet`` or ``--gevent``
  options to apply their monkey patches.

.. _version-2.5.0:

2.5.0
=====
:release-date: 2012-02-24 04:00 p.m. GMT
:release-by: Ask Solem

See :ref:`whatsnew-2.5`.

Since the changelog has gained considerable size, we decided to
do things differently this time: by having separate "what's new"
documents for major version changes.
Bugfix releases will still be found in the changelog. celery-4.1.0/docs/_templates/0000755000175000017500000000000013135426347016022 5ustar omeromer00000000000000celery-4.1.0/docs/_templates/sidebardonations.html0000644000175000017500000000640113130607475022237 0ustar omeromer00000000000000

celery-4.1.0/docs/_static/0000755000175000017500000000000013135426347015313 5ustar omeromer00000000000000celery-4.1.0/docs/_static/.keep0000644000175000017500000000000013130607475016224 0ustar omeromer00000000000000celery-4.1.0/docs/templates/0000755000175000017500000000000013135426347015663 5ustar omeromer00000000000000celery-4.1.0/docs/templates/readme.txt0000644000175000017500000000227113130607475017661 0ustar omeromer00000000000000.. image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |license| |wheel| |pyversion| |pyimp| .. include:: ../includes/introduction.txt .. include:: ../includes/installation.txt .. include:: ../includes/resources.txt .. |build-status| image:: https://secure.travis-ci.org/celery/celery.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/celery .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master :target: https://codecov.io/github/celery/celery?branch=master .. |license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.python.org/pypi/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.python.org/pypi/celery/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Support Python implementations. :target: https://pypi.python.org/pypi/celery/ celery-4.1.0/docs/community.rst0000644000175000017500000000167513130607475016452 0ustar omeromer00000000000000.. _community: ======================= Community Resources ======================= This is a list of external blog posts, tutorials, and slides related to Celery. If you have a link that's missing from this list, please contact the mailing-list or submit a patch. .. contents:: :local: .. _community-resources: Resources ========= .. _res-using-celery: Who's using Celery ------------------ https://wiki.github.com/celery/celery/using .. _res-wiki: Wiki ---- https://wiki.github.com/celery/celery/ .. _res-stackoverflow: Celery questions on Stack Overflow ---------------------------------- https://stackoverflow.com/search?q=celery&tab=newest .. _res-mailing-list-archive: Mailing-list Archive: celery-users ---------------------------------- http://blog.gmane.org/gmane.comp.python.amqp.celery.user .. _res-irc-logs: .. _community-news: News ==== This section has moved to the Celery homepage: http://celeryproject.org/community/ celery-4.1.0/README.rst0000644000175000017500000003040513135426314014420 0ustar omeromer00000000000000.. image:: http://docs.celeryproject.org/en/latest/_images/celery-banner-small.png |build-status| |license| |wheel| |pyversion| |pyimp| :Version: 4.1.0 (latentcall) :Web: http://celeryproject.org/ :Download: https://pypi.python.org/pypi/celery/ :Source: https://github.com/celery/celery/ :Keywords: task, queue, job, async, rabbitmq, amqp, redis, python, distributed, actors -- What's a Task Queue? ==================== Task queues are used as a mechanism to distribute work across threads or machines. A task queue's input is a unit of work, called a task, dedicated worker processes then constantly monitor the queue for new work to perform. Celery communicates via messages, usually using a broker to mediate between clients and workers. 
To initiate a task a client puts a message on the queue; the broker
then delivers the message to a worker.

A Celery system can consist of multiple workers and brokers, giving way
to high availability and horizontal scaling.

Celery is written in Python, but the protocol can be implemented in any
language. In addition to Python there's node-celery_ for Node.js,
and a `PHP client`_. Language interoperability can also be achieved
by using webhooks in such a way that the client enqueues a URL
to be requested by a worker.

.. _node-celery: https://github.com/mher/node-celery
.. _`PHP client`: https://github.com/gjedeer/celery-php

What do I need?
===============

Celery version 4.0 runs on:

- Python (2.7, 3.4, 3.5)
- PyPy (5.4, 5.5)

This is the last version to support Python 2.7,
and from the next version (Celery 5.x) Python 3.5 or newer is required.

If you're running an older version of Python, you need to be running
an older version of Celery:

- Python 2.6: Celery series 3.1 or earlier.
- Python 2.5: Celery series 3.0 or earlier.
- Python 2.4 was Celery series 2.2 or earlier.

Celery is a project with minimal funding,
so we don't support Microsoft Windows.
Please don't open any issues related to that platform.

*Celery* is usually used with a message broker to send and receive messages.
The RabbitMQ and Redis transports are feature complete, but there's also
experimental support for a myriad of other solutions, including
using SQLite for local development.

*Celery* can run on a single machine, on multiple machines, or even
across datacenters.

Get Started
===========

If this is the first time you're trying to use Celery, or you're
new to Celery 4.0 coming from previous versions, then you should read our
getting started tutorials:

- `First steps with Celery`_

    Tutorial teaching you the bare minimum needed to get started with Celery.

- `Next steps`_

    A more complete overview, showing more features.

.. _`First steps with Celery`:
    http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html

.. _`Next steps`:
    http://docs.celeryproject.org/en/latest/getting-started/next-steps.html

Celery is...
=============

- **Simple**

    Celery is easy to use and maintain, and does *not need configuration
    files*.

    It has an active, friendly community you can talk to for support,
    like at our `mailing-list`_, or the IRC channel.

    Here's one of the simplest applications you can make::

        from celery import Celery

        app = Celery('hello', broker='amqp://guest@localhost//')

        @app.task
        def hello():
            return 'hello world'

- **Highly Available**

    Workers and clients will automatically retry in the event
    of connection loss or failure, and some brokers support
    HA in the way of *Primary/Primary* or *Primary/Replica* replication.

- **Fast**

    A single Celery process can process millions of tasks a minute,
    with sub-millisecond round-trip latency (using RabbitMQ,
    py-librabbitmq, and optimized settings).

- **Flexible**

    Almost every part of *Celery* can be extended or used on its own:
    custom pool implementations, serializers, compression schemes,
    logging, schedulers, consumers, producers, broker transports,
    and much more.

It supports...
================

- **Message Transports**

    - RabbitMQ_, Redis_, Amazon SQS

- **Concurrency**

    - Prefork, Eventlet_, gevent_, single threaded (``solo``)

- **Result Stores**

    - AMQP, Redis
    - memcached
    - SQLAlchemy, Django ORM
    - Apache Cassandra, IronCache, Elasticsearch

- **Serialization**

    - *pickle*, *json*, *yaml*, *msgpack*.
    - *zlib*, *bzip2* compression.
    - Cryptographic message signing.
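As a quick illustration of how these pieces combine, a small app wired up
with a broker, a result store, and an explicit serializer might look like
this (the URLs and settings shown are illustrative, not recommendations)::

    from celery import Celery

    app = Celery('proj',
                 broker='amqp://guest@localhost//',
                 backend='redis://localhost/0')

    # Accept and produce only json-serialized task messages.
    app.conf.update(
        task_serializer='json',
        accept_content=['json'],
    )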
.. _`Eventlet`: http://eventlet.net/
.. _`gevent`: http://gevent.org/

.. _RabbitMQ: https://rabbitmq.com
.. _Redis: https://redis.io
.. _SQLAlchemy: http://sqlalchemy.org

Framework Integration
=====================

Celery is easy to integrate with web frameworks, some of which even have
integration packages:

    +--------------------+------------------------+
    | `Django`_          | not needed             |
    +--------------------+------------------------+
    | `Pyramid`_         | `pyramid_celery`_      |
    +--------------------+------------------------+
    | `Pylons`_          | `celery-pylons`_       |
    +--------------------+------------------------+
    | `Flask`_           | not needed             |
    +--------------------+------------------------+
    | `web2py`_          | `web2py-celery`_       |
    +--------------------+------------------------+
    | `Tornado`_         | `tornado-celery`_      |
    +--------------------+------------------------+

The integration packages aren't strictly necessary, but they can make
development easier, and sometimes they add important hooks like closing
database connections at ``fork``.

.. _`Django`: https://djangoproject.com/
.. _`Pylons`: http://pylonsproject.org/
.. _`Flask`: http://flask.pocoo.org/
.. _`web2py`: http://web2py.com/
.. _`Bottle`: https://bottlepy.org/
.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
.. _`pyramid_celery`: https://pypi.python.org/pypi/pyramid_celery/
.. _`celery-pylons`: https://pypi.python.org/pypi/celery-pylons
.. _`web2py-celery`: https://code.google.com/p/web2py-celery/
.. _`Tornado`: http://www.tornadoweb.org/
.. _`tornado-celery`: https://github.com/mher/tornado-celery/

.. _celery-documentation:

Documentation
=============

The `latest documentation`_ is hosted at Read The Docs, containing user
guides, tutorials, and an API reference.

.. _`latest documentation`: http://docs.celeryproject.org/en/latest/

.. _celery-installation:

Installation
============

You can install Celery either via the Python Package Index (PyPI)
or from source.

To install using ``pip``:

::

    $ pip install -U Celery

.. _bundles:

Bundles
-------

Celery also defines a group of bundles that can be used
to install Celery and the dependencies for a given feature.

You can specify these in your requirements or on the ``pip``
command-line by using brackets. Multiple bundles can be specified by
separating them by commas.

::

    $ pip install "celery[librabbitmq]"

    $ pip install "celery[librabbitmq,redis,auth,msgpack]"

The following bundles are available:

Serializers
~~~~~~~~~~~

:``celery[auth]``:
    for using the ``auth`` security serializer.

:``celery[msgpack]``:
    for using the msgpack serializer.

:``celery[yaml]``:
    for using the yaml serializer.

Concurrency
~~~~~~~~~~~

:``celery[eventlet]``:
    for using the ``eventlet`` pool.

:``celery[gevent]``:
    for using the ``gevent`` pool.

Transports and Backends
~~~~~~~~~~~~~~~~~~~~~~~

:``celery[librabbitmq]``:
    for using the librabbitmq C library.

:``celery[redis]``:
    for using Redis as a message transport or as a result backend.

:``celery[sqs]``:
    for using Amazon SQS as a message transport (*experimental*).

:``celery[tblib]``:
    for using the ``task_remote_tracebacks`` feature.

:``celery[memcache]``:
    for using Memcached as a result backend (using ``pylibmc``).

:``celery[pymemcache]``:
    for using Memcached as a result backend (pure-Python implementation).

:``celery[cassandra]``:
    for using Apache Cassandra as a result backend with DataStax driver.

:``celery[couchbase]``:
    for using Couchbase as a result backend.

:``celery[elasticsearch]``:
    for using Elasticsearch as a result backend.

:``celery[riak]``:
    for using Riak as a result backend.
:``celery[zookeeper]``:
    for using Zookeeper as a message transport.

:``celery[sqlalchemy]``:
    for using SQLAlchemy as a result backend (*supported*).

:``celery[pyro]``:
    for using the Pyro4 message transport (*experimental*).

:``celery[slmq]``:
    for using the SoftLayer Message Queue transport (*experimental*).

:``celery[consul]``:
    for using the Consul.io Key/Value store as a message transport
    or result backend (*experimental*).

:``celery[django]``:
    specifies the lowest version possible for Django support.

    You should probably not use this in your requirements; it's here
    for informational purposes only.

.. _celery-installing-from-source:

Downloading and installing from source
--------------------------------------

Download the latest version of Celery from PyPI:

https://pypi.python.org/pypi/celery/

You can install it by doing the following:

::

    $ tar xvfz celery-0.0.0.tar.gz
    $ cd celery-0.0.0
    $ python setup.py build
    # python setup.py install

The last command must be executed as a privileged user if
you aren't currently using a virtualenv.

.. _celery-installing-from-git:

Using the development version
-----------------------------

With pip
~~~~~~~~

The Celery development version also requires the development
versions of ``kombu``, ``amqp``, ``billiard``, and ``vine``.

You can install the latest snapshot of these using the following
pip commands:

::

    $ pip install https://github.com/celery/celery/zipball/master#egg=celery
    $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
    $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
    $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu
    $ pip install https://github.com/celery/vine/zipball/master#egg=vine

With git
~~~~~~~~

Please see the Contributing section.

.. _getting-help:

Getting Help
============

.. _mailing-list:

Mailing list
------------

For discussions about the usage, development, and future of Celery,
please join the `celery-users`_ mailing list.

.. _`celery-users`: https://groups.google.com/group/celery-users/

.. _irc-channel:

IRC
---

Come chat with us on IRC. The **#celery** channel is located at the
`Freenode`_ network.

.. _`Freenode`: https://freenode.net

.. _bug-tracker:

Bug tracker
===========

If you have any suggestions, bug reports, or annoyances, please report
them to our issue tracker at https://github.com/celery/celery/issues/

.. _wiki:

Wiki
====

https://wiki.github.com/celery/celery/

.. _contributing-short:

Contributing
============

Development of `celery` happens at GitHub: https://github.com/celery/celery

You're highly encouraged to participate in the development
of `celery`. If you don't like GitHub (for some reason) you're welcome
to send regular patches.

Be sure to also read the `Contributing to Celery`_ section in the
documentation.

.. _`Contributing to Celery`:
    http://docs.celeryproject.org/en/master/contributing.html

.. _license:

License
=======

This software is licensed under the `New BSD License`. See the ``LICENSE``
file in the top distribution directory for the full license text.

.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround

.. |build-status| image:: https://secure.travis-ci.org/celery/celery.png?branch=master
    :alt: Build status
    :target: https://travis-ci.org/celery/celery

.. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master
    :target: https://codecov.io/github/celery/celery?branch=master

..
|license| image:: https://img.shields.io/pypi/l/celery.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/celery.svg :alt: Celery can be installed via wheel :target: https://pypi.python.org/pypi/celery/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg :alt: Supported Python versions. :target: https://pypi.python.org/pypi/celery/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg :alt: Support Python implementations. :target: https://pypi.python.org/pypi/celery/ celery-4.1.0/extra/0000755000175000017500000000000013135426347014060 5ustar omeromer00000000000000celery-4.1.0/extra/bash-completion/0000755000175000017500000000000013135426347017144 5ustar omeromer00000000000000celery-4.1.0/extra/bash-completion/celery.bash0000644000175000017500000001014113130607475021261 0ustar omeromer00000000000000# This is a bash completion script for celery # Redirect it to a file, then source it or copy it to /etc/bash_completion.d # to get tab completion. celery must be on your PATH for this to work. _celery() { local cur basep opts base kval kkey loglevels prevp in_opt controlargs local pools COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prevp="${COMP_WORDS[COMP_CWORD-1]}" basep="${COMP_WORDS[1]}" opts="worker events beat shell multi amqp status inspect control purge list migrate call result report upgrade flower graph logtool help" fargs="--app= --broker= --loader= --config= --version" dopts="--detach --umask= --gid= --uid= --pidfile= --logfile= --loglevel= --executable=" controlargs="--timeout --destination" pools="prefork eventlet gevent solo" loglevels="critical error warning info debug" in_opt=0 # find the current sub-command, store in basep' for index in $(seq 1 $((${#COMP_WORDS[@]} - 2))) do basep=${COMP_WORDS[$index]} if [ "${basep:0:2}" != "--" ]; then break; fi done if [ "${cur:0:2}" == "--" -a "$cur" != "${cur//=}" ]; then in_opt=1 kkey="${cur%=*}" kval="${cur#*=}" elif [ "${prevp:0:1}" == "-" ]; then in_opt=1 kkey="$prevp" kval="$cur" fi if [ $in_opt -eq 1 ]; then case "${kkey}" in --uid|-u) COMPREPLY=( $(compgen -u -- "$kval") ) return 0 ;; --gid|-g) COMPREPLY=( $(compgen -g -- "$kval") ) return 0 ;; --pidfile|--logfile|-p|-f|--statedb|-S|-s|--schedule-filename) COMPREPLY=( $(compgen -f -- "$kval") ) return 0 ;; --workdir) COMPREPLY=( $(compgen -d -- "$kval") ) return 0 ;; --loglevel|-l) COMPREPLY=( $(compgen -W "$loglevels" -- "$kval") ) return 0 ;; --pool|-P) COMPREPLY=( $(compgen -W "$pools" -- "$kval") ) return 0 ;; *) ;; esac fi case "${basep}" in worker) COMPREPLY=( $(compgen -W '--concurrency= --pool= --purge --logfile= --loglevel= --hostname= --beat --schedule= --scheduler= --statedb= --events --time-limit= --soft-time-limit= --max-tasks-per-child= --queues= --include= --pidfile= --autoscale $fargs' -- ${cur} ) ) return 0 ;; inspect) COMPREPLY=( $(compgen -W 'active active_queues ping registered report reserved revoked scheduled stats --help $controlargs $fargs' -- ${cur}) ) return 0 ;; control) COMPREPLY=( $(compgen -W 'add_consumer autoscale cancel_consumer disable_events enable_events pool_grow pool_shrink rate_limit time_limit --help $controlargs $fargs' -- ${cur}) ) return 0 ;; multi) COMPREPLY=( $(compgen -W 'start restart stopwait stop show kill names expand get help --quiet --nosplash --verbose --no-color --help $fargs' -- ${cur} ) ) return 0 ;; amqp) COMPREPLY=( $(compgen -W 'queue.declare queue.purge exchange.delete basic.publish exchange.declare 
queue.delete queue.bind basic.get --help $fargs' -- ${cur} )) return 0 ;; list) COMPREPLY=( $(compgen -W 'bindings $fargs' -- ${cur} ) ) return 0 ;; shell) COMPREPLY=( $(compgen -W '--ipython --bpython --python --without-tasks --eventlet --gevent $fargs' -- ${cur} ) ) return 0 ;; beat) COMPREPLY=( $(compgen -W '--schedule= --scheduler= --max-interval= $dopts $fargs' -- ${cur} )) return 0 ;; events) COMPREPLY=( $(compgen -W '--dump --camera= --freq= --maxrate= $dopts $fargs' -- ${cur})) return 0 ;; *) ;; esac COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur})) return 0 } complete -F _celery celery celery-4.1.0/extra/systemd/0000755000175000017500000000000013135426347015550 5ustar omeromer00000000000000celery-4.1.0/extra/systemd/celery.service0000644000175000017500000000126713130607475020421 0ustar omeromer00000000000000[Unit] Description=Celery Service After=network.target [Service] Type=forking User=celery Group=celery EnvironmentFile=-/etc/conf.d/celery WorkingDirectory=/opt/celery ExecStart=/bin/sh -c '${CELERY_BIN} multi start $CELERYD_NODES \ -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS' ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \ --pidfile=${CELERYD_PID_FILE}' ExecReload=/bin/sh -c '${CELERY_BIN} multi restart $CELERYD_NODES \ -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS' [Install] WantedBy=multi-user.target celery-4.1.0/extra/systemd/celery.conf0000644000175000017500000000043713130607475017704 0ustar omeromer00000000000000# See # http://docs.celeryproject.org/en/latest/userguide/daemonizing.html#usage-systemd CELERY_APP="proj" CELERYD_NODES="worker" CELERYD_OPTS="" CELERY_BIN="/usr/bin/celery" CELERYD_PID_FILE="/var/run/celery/%n.pid" CELERYD_LOG_FILE="/var/log/celery/%n%I.log" CELERYD_LOG_LEVEL="INFO" celery-4.1.0/extra/systemd/celery.tmpfiles0000644000175000017500000000011613130607475020574 0ustar omeromer00000000000000d /var/run/celery 0755 celery celery - d /var/log/celery 0755 celery celery - celery-4.1.0/extra/zsh-completion/0000755000175000017500000000000013135426347017033 5ustar omeromer00000000000000celery-4.1.0/extra/zsh-completion/celery.zsh0000644000175000017500000001414413130607475021046 0ustar omeromer00000000000000# This is a zsh completion script for Celery # It has to be installed as follows: # # Alternative A) Copy the script to your zsh site-functions directory (often # ``/usr/share/zsh/site-functions``) and name the script ``_celery`` # # Alternative B). Or, use this file as a oh-my-zsh plugin (rename the script # to ``_celery``), and add it to .zshrc: plugins=(celery git osx ruby) # _celery () { local -a _1st_arguments ifargs dopts controlargs typeset -A opt_args _1st_arguments=('worker' 'events' 'beat' 'shell' 'multi' 'amqp' 'status' 'inspect' \ 'control' 'purge' 'list' 'migrate' 'call' 'result' 'report' \ 'graph', 'logtool', 'help') ifargs=('--app=' '--broker=' '--loader=' '--config=' '--version') dopts=('--detach' '--umask=' '--gid=' '--uid=' '--pidfile=' '--logfile=' '--loglevel=') controlargs=('--timeout' '--destination') _arguments \ '(-A --app=)'{-A,--app}'[app instance to use (e.g., module.attr_name):APP]' \ '(-b --broker=)'{-b,--broker}'[url to broker. 
default is "amqp://guest@localhost//":BROKER]' \ '(--loader)--loader[name of custom loader class to use.:LOADER]' \ '(--config)--config[Name of the configuration module:CONFIG]' \ '(--workdir)--workdir[Optional directory to change to after detaching.:WORKING_DIRECTORY]' \ '(-q --quiet)'{-q,--quiet}'[Don"t show as much output.]' \ '(-C --no-color)'{-C,--no-color}'[Don"t display colors.]' \ '(--version)--version[show program"s version number and exit]' \ '(- : *)'{-h,--help}'[show this help message and exit]' \ '*:: :->subcmds' && return 0 if (( CURRENT == 1 )); then _describe -t commands "celery sub-command" _1st_arguments return fi case "$words[1]" in worker) _arguments \ '(-C --concurrency=)'{-C,--concurrency=}'[Number of child processes processing the queue. The default is the number of CPUs.]' \ '(--pool)--pool=:::(prefork eventlet gevent solo)' \ '(--purge --discard)'{--discard,--purge}'[Purges all waiting tasks before the daemon is started.]' \ '(-f --logfile=)'{-f,--logfile=}'[Path to log file. If no logfile is specified, stderr is used.]' \ '(--loglevel=)--loglevel=:::(critical error warning info debug)' \ '(-N --hostname=)'{-N,--hostname=}'[Set custom hostname, e.g., "foo@example.com".]' \ '(-B --beat)'{-B,--beat}'[Also run the celerybeat periodic task scheduler.]' \ '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database if running with the -B option. Defaults to celerybeat-schedule.]' \ '(-S --statedb=)'{-S,--statedb=}'[Path to the state database.Default: None]' \ '(-E --events)'{-E,--events}'[Send events that can be captured by monitors like celeryev, celerymon, and others.]' \ '(--time-limit=)--time-limit=[nables a hard time limit (in seconds int/float) for tasks]' \ '(--soft-time-limit=)--soft-time-limit=[Enables a soft time limit (in seconds int/float) for tasks]' \ '(--max-tasks-per-child=)--max-tasks-per-child=[Maximum number of tasks a pool worker can execute before it"s terminated and replaced by a new worker.]' \ '(-Q --queues=)'{-Q,--queues=}'[List of queues to enable for this worker, separated by comma. 
By default all configured queues are enabled.]' \ '(-I --include=)'{-I,--include=}'[Comma separated list of additional modules to import.]' \ '(--pidfile=)--pidfile=[Optional file used to store the process pid.]' \ '(--autoscale=)--autoscale=[Enable autoscaling by providing max_concurrency, min_concurrency.]' \ compadd -a ifargs ;; inspect) _values -s \ 'active[dump active tasks (being processed)]' \ 'active_queues[dump queues being consumed from]' \ 'ping[ping worker(s)]' \ 'registered[dump of registered tasks]' \ 'report[get bugreport info]' \ 'reserved[dump reserved tasks (waiting to be processed)]' \ 'revoked[dump of revoked task ids]' \ 'scheduled[dump scheduled tasks (eta/countdown/retry)]' \ 'stats[dump worker statistics]' compadd -a controlargs ifargs ;; control) _values -s \ 'add_consumer[tell worker(s) to start consuming a queue]' \ 'autoscale[change autoscale settings]' \ 'cancel_consumer[tell worker(s) to stop consuming a queue]' \ 'disable_events[tell worker(s) to disable events]' \ 'enable_events[tell worker(s) to enable events]' \ 'pool_grow[start more pool processes]' \ 'pool_shrink[use less pool processes]' \ 'rate_limit[tell worker(s) to modify the rate limit for a task type]' \ 'time_limit[tell worker(s) to modify the time limit for a task type.]' compadd -a controlargs ifargs ;; multi) _values -s \ '--nosplash[Don"t display program info.]' \ '--verbose[Show more output.]' \ '--no-color[Don"t display colors.]' \ '--quiet[Don"t show as much output.]' \ 'start' 'restart' 'stopwait' 'stop' 'show' \ 'names' 'expand' 'get' 'kill' compadd -a ifargs ;; amqp) _values -s \ 'queue.declare' 'queue.purge' 'exchange.delete' 'basic.publish' \ 'exchange.declare' 'queue.delete' 'queue.bind' 'basic.get' ;; list) _values -s, 'bindings' ;; shell) _values -s \ '--ipython[force iPython.]' \ '--bpython[force bpython.]' \ '--python[force default Python shell.]' \ '--without-tasks[don"t add tasks to locals.]' \ '--eventlet[use eventlet.]' \ '--gevent[use gevent.]' compadd -a ifargs ;; beat) _arguments \ '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database. Defaults to celerybeat-schedule.]' \ '(-S --scheduler=)'{-S,--scheduler=}'[Scheduler class to use. Default is celery.beat.PersistentScheduler.]' \ '(--max-interval)--max-interval[]' compadd -a dopts ifargs ;; events) _arguments \ '(-d --dump)'{-d,--dump}'[Dump events to stdout.]' \ '(-c --camera=)'{-c,--camera=}'[Take snapshots of events using this camera.]' \ '(-F --frequency=)'{-F,--frequency=}'[Camera: Shutter frequency.
Default is every 1.0 seconds.]' \ '(-r --maxrate=)'{-r,--maxrate=}'[Camera: Optional shutter rate limit (e.g., 10/m).]' compadd -a dopts ifargs ;; *) ;; esac } celery-4.1.0/extra/macOS/0000755000175000017500000000000013135426347015062 5ustar omeromer00000000000000celery-4.1.0/extra/macOS/org.celeryq.beat.plist0000644000175000017500000000135713130607475021307 0ustar omeromer00000000000000 Disabled GroupName celery-beat KeepAlive Label org.celeryq.beat Program celery ProgramArguments beat --loglevel=WARNING RunAtLoad Umask 7 UserName nobody WorkingDirectory / celery-4.1.0/extra/macOS/org.celeryq.worker.plist0000644000175000017500000000136513130607475021704 0ustar omeromer00000000000000 Disabled GroupName celery-worker KeepAlive Label org.celeryq.worker Program celery ProgramArguments worker --loglevel=WARNING RunAtLoad Umask 7 UserName nobody WorkingDirectory / celery-4.1.0/extra/generic-init.d/0000755000175000017500000000000013135426347016657 5ustar omeromer00000000000000celery-4.1.0/extra/generic-init.d/celerybeat0000755000175000017500000002147313130607475020731 0ustar omeromer00000000000000#!/bin/sh -e # ========================================================= # celerybeat - Starts the Celery periodic task scheduler. # ========================================================= # # :Usage: /etc/init.d/celerybeat {start|stop|force-reload|restart|try-restart|status} # :Configuration file: /etc/default/celerybeat or /etc/default/celeryd # # See http://docs.celeryproject.org/en/latest/userguide/daemonizing.html#generic-init-scripts ### BEGIN INIT INFO # Provides: celerybeat # Required-Start: $network $local_fs $remote_fs # Required-Stop: $network $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: celery periodic task scheduler ### END INIT INFO # Cannot use set -e/bash -e since the kill -0 command will abort # abnormally in the absence of a valid process ID. #set -e VERSION=10.1 echo "celery init v${VERSION}." if [ $(id -u) -ne 0 ]; then echo "Error: This program can only be used by the root user." echo " Unprivileged users must use 'celery beat --detach'" exit 1 fi origin_is_runlevel_dir () { set +e dirname $0 | grep -q "/etc/rc.\.d" echo $? } # Can be a runlevel symlink (e.g., S02celeryd) if [ $(origin_is_runlevel_dir) -eq 0 ]; then SCRIPT_FILE=$(readlink "$0") else SCRIPT_FILE="$0" fi SCRIPT_NAME="$(basename "$SCRIPT_FILE")" # /etc/init.d/celerybeat: start and stop the celery periodic task scheduler daemon. # Make sure executable configuration script is owned by root _config_sanity() { local path="$1" local owner=$(ls -ld "$path" | awk '{print $3}') local iwgrp=$(ls -ld "$path" | cut -b 6) local iwoth=$(ls -ld "$path" | cut -b 9) if [ "$(id -u $owner)" != "0" ]; then echo "Error: Config script '$path' must be owned by root!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change ownership of the script:" echo " $ sudo chown root '$path'" exit 1 fi if [ "$iwoth" != "-" ]; then # S_IWOTH echo "Error: Config script '$path' cannot be writable by others!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent.
When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi if [ "$iwgrp" != "-" ]; then # S_IWGRP echo "Error: Config script '$path' cannot be writable by group!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi } scripts="" if test -f /etc/default/celeryd; then scripts="/etc/default/celeryd" _config_sanity /etc/default/celeryd . /etc/default/celeryd fi EXTRA_CONFIG="/etc/default/${SCRIPT_NAME}" if test -f "$EXTRA_CONFIG"; then scripts="$scripts, $EXTRA_CONFIG" _config_sanity "$EXTRA_CONFIG" . "$EXTRA_CONFIG" fi echo "Using configuration: $scripts" CELERY_BIN=${CELERY_BIN:-"celery"} DEFAULT_USER="celery" DEFAULT_PID_FILE="/var/run/celery/beat.pid" DEFAULT_LOG_FILE="/var/log/celery/beat.log" DEFAULT_LOG_LEVEL="INFO" DEFAULT_CELERYBEAT="$CELERY_BIN beat" CELERYBEAT=${CELERYBEAT:-$DEFAULT_CELERYBEAT} CELERYBEAT_LOG_LEVEL=${CELERYBEAT_LOG_LEVEL:-${CELERYBEAT_LOGLEVEL:-$DEFAULT_LOG_LEVEL}} CELERYBEAT_SU=${CELERYBEAT_SU:-"su"} CELERYBEAT_SU_ARGS=${CELERYBEAT_SU_ARGS:-""} # Sets --app argument for CELERY_BIN CELERY_APP_ARG="" if [ ! -z "$CELERY_APP" ]; then CELERY_APP_ARG="--app=$CELERY_APP" fi CELERYBEAT_USER=${CELERYBEAT_USER:-${CELERYD_USER:-$DEFAULT_USER}} # Set CELERY_CREATE_DIRS to always create log/pid dirs. CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0} CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS if [ -z "$CELERYBEAT_PID_FILE" ]; then CELERYBEAT_PID_FILE="$DEFAULT_PID_FILE" CELERY_CREATE_RUNDIR=1 fi if [ -z "$CELERYBEAT_LOG_FILE" ]; then CELERYBEAT_LOG_FILE="$DEFAULT_LOG_FILE" CELERY_CREATE_LOGDIR=1 fi export CELERY_LOADER CELERYBEAT_OPTS="$CELERYBEAT_OPTS -f $CELERYBEAT_LOG_FILE -l $CELERYBEAT_LOG_LEVEL" if [ -n "$2" ]; then CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2" fi CELERYBEAT_LOG_DIR=`dirname $CELERYBEAT_LOG_FILE` CELERYBEAT_PID_DIR=`dirname $CELERYBEAT_PID_FILE` # Extra start-stop-daemon options, like user/group. CELERYBEAT_CHDIR=${CELERYBEAT_CHDIR:-$CELERYD_CHDIR} if [ -n "$CELERYBEAT_CHDIR" ]; then DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYBEAT_CHDIR" fi export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" check_dev_null() { if [ ! -c /dev/null ]; then echo "/dev/null is not a character device!" exit 75 # EX_TEMPFAIL fi } maybe_die() { if [ $? -ne 0 ]; then echo "Exiting: $*" exit 77 # EX_NOPERM fi } create_default_dir() { if [ ! 
-d "$1" ]; then echo "- Creating default directory: '$1'" mkdir -p "$1" maybe_die "Couldn't create directory $1" echo "- Changing permissions of '$1' to 02755" chmod 02755 "$1" maybe_die "Couldn't change permissions for $1" if [ -n "$CELERYBEAT_USER" ]; then echo "- Changing owner of '$1' to '$CELERYBEAT_USER'" chown "$CELERYBEAT_USER" "$1" maybe_die "Couldn't change owner of $1" fi if [ -n "$CELERYBEAT_GROUP" ]; then echo "- Changing group of '$1' to '$CELERYBEAT_GROUP'" chgrp "$CELERYBEAT_GROUP" "$1" maybe_die "Couldn't change group of $1" fi fi } check_paths() { if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then create_default_dir "$CELERYBEAT_LOG_DIR" fi if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then create_default_dir "$CELERYBEAT_PID_DIR" fi } create_paths () { create_default_dir "$CELERYBEAT_LOG_DIR" create_default_dir "$CELERYBEAT_PID_DIR" } is_running() { pid=$1 ps $pid > /dev/null 2>&1 } wait_pid () { pid=$1 forever=1 i=0 while [ $forever -gt 0 ]; do if ! is_running $pid; then echo "OK" forever=0 else kill -TERM "$pid" i=$((i + 1)) if [ $i -gt 60 ]; then echo "ERROR" echo "Timed out while stopping (30s)" forever=0 else sleep 0.5 fi fi done } stop_beat () { echo -n "Stopping ${SCRIPT_NAME}... " if [ -f "$CELERYBEAT_PID_FILE" ]; then wait_pid $(cat "$CELERYBEAT_PID_FILE") else echo "NOT RUNNING" fi } _chuid () { ${CELERYBEAT_SU} ${CELERYBEAT_SU_ARGS} \ "$CELERYBEAT_USER" -c "$CELERYBEAT $*" } start_beat () { echo "Starting ${SCRIPT_NAME}..." _chuid $CELERY_APP_ARG $CELERYBEAT_OPTS $DAEMON_OPTS --detach \ --pidfile="$CELERYBEAT_PID_FILE" } check_status () { local failed= local pid_file=$CELERYBEAT_PID_FILE if [ ! -e $pid_file ]; then echo "${SCRIPT_NAME} is down: no pid file found" failed=true elif [ ! -r $pid_file ]; then echo "${SCRIPT_NAME} is in unknown state, user cannot read pid file." failed=true else local pid=`cat "$pid_file"` local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'` if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then echo "${SCRIPT_NAME}: bad pid file ($pid_file)" failed=true else local failed= kill -0 $pid 2> /dev/null || failed=true if [ "$failed" ]; then echo "${SCRIPT_NAME} (pid $pid) is down, but pid file exists!" failed=true else echo "${SCRIPT_NAME} (pid $pid) is up..." fi fi fi [ "$failed" ] && exit 1 || exit 0 } case "$1" in start) check_dev_null check_paths start_beat ;; stop) check_paths stop_beat ;; reload|force-reload) echo "Use start+stop" ;; status) check_status ;; restart) echo "Restarting celery periodic task scheduler" check_paths stop_beat && check_dev_null && start_beat ;; create-paths) check_dev_null create_paths ;; check-paths) check_dev_null check_paths ;; *) echo "Usage: /etc/init.d/${SCRIPT_NAME} {start|stop|restart|create-paths|status}" exit 64 # EX_USAGE ;; esac exit 0 celery-4.1.0/extra/generic-init.d/celeryd0000755000175000017500000002505113130607475020235 0ustar omeromer00000000000000#!/bin/sh -e # ============================================ # celeryd - Starts the Celery worker daemon. 
# ============================================ # # :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status} # :Configuration file: /etc/default/celeryd (or /usr/local/etc/celeryd on BSD) # # See http://docs.celeryproject.org/en/latest/userguide/daemonizing.html#generic-init-scripts ### BEGIN INIT INFO # Provides: celeryd # Required-Start: $network $local_fs $remote_fs # Required-Stop: $network $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: celery task worker daemon ### END INIT INFO # # # To implement separate init-scripts, copy this script and give it a different # name. That is, if your new application named "little-worker" needs an init script, # you should use: # # cp /etc/init.d/celeryd /etc/init.d/little-worker # # You can then configure this by manipulating /etc/default/little-worker. # VERSION=10.1 echo "celery init v${VERSION}." if [ $(id -u) -ne 0 ]; then echo "Error: This program can only be used by the root user." echo " Unprivileged users must use the 'celery multi' utility, " echo " or 'celery worker --detach'." exit 1 fi origin_is_runlevel_dir () { set +e dirname $0 | grep -q "/etc/rc.\.d" echo $? } # Can be a runlevel symlink (e.g., S02celeryd) if [ $(origin_is_runlevel_dir) -eq 0 ]; then SCRIPT_FILE=$(readlink "$0") else SCRIPT_FILE="$0" fi SCRIPT_NAME="$(basename "$SCRIPT_FILE")" DEFAULT_USER="celery" DEFAULT_PID_FILE="/var/run/celery/%n.pid" DEFAULT_LOG_FILE="/var/log/celery/%n%I.log" DEFAULT_LOG_LEVEL="INFO" DEFAULT_NODES="celery" DEFAULT_CELERYD="-m celery worker --detach" if [ -d "/etc/default" ]; then CELERY_CONFIG_DIR="/etc/default" else CELERY_CONFIG_DIR="/usr/local/etc" fi CELERY_DEFAULTS=${CELERY_DEFAULTS:-"$CELERY_CONFIG_DIR/${SCRIPT_NAME}"} # Make sure executable configuration script is owned by root _config_sanity() { local path="$1" local owner=$(ls -ld "$path" | awk '{print $3}') local iwgrp=$(ls -ld "$path" | cut -b 6) local iwoth=$(ls -ld "$path" | cut -b 9) if [ "$(id -u $owner)" != "0" ]; then echo "Error: Config script '$path' must be owned by root!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change ownership of the script:" echo " $ sudo chown root '$path'" exit 1 fi if [ "$iwoth" != "-" ]; then # S_IWOTH echo "Error: Config script '$path' cannot be writable by others!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi if [ "$iwgrp" != "-" ]; then # S_IWGRP echo "Error: Config script '$path' cannot be writable by group!" echo echo "Resolution:" echo "Review the file carefully, and make sure it hasn't been " echo "modified with malicious intent. When sure the " echo "script is safe to execute with superuser privileges " echo "you can change the scripts permissions:" echo " $ sudo chmod 640 '$path'" exit 1 fi } if [ -f "$CELERY_DEFAULTS" ]; then _config_sanity "$CELERY_DEFAULTS" echo "Using config script: $CELERY_DEFAULTS" . "$CELERY_DEFAULTS" fi # Sets --app argument for CELERY_BIN CELERY_APP_ARG="" if [ !
-z "$CELERY_APP" ]; then CELERY_APP_ARG="--app=$CELERY_APP" fi # Options to su # can be used to enable login shell (CELERYD_SU_ARGS="-l"), # or even to use start-stop-daemon instead of su. CELERYD_SU=${CELERY_SU:-"su"} CELERYD_SU_ARGS=${CELERYD_SU_ARGS:-""} CELERYD_USER=${CELERYD_USER:-$DEFAULT_USER} # Set CELERY_CREATE_DIRS to always create log/pid dirs. CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0} CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS if [ -z "$CELERYD_PID_FILE" ]; then CELERYD_PID_FILE="$DEFAULT_PID_FILE" CELERY_CREATE_RUNDIR=1 fi if [ -z "$CELERYD_LOG_FILE" ]; then CELERYD_LOG_FILE="$DEFAULT_LOG_FILE" CELERY_CREATE_LOGDIR=1 fi CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}} CELERY_BIN=${CELERY_BIN:-"celery"} CELERYD_MULTI=${CELERYD_MULTI:-"$CELERY_BIN multi"} CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES} export CELERY_LOADER if [ -n "$2" ]; then CELERYD_OPTS="$CELERYD_OPTS $2" fi CELERYD_LOG_DIR=`dirname $CELERYD_LOG_FILE` CELERYD_PID_DIR=`dirname $CELERYD_PID_FILE` # Extra start-stop-daemon options, like user/group. if [ -n "$CELERYD_CHDIR" ]; then DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYD_CHDIR" fi check_dev_null() { if [ ! -c /dev/null ]; then echo "/dev/null is not a character device!" exit 75 # EX_TEMPFAIL fi } maybe_die() { if [ $? -ne 0 ]; then echo "Exiting: $* (errno $?)" exit 77 # EX_NOPERM fi } create_default_dir() { if [ ! -d "$1" ]; then echo "- Creating default directory: '$1'" mkdir -p "$1" maybe_die "Couldn't create directory $1" echo "- Changing permissions of '$1' to 02755" chmod 02755 "$1" maybe_die "Couldn't change permissions for $1" if [ -n "$CELERYD_USER" ]; then echo "- Changing owner of '$1' to '$CELERYD_USER'" chown "$CELERYD_USER" "$1" maybe_die "Couldn't change owner of $1" fi if [ -n "$CELERYD_GROUP" ]; then echo "- Changing group of '$1' to '$CELERYD_GROUP'" chgrp "$CELERYD_GROUP" "$1" maybe_die "Couldn't change group of $1" fi fi } check_paths() { if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then create_default_dir "$CELERYD_LOG_DIR" fi if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then create_default_dir "$CELERYD_PID_DIR" fi } create_paths() { create_default_dir "$CELERYD_LOG_DIR" create_default_dir "$CELERYD_PID_DIR" } export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" _get_pidfiles () { # note: multi < 3.1.14 output to stderr, not stdout, hence the redirect. ${CELERYD_MULTI} expand "${CELERYD_PID_FILE}" ${CELERYD_NODES} 2>&1 } _get_pids() { found_pids=0 my_exitcode=0 for pidfile in $(_get_pidfiles); do local pid=`cat "$pidfile"` local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'` if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then echo "bad pid file ($pidfile)" one_failed=true my_exitcode=1 else found_pids=1 echo "$pid" fi if [ $found_pids -eq 0 ]; then echo "${SCRIPT_NAME}: All nodes down" exit $my_exitcode fi done } _chuid () { ${CELERYD_SU} ${CELERYD_SU_ARGS} "$CELERYD_USER" -c "$CELERYD_MULTI $*" } start_workers () { if [ ! 
-z "$CELERYD_ULIMIT" ]; then ulimit $CELERYD_ULIMIT fi _chuid $* start $CELERYD_NODES $DAEMON_OPTS \ --pidfile="$CELERYD_PID_FILE" \ --logfile="$CELERYD_LOG_FILE" \ --loglevel="$CELERYD_LOG_LEVEL" \ $CELERY_APP_ARG \ $CELERYD_OPTS } dryrun () { (C_FAKEFORK=1 start_workers --verbose) } stop_workers () { _chuid stopwait $CELERYD_NODES --pidfile="$CELERYD_PID_FILE" } restart_workers () { _chuid restart $CELERYD_NODES $DAEMON_OPTS \ --pidfile="$CELERYD_PID_FILE" \ --logfile="$CELERYD_LOG_FILE" \ --loglevel="$CELERYD_LOG_LEVEL" \ $CELERY_APP_ARG \ $CELERYD_OPTS } kill_workers() { _chuid kill $CELERYD_NODES --pidfile="$CELERYD_PID_FILE" } restart_workers_graceful () { echo "WARNING: Use with caution in production" echo "The workers will attempt to restart, but they may not be able to." local worker_pids= worker_pids=`_get_pids` [ "$one_failed" ] && exit 1 for worker_pid in $worker_pids; do local failed= kill -HUP $worker_pid 2> /dev/null || failed=true if [ "$failed" ]; then echo "${SCRIPT_NAME} worker (pid $worker_pid) could not be restarted" one_failed=true else echo "${SCRIPT_NAME} worker (pid $worker_pid) received SIGHUP" fi done [ "$one_failed" ] && exit 1 || exit 0 } check_status () { my_exitcode=0 found_pids=0 local one_failed= for pidfile in $(_get_pidfiles); do if [ ! -r $pidfile ]; then echo "${SCRIPT_NAME} down: no pidfiles found" one_failed=true break fi local node=`basename "$pidfile" .pid` local pid=`cat "$pidfile"` local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'` if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then echo "bad pid file ($pidfile)" one_failed=true else local failed= kill -0 $pid 2> /dev/null || failed=true if [ "$failed" ]; then echo "${SCRIPT_NAME} (node $node) (pid $pid) is down, but pidfile exists!" one_failed=true else echo "${SCRIPT_NAME} (node $node) (pid $pid) is up..." fi fi done [ "$one_failed" ] && exit 1 || exit 0 } case "$1" in start) check_dev_null check_paths start_workers ;; stop) check_dev_null check_paths stop_workers ;; reload|force-reload) echo "Use restart" ;; status) check_status ;; restart) check_dev_null check_paths restart_workers ;; graceful) check_dev_null restart_workers_graceful ;; kill) check_dev_null kill_workers ;; dryrun) check_dev_null dryrun ;; try-restart) check_dev_null check_paths restart_workers ;; create-paths) check_dev_null create_paths ;; check-paths) check_dev_null check_paths ;; *) echo "Usage: /etc/init.d/${SCRIPT_NAME} {start|stop|restart|graceful|kill|dryrun|create-paths}" exit 64 # EX_USAGE ;; esac exit 0 celery-4.1.0/extra/supervisord/0000755000175000017500000000000013135426347016445 5ustar omeromer00000000000000celery-4.1.0/extra/supervisord/celeryd.conf0000644000175000017500000000171413130607475020744 0ustar omeromer00000000000000; ================================== ; celery worker supervisor example ; ================================== [program:celery] ; Set full path to celery program if using virtualenv command=celery worker -A proj --loglevel=INFO ; Alternatively, ;command=celery --app=your_app.celery:app worker --loglevel=INFO -n worker.%%h ; Or run a script ;command=celery.sh directory=/path/to/project user=nobody numprocs=1 stdout_logfile=/var/log/celery/worker.log stderr_logfile=/var/log/celery/worker.log autostart=true autorestart=true startsecs=10 ; Need to wait for currently executing tasks to finish at shutdown. ; Increase this if you have very long running tasks. 
stopwaitsecs = 600 ; When it becomes necessary to send SIGKILL to the program to terminate it, ; send SIGKILL to its whole process group instead, ; taking care of its children as well. killasgroup=true ; Set Celery priority higher than default (999) ; so, if rabbitmq is supervised, it will start first. priority=1000 celery-4.1.0/extra/supervisord/celerybeat.conf0000644000175000017500000000112013130607475021425 0ustar omeromer00000000000000; ================================ ; celery beat supervisor example ; ================================ [program:celerybeat] ; Set full path to celery program if using virtualenv command=celery beat -A myapp --schedule /var/lib/celery/beat.db --loglevel=INFO ; remove the -A myapp argument if you aren't using an app instance directory=/path/to/project user=nobody numprocs=1 stdout_logfile=/var/log/celery/beat.log stderr_logfile=/var/log/celery/beat.log autostart=true autorestart=true startsecs=10 ; if rabbitmq is supervised, set its priority higher ; so it starts first priority=999 celery-4.1.0/extra/supervisord/supervisord.conf0000644000175000017500000000172513130607475021704 0ustar omeromer00000000000000[unix_http_server] file=/tmp/supervisor.sock ; path to your socket file [supervisord] logfile=/var/log/supervisord/supervisord.log ; supervisord log file logfile_maxbytes=50MB ; maximum size of logfile before rotation logfile_backups=10 ; number of backed up logfiles loglevel=info ; info, debug, warn, trace pidfile=/var/run/supervisord.pid ; pidfile location nodaemon=false ; run supervisord as a daemon minfds=1024 ; number of startup file descriptors minprocs=200 ; number of process descriptors user=root ; default user childlogdir=/var/log/supervisord/ ; where child log files will live [rpcinterface:supervisor] supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] serverurl=unix:///tmp/supervisor.sock ; use the unix:// scheme for unix sockets. [include] # Uncomment the following line to include the celeryd configuration ;files=celeryd.conf celery-4.1.0/extra/supervisord/celery.sh0000644000175000017500000000020613130607475020260 0ustar omeromer00000000000000#!/bin/bash source {{ additional variables }} exec celery --app={{ application_name }}.celery:app worker --loglevel=INFO -n worker.%%hcelery-4.1.0/celery/0000755000175000017500000000000013135426347014220 5ustar omeromer00000000000000celery-4.1.0/celery/exceptions.py0000644000175000017500000001642013130607475016754 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Celery error types. Error Hierarchy =============== - :exc:`Exception` - :exc:`celery.exceptions.CeleryError` - :exc:`~celery.exceptions.ImproperlyConfigured` - :exc:`~celery.exceptions.SecurityError` - :exc:`~celery.exceptions.TaskPredicate` - :exc:`~celery.exceptions.Ignore` - :exc:`~celery.exceptions.Reject` - :exc:`~celery.exceptions.Retry` - :exc:`~celery.exceptions.TaskError` - :exc:`~celery.exceptions.QueueNotFound` - :exc:`~celery.exceptions.IncompleteStream` - :exc:`~celery.exceptions.NotRegistered` - :exc:`~celery.exceptions.AlreadyRegistered` - :exc:`~celery.exceptions.TimeoutError` - :exc:`~celery.exceptions.MaxRetriesExceededError` - :exc:`~celery.exceptions.TaskRevokedError` - :exc:`~celery.exceptions.InvalidTaskError` - :exc:`~celery.exceptions.ChordError` - :class:`kombu.exceptions.KombuError` - :exc:`~celery.exceptions.OperationalError` Raised when a transport connection error occurs while sending a message (be it a task or a remote control command). ..
note:: This exception does not inherit from :exc:`~celery.exceptions.CeleryError`. - **billiard errors** (prefork pool) - :exc:`~celery.exceptions.SoftTimeLimitExceeded` - :exc:`~celery.exceptions.TimeLimitExceeded` - :exc:`~celery.exceptions.WorkerLostError` - :exc:`~celery.exceptions.Terminated` - :class:`UserWarning` - :class:`~celery.exceptions.CeleryWarning` - :class:`~celery.exceptions.AlwaysEagerIgnored` - :class:`~celery.exceptions.DuplicateNodenameWarning` - :class:`~celery.exceptions.FixupWarning` - :class:`~celery.exceptions.NotConfigured` - :exc:`BaseException` - :exc:`SystemExit` - :exc:`~celery.exceptions.WorkerTerminate` - :exc:`~celery.exceptions.WorkerShutdown` """ from __future__ import absolute_import, unicode_literals import numbers from .five import python_2_unicode_compatible, string_t from billiard.exceptions import ( SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, ) from kombu.exceptions import OperationalError __all__ = [ # Warnings 'CeleryWarning', 'AlwaysEagerIgnored', 'DuplicateNodenameWarning', 'FixupWarning', 'NotConfigured', # Core errors 'CeleryError', 'ImproperlyConfigured', 'SecurityError', # Kombu (messaging) errors. 'OperationalError', # Task semi-predicates 'TaskPredicate', 'Ignore', 'Reject', 'Retry', # Task related errors. 'TaskError', 'QueueNotFound', 'IncompleteStream', 'NotRegistered', 'AlreadyRegistered', 'TimeoutError', 'MaxRetriesExceededError', 'TaskRevokedError', 'InvalidTaskError', 'ChordError', # Billiard task errors. 'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError', 'Terminated', # Deprecation warnings (forcing Python to emit them). 'CPendingDeprecationWarning', 'CDeprecationWarning', # Worker shutdown semi-predicates (inherits from SystemExit). 'WorkerShutdown', 'WorkerTerminate', ] UNREGISTERED_FMT = """\ Task of kind {0} never registered, please make sure it's imported.\ """ class CeleryWarning(UserWarning): """Base class for all Celery warnings.""" class AlwaysEagerIgnored(CeleryWarning): """send_task ignores :setting:`task_always_eager` option.""" class DuplicateNodenameWarning(CeleryWarning): """Multiple workers are using the same nodename.""" class FixupWarning(CeleryWarning): """Fixup related warning.""" class NotConfigured(CeleryWarning): """Celery hasn't been configured, as no config module has been found.""" class CeleryError(Exception): """Base class for all Celery errors.""" class TaskPredicate(CeleryError): """Base class for task-related semi-predicates.""" @python_2_unicode_compatible class Retry(TaskPredicate): """The task is to be retried later.""" #: Optional message describing context of retry. message = None #: Exception (if any) that caused the retry to happen. exc = None #: Time of retry (ETA), either :class:`numbers.Real` or #: :class:`~datetime.datetime`. 
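#: A number is interpreted as seconds from now (e.g., ``Retry(when=300)``
#: means a retry in five minutes), while a datetime gives an absolute ETA.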
when = None def __init__(self, message=None, exc=None, when=None, **kwargs): from kombu.utils.encoding import safe_repr self.message = message if isinstance(exc, string_t): self.exc, self.excs = None, exc else: self.exc, self.excs = exc, safe_repr(exc) if exc else None self.when = when super(Retry, self).__init__(self, exc, when, **kwargs) def humanize(self): if isinstance(self.when, numbers.Number): return 'in {0.when}s'.format(self) return 'at {0.when}'.format(self) def __str__(self): if self.message: return self.message if self.excs: return 'Retry {0}: {1}'.format(self.humanize(), self.excs) return 'Retry {0}'.format(self.humanize()) def __reduce__(self): return self.__class__, (self.message, self.excs, self.when) RetryTaskError = Retry # noqa: E305 XXX compat class Ignore(TaskPredicate): """A task can raise this to ignore doing state updates.""" @python_2_unicode_compatible class Reject(TaskPredicate): """A task can raise this if it wants to reject/re-queue the message.""" def __init__(self, reason=None, requeue=False): self.reason = reason self.requeue = requeue super(Reject, self).__init__(reason, requeue) def __repr__(self): return 'reject requeue=%s: %s' % (self.requeue, self.reason) class ImproperlyConfigured(CeleryError): """Celery is somehow improperly configured.""" class SecurityError(CeleryError): """Security related exception.""" class TaskError(CeleryError): """Task related errors.""" class QueueNotFound(KeyError, TaskError): """Task routed to a queue not in ``conf.queues``.""" class IncompleteStream(TaskError): """Found the end of a stream of data, but the data isn't complete.""" @python_2_unicode_compatible class NotRegistered(KeyError, TaskError): """The task ain't registered.""" def __repr__(self): return UNREGISTERED_FMT.format(self) class AlreadyRegistered(TaskError): """The task is already registered.""" # XXX Unused class TimeoutError(TaskError): """The operation timed out.""" class MaxRetriesExceededError(TaskError): """The tasks max restart limit has been exceeded.""" class TaskRevokedError(TaskError): """The task has been revoked, so no result available.""" class InvalidTaskError(TaskError): """The task has invalid data or ain't properly constructed.""" class ChordError(TaskError): """A task part of the chord raised an exception.""" class CPendingDeprecationWarning(PendingDeprecationWarning): """Warning of pending deprecation.""" class CDeprecationWarning(DeprecationWarning): """Warning of deprecation.""" class WorkerTerminate(SystemExit): """Signals that the worker should terminate immediately.""" SystemTerminate = WorkerTerminate # noqa: E305 XXX compat class WorkerShutdown(SystemExit): """Signals that the worker should perform a warm shutdown.""" celery-4.1.0/celery/result.py0000644000175000017500000007640113130607475016116 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Task results/state and results for groups of tasks.""" from __future__ import absolute_import, unicode_literals import time from collections import OrderedDict, deque from contextlib import contextmanager from copy import copy from kombu.utils.objects import cached_property from vine import Thenable, barrier, promise from . import current_app from . 
import states from ._state import _set_task_join_will_block, task_join_will_block from .app import app_or_default from .exceptions import ImproperlyConfigured, IncompleteStream, TimeoutError from .five import ( items, python_2_unicode_compatible, range, string_t, monotonic, ) from .utils import deprecated from .utils.graph import DependencyGraph, GraphFormatter try: import tblib except ImportError: tblib = None __all__ = [ 'ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', 'EagerResult', 'result_from_tuple', ] E_WOULDBLOCK = """\ Never call result.get() within a task! See http://docs.celeryq.org/en/latest/userguide/tasks.html\ #task-synchronous-subtasks """ def assert_will_not_block(): if task_join_will_block(): raise RuntimeError(E_WOULDBLOCK) @contextmanager def allow_join_result(): reset_value = task_join_will_block() _set_task_join_will_block(False) try: yield finally: _set_task_join_will_block(reset_value) class ResultBase(object): """Base class for results.""" #: Parent result (if part of a chain) parent = None @Thenable.register @python_2_unicode_compatible class AsyncResult(ResultBase): """Query task state. Arguments: id (str): See :attr:`id`. backend (Backend): See :attr:`backend`. """ app = None #: Error raised for timeouts. TimeoutError = TimeoutError #: The task's UUID. id = None #: The task result backend to use. backend = None def __init__(self, id, backend=None, task_name=None, # deprecated app=None, parent=None): if id is None: raise ValueError( 'AsyncResult requires valid id, not {0}'.format(type(id))) self.app = app_or_default(app or self.app) self.id = id self.backend = backend or self.app.backend self.parent = parent self.on_ready = promise(self._on_fulfilled, weak=True) self._cache = None def then(self, callback, on_error=None, weak=False): self.backend.add_pending_result(self, weak=weak) return self.on_ready.then(callback, on_error) def _on_fulfilled(self, result): self.backend.remove_pending_result(self) return result def as_tuple(self): parent = self.parent return (self.id, parent and parent.as_tuple()), None def forget(self): """Forget about (and possibly remove the result of) this task.""" self._cache = None self.backend.forget(self.id) def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None): """Send revoke signal to all workers. Any worker receiving the task, or having reserved the task, *must* ignore it. Arguments: terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. wait (bool): Wait for replies from workers. The ``timeout`` argument specifies the seconds to wait. Disabled by default. timeout (float): Time in seconds to wait for replies when ``wait`` is enabled. """ self.app.control.revoke(self.id, connection=connection, terminate=terminate, signal=signal, reply=wait, timeout=timeout) def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True, follow_parents=True, callback=None, on_message=None, on_interval=None, disable_sync_subtasks=True, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES): """Wait until task is ready, and return its result. Warning: Waiting for tasks within a task may lead to deadlocks. Please read :ref:`task-synchronous-subtasks`. Arguments: timeout (float): How long to wait, in seconds, before the operation times out. propagate (bool): Re-raise exception if the task failed. 
interval (float): Time to wait (in seconds) before retrying to retrieve the result. Note that this does not have any effect when using the RPC/redis result store backends, as they don't use polling. no_ack (bool): Enable amqp no ack (automatically acknowledge message). If this is :const:`False` then the message will **not be acked**. follow_parents (bool): Re-raise any exception raised by parent tasks. disable_sync_subtasks (bool): When enabled (the default), calling ``get()`` from within a task raises an error to guard against deadlocks. CAUTION: only pass :const:`False` here if you must. Raises: celery.exceptions.TimeoutError: if `timeout` isn't :const:`None` and the result does not arrive within `timeout` seconds. Exception: If the remote call raised an exception then that exception will be re-raised in the caller process. """ if disable_sync_subtasks: assert_will_not_block() _on_interval = promise() if follow_parents and propagate and self.parent: on_interval = promise(self._maybe_reraise_parent_error, weak=True) self._maybe_reraise_parent_error() if on_interval: _on_interval.then(on_interval) if self._cache: if propagate: self.maybe_throw(callback=callback) return self.result self.backend.add_pending_result(self) return self.backend.wait_for_pending( self, timeout=timeout, interval=interval, on_interval=_on_interval, no_ack=no_ack, propagate=propagate, callback=callback, on_message=on_message, ) wait = get # deprecated alias to :meth:`get`. def _maybe_reraise_parent_error(self): for node in reversed(list(self._parents())): node.maybe_throw() def _parents(self): node = self.parent while node: yield node node = node.parent def collect(self, intermediate=False, **kwargs): """Collect results as they return. An iterator that, like :meth:`get`, will wait for the task to complete, but will also follow :class:`AsyncResult` and :class:`ResultSet` returned by the task, yielding ``(result, value)`` tuples for each result in the tree. An example would be having the following tasks: .. code-block:: python from celery import group from proj.celery import app @app.task(trail=True) def A(how_many): return group(B.s(i) for i in range(how_many))() @app.task(trail=True) def B(i): return pow2.delay(i) @app.task(trail=True) def pow2(i): return i ** 2 .. code-block:: pycon >>> from celery.result import ResultBase >>> from proj.tasks import A >>> result = A.delay(10) >>> [v for v in result.collect() ... if not isinstance(v, (ResultBase, tuple))] [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] Note: The ``Task.trail`` option must be enabled so that the list of children is stored in ``result.children``. This is the default but enabled explicitly for illustration. Yields: Tuple[AsyncResult, Any]: tuples containing the result instance of the child task, and the return value of that task. """ for _, R in self.iterdeps(intermediate=intermediate): yield R, R.get(**kwargs) def get_leaf(self): value = None for _, R in self.iterdeps(): value = R.get() return value def iterdeps(self, intermediate=False): stack = deque([(None, self)]) while stack: parent, node = stack.popleft() yield parent, node if node.ready(): stack.extend((node, child) for child in node.children or []) else: if not intermediate: raise IncompleteStream() def ready(self): """Return :const:`True` if the task has executed. If the task is still running, pending, or is waiting for retry then :const:`False` is returned.
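Example: A minimal illustration (``add`` is an assumed example task): >>> res = add.delay(2, 2) >>> res.ready() # False while running; True once a result is stored True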
""" return self.state in self.backend.READY_STATES def successful(self): """Return :const:`True` if the task executed successfully.""" return self.state == states.SUCCESS def failed(self): """Return :const:`True` if the task failed.""" return self.state == states.FAILURE def throw(self, *args, **kwargs): self.on_ready.throw(*args, **kwargs) def maybe_throw(self, propagate=True, callback=None): cache = self._get_task_meta() if self._cache is None else self._cache state, value, tb = ( cache['status'], cache['result'], cache.get('traceback')) if state in states.PROPAGATE_STATES and propagate: self.throw(value, self._to_remote_traceback(tb)) if callback is not None: callback(self.id, value) return value maybe_reraise = maybe_throw # XXX compat alias def _to_remote_traceback(self, tb): if tb and tblib is not None and self.app.conf.task_remote_tracebacks: return tblib.Traceback.from_string(tb).as_traceback() def build_graph(self, intermediate=False, formatter=None): graph = DependencyGraph( formatter=formatter or GraphFormatter(root=self.id, shape='oval'), ) for parent, node in self.iterdeps(intermediate=intermediate): graph.add_arc(node) if parent: graph.add_edge(parent, node) return graph def __str__(self): """`str(self) -> self.id`.""" return str(self.id) def __hash__(self): """`hash(self) -> hash(self.id)`.""" return hash(self.id) def __repr__(self): return '<{0}: {1}>'.format(type(self).__name__, self.id) def __eq__(self, other): if isinstance(other, AsyncResult): return other.id == self.id elif isinstance(other, string_t): return other == self.id return NotImplemented def __ne__(self, other): res = self.__eq__(other) return True if res is NotImplemented else not res def __copy__(self): return self.__class__( self.id, self.backend, None, self.app, self.parent, ) def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return self.id, self.backend, None, None, self.parent @cached_property def graph(self): return self.build_graph() @property def supports_native_join(self): return self.backend.supports_native_join @property def children(self): return self._get_task_meta().get('children') def _maybe_set_cache(self, meta): if meta: state = meta['status'] if state in states.READY_STATES: d = self._set_cache(self.backend.meta_from_decoded(meta)) self.on_ready(self) return d return meta def _get_task_meta(self): if self._cache is None: return self._maybe_set_cache(self.backend.get_task_meta(self.id)) return self._cache def _iter_meta(self): return iter([self._get_task_meta()]) def _set_cache(self, d): children = d.get('children') if children: d['children'] = [ result_from_tuple(child, self.app) for child in children ] self._cache = d return d @property def result(self): """Task return value. Note: When the task has been executed, this contains the return value. If the task raised an exception, this will be the exception instance. """ return self._get_task_meta()['result'] info = result @property def traceback(self): """Get the traceback of a failed task.""" return self._get_task_meta().get('traceback') @property def state(self): """The tasks current state. Possible values includes: *PENDING* The task is waiting for execution. *STARTED* The task has been started. *RETRY* The task is to be retried, possibly because of failure. *FAILURE* The task raised an exception, or has exceeded the retry limit. The :attr:`result` attribute then contains the exception raised by the task. *SUCCESS* The task executed successfully. 
The :attr:`result` attribute then contains the task's return value. """ return self._get_task_meta()['status'] status = state # XXX compat @property def task_id(self): """Compat. alias to :attr:`id`.""" return self.id @task_id.setter # noqa def task_id(self, id): self.id = id @Thenable.register @python_2_unicode_compatible class ResultSet(ResultBase): """A collection of results. Arguments: results (Sequence[AsyncResult]): List of result instances. """ _app = None #: List of results in the set. results = None def __init__(self, results, app=None, ready_barrier=None, **kwargs): self._app = app self._cache = None self.results = results self.on_ready = promise(args=(self,)) self._on_full = ready_barrier or barrier(results) if self._on_full: self._on_full.then(promise(self.on_ready, weak=True)) def add(self, result): """Add :class:`AsyncResult` as a new member of the set. Does nothing if the result is already a member. """ if result not in self.results: self.results.append(result) if self._on_full: self._on_full.add(result) def _on_ready(self): self.backend.remove_pending_result(self) if self.backend.is_async: self._cache = [r.get() for r in self.results] self.on_ready() def remove(self, result): """Remove result from the set; it must be a member. Raises: KeyError: if the result isn't a member. """ if isinstance(result, string_t): result = self.app.AsyncResult(result) try: self.results.remove(result) except ValueError: raise KeyError(result) def discard(self, result): """Remove result from the set if it is a member. Does nothing if it's not a member. """ try: self.remove(result) except KeyError: pass def update(self, results): """Extend from iterable of results.""" self.results.extend(r for r in results if r not in self.results) def clear(self): """Remove all results from this set.""" self.results[:] = [] # don't create new list. def successful(self): """Return true if all tasks successful. Returns: bool: true if all of the tasks finished successfully (i.e. didn't raise an exception). """ return all(result.successful() for result in self.results) def failed(self): """Return true if any of the tasks failed. Returns: bool: true if one of the tasks failed. (i.e., raised an exception) """ return any(result.failed() for result in self.results) def maybe_throw(self, callback=None, propagate=True): for result in self.results: result.maybe_throw(callback=callback, propagate=propagate) maybe_reraise = maybe_throw # XXX compat alias. def waiting(self): """Return true if any of the tasks are incomplete. Returns: bool: true if one of the tasks is still waiting for execution. """ return any(not result.ready() for result in self.results) def ready(self): """Did all of the tasks complete? (either by success or failure). Returns: bool: true if all of the tasks have been executed. """ return all(result.ready() for result in self.results) def completed_count(self): """Task completion count. Returns: int: the number of tasks that completed successfully. """ return sum(int(result.successful()) for result in self.results) def forget(self): """Forget about (and possibly remove the result of) all the tasks.""" for result in self.results: result.forget() def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None): """Send revoke signal to all workers for all tasks in the set. Arguments: terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. wait (bool): Wait for replies from worker.
The ``timeout`` argument specifies the number of seconds to wait. Disabled by default. timeout (float): Time in seconds to wait for replies when the ``wait`` argument is enabled. """ self.app.control.revoke([r.id for r in self.results], connection=connection, timeout=timeout, terminate=terminate, signal=signal, reply=wait) def __iter__(self): return iter(self.results) def __getitem__(self, index): """`res[i] -> res.results[i]`.""" return self.results[index] @deprecated.Callable('4.0', '5.0') def iterate(self, timeout=None, propagate=True, interval=0.5): """Deprecated method, use :meth:`get` with a callback argument.""" elapsed = 0.0 results = OrderedDict((result.id, copy(result)) for result in self.results) while results: removed = set() for task_id, result in items(results): if result.ready(): yield result.get(timeout=timeout and timeout - elapsed, propagate=propagate) removed.add(task_id) else: if result.backend.subpolling_interval: time.sleep(result.backend.subpolling_interval) for task_id in removed: results.pop(task_id, None) time.sleep(interval) elapsed += interval if timeout and elapsed >= timeout: raise TimeoutError('The operation timed out') def get(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True, on_message=None): """See :meth:`join`. This is here for API compatibility with :class:`AsyncResult`, in addition it uses :meth:`join_native` if available for the current result backend. """ if self._cache is not None: return self._cache return (self.join_native if self.supports_native_join else self.join)( timeout=timeout, propagate=propagate, interval=interval, callback=callback, no_ack=no_ack, on_message=on_message, ) def join(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True, on_message=None, on_interval=None): """Gather the results of all tasks as a list in order. Note: This can be an expensive operation for result store backends that must resort to polling (e.g., database). You should consider using :meth:`join_native` if your backend supports it. Warning: Waiting for tasks within a task may lead to deadlocks. Please see :ref:`task-synchronous-subtasks`. Arguments: timeout (float): The number of seconds to wait for results before the operation times out. propagate (bool): If any of the tasks raises an exception, the exception will be re-raised when this flag is set. interval (float): Time to wait (in seconds) before retrying to retrieve a result from the set. Note that this does not have any effect when using the amqp result store backend, as it does not use polling. callback (Callable): Optional callback to be called for every result received. Must have signature ``(task_id, value)`` No results will be returned by this function if a callback is specified. The order of results is also arbitrary when a callback is used. To get access to the result object for a particular id you'll have to generate an index first: ``index = {r.id: r for r in gres.results.values()}`` Or you can create new result objects on the fly: ``result = app.AsyncResult(task_id)`` (both will take advantage of the backend cache anyway). no_ack (bool): Automatic message acknowledgment (Note that if this is set to :const:`False` then the messages *will not be acknowledged*). Raises: celery.exceptions.TimeoutError: if ``timeout`` isn't :const:`None` and the operation takes longer than ``timeout`` seconds. 
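Example: A short sketch (assumes an ``add`` task and a configured result backend): >>> g = group(add.s(i, i) for i in range(4))() >>> g.join(timeout=10) [0, 2, 4, 6]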
""" assert_will_not_block() time_start = monotonic() remaining = None if on_message is not None: raise ImproperlyConfigured( 'Backend does not support on_message callback') results = [] for result in self.results: remaining = None if timeout: remaining = timeout - (monotonic() - time_start) if remaining <= 0.0: raise TimeoutError('join operation timed out') value = result.get( timeout=remaining, propagate=propagate, interval=interval, no_ack=no_ack, on_interval=on_interval, ) if callback: callback(result.id, value) else: results.append(value) return results def then(self, callback, on_error=None, weak=False): return self.on_ready.then(callback, on_error) def iter_native(self, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None): """Backend optimized version of :meth:`iterate`. .. versionadded:: 2.2 Note that this does not support collecting the results for different task types using different backends. This is currently only supported by the amqp, Redis and cache result backends. """ return self.backend.iter_native( self, timeout=timeout, interval=interval, no_ack=no_ack, on_message=on_message, on_interval=on_interval, ) def join_native(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True, on_message=None, on_interval=None): """Backend optimized version of :meth:`join`. .. versionadded:: 2.2 Note that this does not support collecting the results for different task types using different backends. This is currently only supported by the amqp, Redis and cache result backends. """ assert_will_not_block() order_index = None if callback else { result.id: i for i, result in enumerate(self.results) } acc = None if callback else [None for _ in range(len(self))] for task_id, meta in self.iter_native(timeout, interval, no_ack, on_message, on_interval): value = meta['result'] if propagate and meta['status'] in states.PROPAGATE_STATES: raise value if callback: callback(task_id, value) else: acc[order_index[task_id]] = value return acc def _iter_meta(self): return (meta for _, meta in self.backend.get_many( {r.id for r in self.results}, max_iterations=1, )) def _failed_join_report(self): return (res for res in self.results if res.backend.is_cached(res.id) and res.state in states.PROPAGATE_STATES) def __len__(self): return len(self.results) def __eq__(self, other): if isinstance(other, ResultSet): return other.results == self.results return NotImplemented def __ne__(self, other): res = self.__eq__(other) return True if res is NotImplemented else not res def __repr__(self): return '<{0}: [{1}]>'.format(type(self).__name__, ', '.join(r.id for r in self.results)) @property def supports_native_join(self): try: return self.results[0].supports_native_join except IndexError: pass @property def app(self): if self._app is None: self._app = (self.results[0].app if self.results else current_app._get_current_object()) return self._app @app.setter def app(self, app): # noqa self._app = app @property def backend(self): return self.app.backend if self.app else self.results[0].backend @Thenable.register @python_2_unicode_compatible class GroupResult(ResultSet): """Like :class:`ResultSet`, but with an associated id. This type is returned by :class:`~celery.group`. It enables inspection of the tasks state and return values as a single entity. Arguments: id (str): The id of the group. results (Sequence[AsyncResult]): List of result instances. """ #: The UUID of the group. 
id = None #: List/iterator of results in the group results = None def __init__(self, id=None, results=None, **kwargs): self.id = id ResultSet.__init__(self, results, **kwargs) def save(self, backend=None): """Save group-result for later retrieval using :meth:`restore`. Example: >>> def save_and_restore(result): ... result.save() ... result = GroupResult.restore(result.id) """ return (backend or self.app.backend).save_group(self.id, self) def delete(self, backend=None): """Remove this result if it was previously saved.""" (backend or self.app.backend).delete_group(self.id) def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return self.id, self.results def __bool__(self): return bool(self.id or self.results) __nonzero__ = __bool__ # Included for Py2 backwards compatibility def __eq__(self, other): if isinstance(other, GroupResult): return other.id == self.id and other.results == self.results return NotImplemented def __ne__(self, other): res = self.__eq__(other) return True if res is NotImplemented else not res def __repr__(self): return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id, ', '.join(r.id for r in self.results)) def as_tuple(self): return self.id, [r.as_tuple() for r in self.results] @property def children(self): return self.results @classmethod def restore(cls, id, backend=None, app=None): """Restore previously saved group result.""" app = app or cls.app backend = backend or (app.backend if app else current_app.backend) return backend.restore_group(id) @Thenable.register @python_2_unicode_compatible class EagerResult(AsyncResult): """Result that we know has already been executed.""" def __init__(self, id, ret_value, state, traceback=None): # pylint: disable=super-init-not-called # XXX should really not be inheriting from AsyncResult self.id = id self._result = ret_value self._state = state self._traceback = traceback self.on_ready = promise() self.on_ready(self) def then(self, callback, on_error=None, weak=False): return self.on_ready.then(callback, on_error) def _get_task_meta(self): return self._cache def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return (self.id, self._result, self._state, self._traceback) def __copy__(self): cls, args = self.__reduce__() return cls(*args) def ready(self): return True def get(self, timeout=None, propagate=True, **kwargs): if self.successful(): return self.result elif self.state in states.PROPAGATE_STATES: if propagate: raise self.result return self.result wait = get # XXX Compat (remove 5.0) def forget(self): pass def revoke(self, *args, **kwargs): self._state = states.REVOKED def __repr__(self): return '<EagerResult: {0.id}>'.format(self) @property def _cache(self): return { 'task_id': self.id, 'result': self._result, 'status': self._state, 'traceback': self._traceback, } @property def result(self): """The task's return value.""" return self._result @property def state(self): """The task's state.""" return self._state status = state @property def traceback(self): """The traceback if the task failed.""" return self._traceback @property def supports_native_join(self): return False def result_from_tuple(r, app=None): """Deserialize result from tuple.""" # earlier backends may just pickle, so check if # result is already prepared.
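# Expected tuple shapes (illustrative, derived from the as_tuple()
# implementations above):
#   AsyncResult:  (('task-id', parent_tuple_or_None), None)
#   GroupResult:  ('group-id', [child_result_tuple, ...])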
app = app_or_default(app) Result = app.AsyncResult if not isinstance(r, ResultBase): res, nodes = r if nodes: return app.GroupResult( res, [result_from_tuple(child, app) for child in nodes], ) # previously didn't include parent id, parent = res if isinstance(res, (list, tuple)) else (res, None) if parent: parent = result_from_tuple(parent, app) return Result(id, parent=parent) return r celery-4.1.0/celery/five.py0000644000175000017500000000027113130607475015521 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Python 2/3 compatibility utilities.""" from __future__ import absolute_import, unicode_literals import sys import vine.five sys.modules[__name__] = vine.five celery-4.1.0/celery/apps/0000755000175000017500000000000013135426347015163 5ustar omeromer00000000000000celery-4.1.0/celery/apps/worker.py0000644000175000017500000003076313135426300017044 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Worker command-line program. This module is the 'program-version' of :mod:`celery.worker`. It does everything necessary to run that module as an actual application, like installing signal handlers, platform tweaks, and so on. """ from __future__ import absolute_import, print_function, unicode_literals import logging import os import platform as _platform import sys from datetime import datetime from functools import partial from billiard.process import current_process from kombu.utils.encoding import safe_str from celery import VERSION_BANNER from celery import platforms from celery import signals from celery.app import trace from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.five import string, string_t from celery.loaders.app import AppLoader from celery.platforms import EX_FAILURE, EX_OK, check_privileges, isatty from celery.utils import static from celery.utils import term from celery.utils.debug import cry from celery.utils.imports import qualname from celery.utils.log import get_logger, in_sighandler, set_in_sighandler from celery.utils.text import pluralize from celery.worker import WorkController __all__ = ['Worker'] logger = get_logger(__name__) is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') ARTLINES = [ ' --------------', '---- **** -----', '--- * *** * --', '-- * - **** ---', '- ** ----------', '- ** ----------', '- ** ----------', '- ** ----------', '- *** --- * ---', '-- ******* ----', '--- ***** -----', ' --------------', ] BANNER = """\ {hostname} v{version} {platform} {timestamp} [config] .> app: {app} .> transport: {conninfo} .> results: {results} .> concurrency: {concurrency} .> task events: {events} [queues] {queues} """ EXTRA_INFO_FMT = """ [tasks] {tasks} """ def active_thread_count(): from threading import enumerate return sum(1 for t in enumerate() if not t.name.startswith('Dummy-')) def safe_say(msg): print('\n{0}'.format(msg), file=sys.__stderr__) class Worker(WorkController): """Worker as a program.""" def on_before_init(self, quiet=False, **kwargs): self.quiet = quiet trace.setup_worker_optimizations(self.app, self.hostname) # this signal can be used to set up configuration for # workers by name. 
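        # (Usage sketch -- the handler and sender names are illustrative:
        #
        #     from celery.signals import celeryd_init
        #
        #     @celeryd_init.connect(sender='worker12@example.com')
        #     def configure_worker12(conf=None, **kwargs):
        #         conf.task_default_rate_limit = '10/m'
        #
        # connecting without ``sender`` configures all workers.)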
signals.celeryd_init.send( sender=self.hostname, instance=self, conf=self.app.conf, options=kwargs, ) check_privileges(self.app.conf.accept_content) def on_after_init(self, purge=False, no_color=None, redirect_stdouts=None, redirect_stdouts_level=None, **kwargs): self.redirect_stdouts = self.app.either( 'worker_redirect_stdouts', redirect_stdouts) self.redirect_stdouts_level = self.app.either( 'worker_redirect_stdouts_level', redirect_stdouts_level) super(Worker, self).setup_defaults(**kwargs) self.purge = purge self.no_color = no_color self._isatty = isatty(sys.stdout) self.colored = self.app.log.colored( self.logfile, enabled=not no_color if no_color is not None else no_color ) def on_init_blueprint(self): self._custom_logging = self.setup_logging() # apply task execution optimizations # -- This will finalize the app! trace.setup_worker_optimizations(self.app, self.hostname) def on_start(self): app = self.app WorkController.on_start(self) # this signal can be used to, for example, change queues after # the -Q option has been applied. signals.celeryd_after_setup.send( sender=self.hostname, instance=self, conf=app.conf, ) if self.purge: self.purge_messages() if not self.quiet: self.emit_banner() self.set_process_status('-active-') self.install_platform_tweaks(self) if not self._custom_logging and self.redirect_stdouts: app.log.redirect_stdouts(self.redirect_stdouts_level) def emit_banner(self): # Dump configuration to screen so we have some basic information # for when users sends bug reports. use_image = term.supports_images() if use_image: print(term.imgcat(static.logo())) print(safe_str(''.join([ string(self.colored.cyan( ' \n', self.startup_info(artlines=not use_image))), string(self.colored.reset(self.extra_info() or '')), ])), file=sys.__stdout__) def on_consumer_ready(self, consumer): signals.worker_ready.send(sender=consumer) logger.info('%s ready.', safe_str(self.hostname)) def setup_logging(self, colorize=None): if colorize is None and self.no_color is not None: colorize = not self.no_color return self.app.log.setup( self.loglevel, self.logfile, redirect_stdouts=False, colorize=colorize, hostname=self.hostname, ) def purge_messages(self): with self.app.connection_for_write() as connection: count = self.app.control.purge(connection=connection) if count: # pragma: no cover print('purge: Erased {0} {1} from the queue.\n'.format( count, pluralize(count, 'message'))) def tasklist(self, include_builtins=True, sep='\n', int_='celery.'): return sep.join( ' . 
{0}'.format(task) for task in sorted(self.app.tasks) if (not task.startswith(int_) if not include_builtins else task) ) def extra_info(self): if self.loglevel is None: return if self.loglevel <= logging.INFO: include_builtins = self.loglevel <= logging.DEBUG tasklist = self.tasklist(include_builtins=include_builtins) return EXTRA_INFO_FMT.format(tasks=tasklist) def startup_info(self, artlines=True): app = self.app concurrency = string(self.concurrency) appr = '{0}:{1:#x}'.format(app.main or '__main__', id(app)) if not isinstance(app.loader, AppLoader): loader = qualname(app.loader) if loader.startswith('celery.loaders'): # pragma: no cover loader = loader[14:] appr += ' ({0})'.format(loader) if self.autoscale: max, min = self.autoscale concurrency = '{{min={0}, max={1}}}'.format(min, max) pool = self.pool_cls if not isinstance(pool, string_t): pool = pool.__module__ concurrency += ' ({0})'.format(pool.split('.')[-1]) events = 'ON' if not self.task_events: events = 'OFF (enable -E to monitor tasks in this worker)' banner = BANNER.format( app=appr, hostname=safe_str(self.hostname), timestamp=datetime.now().replace(microsecond=0), version=VERSION_BANNER, conninfo=self.app.connection().as_uri(), results=self.app.backend.as_uri(), concurrency=concurrency, platform=safe_str(_platform.platform()), events=events, queues=app.amqp.queues.format(indent=0, indent_first=False), ).splitlines() # integrate the ASCII art. if artlines: for i, _ in enumerate(banner): try: banner[i] = ' '.join([ARTLINES[i], banner[i]]) except IndexError: banner[i] = ' ' * 16 + banner[i] return '\n'.join(banner) + '\n' def install_platform_tweaks(self, worker): """Install platform specific tweaks and workarounds.""" if self.app.IS_macOS: self.macOS_proxy_detection_workaround() # Install signal handler so SIGHUP restarts the worker. if not self._isatty: # only install HUP handler if detached from terminal, # so closing the terminal window doesn't restart the worker # into the background. if self.app.IS_macOS: # macOS can't exec from a process using threads. 
# See https://github.com/celery/celery/issues#issue/152 install_HUP_not_supported_handler(worker) else: install_worker_restart_handler(worker) install_worker_term_handler(worker) install_worker_term_hard_handler(worker) install_worker_int_handler(worker) install_cry_handler() install_rdb_handler() def macOS_proxy_detection_workaround(self): """See https://github.com/celery/celery/issues#issue/161.""" os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd') def set_process_status(self, info): return platforms.set_mp_process_title( 'celeryd', info='{0} ({1})'.format(info, platforms.strargv(sys.argv)), hostname=self.hostname, ) def _shutdown_handler(worker, sig='TERM', how='Warm', exc=WorkerShutdown, callback=None, exitcode=EX_OK): def _handle_request(*args): with in_sighandler(): from celery.worker import state if current_process()._name == 'MainProcess': if callback: callback(worker) safe_say('worker: {0} shutdown (MainProcess)'.format(how)) signals.worker_shutting_down.send( sender=worker.hostname, sig=sig, how=how, exitcode=exitcode, ) if active_thread_count() > 1: setattr(state, {'Warm': 'should_stop', 'Cold': 'should_terminate'}[how], exitcode) else: raise exc(exitcode) _handle_request.__name__ = str('worker_{0}'.format(how)) platforms.signals[sig] = _handle_request install_worker_term_handler = partial( _shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown, ) if not is_jython: # pragma: no cover install_worker_term_hard_handler = partial( _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate, exitcode=EX_FAILURE, ) else: # pragma: no cover install_worker_term_handler = \ install_worker_term_hard_handler = lambda *a, **kw: None def on_SIGINT(worker): safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!') install_worker_term_hard_handler(worker, sig='SIGINT') if not is_jython: # pragma: no cover install_worker_int_handler = partial( _shutdown_handler, sig='SIGINT', callback=on_SIGINT, exitcode=EX_FAILURE, ) else: # pragma: no cover def install_worker_int_handler(*args, **kwargs): pass def _reload_current_worker(): platforms.close_open_fds([ sys.__stdin__, sys.__stdout__, sys.__stderr__, ]) os.execv(sys.executable, [sys.executable] + sys.argv) def install_worker_restart_handler(worker, sig='SIGHUP'): def restart_worker_sig_handler(*args): """Signal handler restarting the current python program.""" set_in_sighandler(True) safe_say('Restarting celery worker ({0})'.format(' '.join(sys.argv))) import atexit atexit.register(_reload_current_worker) from celery.worker import state state.should_stop = EX_OK platforms.signals[sig] = restart_worker_sig_handler def install_cry_handler(sig='SIGUSR1'): # Jython/PyPy does not have sys._current_frames if is_jython or is_pypy: # pragma: no cover return def cry_handler(*args): """Signal handler logging the stack-trace of all active threads.""" with in_sighandler(): safe_say(cry()) platforms.signals[sig] = cry_handler def install_rdb_handler(envvar='CELERY_RDBSIG', sig='SIGUSR2'): # pragma: no cover def rdb_handler(*args): """Signal handler setting a rdb breakpoint at the current frame.""" with in_sighandler(): from celery.contrib.rdb import set_trace, _frame # gevent does not pass standard signal handler args frame = args[1] if args else _frame().f_back set_trace(frame) if os.environ.get(envvar): platforms.signals[sig] = rdb_handler def install_HUP_not_supported_handler(worker, sig='SIGHUP'): def warn_on_HUP_handler(signum, frame): with in_sighandler(): safe_say('{sig} not supported: Restarting with {sig} is 
' 'unstable on this platform!'.format(sig=sig)) platforms.signals[sig] = warn_on_HUP_handler celery-4.1.0/celery/apps/multi.py0000644000175000017500000003654013130607475016675 0ustar omeromer00000000000000"""Start/stop/manage workers.""" from __future__ import absolute_import, unicode_literals import errno import os import shlex import signal import sys from collections import OrderedDict, defaultdict from functools import partial from subprocess import Popen from time import sleep from kombu.utils.encoding import from_utf8 from kombu.utils.objects import cached_property from celery.five import UserList, items from celery.platforms import IS_WINDOWS, Pidfile, signal_name from celery.utils.nodenames import ( gethostname, host_format, node_format, nodesplit, ) from celery.utils.saferepr import saferepr __all__ = ['Cluster', 'Node'] CELERY_EXE = 'celery' def celery_exe(*args): return ' '.join((CELERY_EXE,) + args) def build_nodename(name, prefix, suffix): hostname = suffix if '@' in name: nodename = host_format(name) shortname, hostname = nodesplit(nodename) name = shortname else: shortname = '%s%s' % (prefix, name) nodename = host_format( '{0}@{1}'.format(shortname, hostname), ) return name, nodename, hostname def build_expander(nodename, shortname, hostname): return partial( node_format, name=nodename, N=shortname, d=hostname, h=nodename, i='%i', I='%I', ) def format_opt(opt, value): if not value: return opt if opt.startswith('--'): return '{0}={1}'.format(opt, value) return '{0} {1}'.format(opt, value) def _kwargs_to_command_line(kwargs): return { ('--{0}'.format(k.replace('_', '-')) if len(k) > 1 else '-{0}'.format(k)): '{0}'.format(v) for k, v in items(kwargs) } class NamespacedOptionParser(object): def __init__(self, args): self.args = args self.options = OrderedDict() self.values = [] self.passthrough = '' self.namespaces = defaultdict(lambda: OrderedDict()) def parse(self): rargs = list(self.args) pos = 0 while pos < len(rargs): arg = rargs[pos] if arg == '--': self.passthrough = ' '.join(rargs[pos:]) break elif arg[0] == '-': if arg[1] == '-': self.process_long_opt(arg[2:]) else: value = None if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-': value = rargs[pos + 1] pos += 1 self.process_short_opt(arg[1:], value) else: self.values.append(arg) pos += 1 def process_long_opt(self, arg, value=None): if '=' in arg: arg, value = arg.split('=', 1) self.add_option(arg, value, short=False) def process_short_opt(self, arg, value=None): self.add_option(arg, value, short=True) def optmerge(self, ns, defaults=None): if defaults is None: defaults = self.options return OrderedDict(defaults, **self.namespaces[ns]) def add_option(self, name, value, short=False, ns=None): prefix = short and '-' or '--' dest = self.options if ':' in name: name, ns = name.split(':') dest = self.namespaces[ns] dest[prefix + name] = value class Node(object): """Represents a node in a cluster.""" def __init__(self, name, cmd=None, append=None, options=None, extra_args=None): self.name = name self.cmd = cmd or '-m {0}'.format(celery_exe('worker', '--detach')) self.append = append self.extra_args = extra_args or '' self.options = self._annotate_with_default_opts( options or OrderedDict()) self.expander = self._prepare_expander() self.argv = self._prepare_argv() self._pid = None def _annotate_with_default_opts(self, options): options['-n'] = self.name self._setdefaultopt(options, ['--pidfile', '-p'], '%n.pid') self._setdefaultopt(options, ['--logfile', '-f'], '%n%I.log') self._setdefaultopt(options, ['--executable'], 
sys.executable) return options def _setdefaultopt(self, d, alt, value): for opt in alt[1:]: try: return d[opt] except KeyError: pass return d.setdefault(alt[0], value) def _prepare_expander(self): shortname, hostname = self.name.split('@', 1) return build_expander( self.name, shortname, hostname) def _prepare_argv(self): argv = tuple( [self.expander(self.cmd)] + [format_opt(opt, self.expander(value)) for opt, value in items(self.options)] + [self.extra_args] ) if self.append: argv += (self.expander(self.append),) return argv def alive(self): return self.send(0) def send(self, sig, on_error=None): pid = self.pid if pid: try: os.kill(pid, sig) except OSError as exc: if exc.errno != errno.ESRCH: raise maybe_call(on_error, self) return False return True maybe_call(on_error, self) def start(self, env=None, **kwargs): return self._waitexec( self.argv, path=self.executable, env=env, **kwargs) def _waitexec(self, argv, path=sys.executable, env=None, on_spawn=None, on_signalled=None, on_failure=None): argstr = self.prepare_argv(argv, path) maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env) pipe = Popen(argstr, env=env) return self.handle_process_exit( pipe.wait(), on_signalled=on_signalled, on_failure=on_failure, ) def handle_process_exit(self, retcode, on_signalled=None, on_failure=None): if retcode < 0: maybe_call(on_signalled, self, -retcode) return -retcode elif retcode > 0: maybe_call(on_failure, self, retcode) return retcode def prepare_argv(self, argv, path): args = ' '.join([path] + list(argv)) return shlex.split(from_utf8(args), posix=not IS_WINDOWS) def getopt(self, *alt): for opt in alt: try: return self.options[opt] except KeyError: pass raise KeyError(alt[0]) def __repr__(self): return '<{name}: {0.name}>'.format(self, name=type(self).__name__) @cached_property def pidfile(self): return self.expander(self.getopt('--pidfile', '-p')) @cached_property def logfile(self): return self.expander(self.getopt('--logfile', '-f')) @property def pid(self): if self._pid is not None: return self._pid try: return Pidfile(self.pidfile).read_pid() except ValueError: pass @pid.setter def pid(self, value): self._pid = value @cached_property def executable(self): return self.options['--executable'] @cached_property def argv_with_executable(self): return (self.executable,) + self.argv @classmethod def from_kwargs(cls, name, **kwargs): return cls(name, options=_kwargs_to_command_line(kwargs)) def maybe_call(fun, *args, **kwargs): if fun is not None: fun(*args, **kwargs) class MultiParser(object): Node = Node def __init__(self, cmd='celery worker', append='', prefix='', suffix='', range_prefix='celery'): self.cmd = cmd self.append = append self.prefix = prefix self.suffix = suffix self.range_prefix = range_prefix def parse(self, p): names = p.values options = dict(p.options) ranges = len(names) == 1 prefix = self.prefix cmd = options.pop('--cmd', self.cmd) append = options.pop('--append', self.append) hostname = options.pop('--hostname', options.pop('-n', gethostname())) prefix = options.pop('--prefix', prefix) or '' suffix = options.pop('--suffix', self.suffix) or hostname suffix = '' if suffix in ('""', "''") else suffix if ranges: try: names, prefix = self._get_ranges(names), self.range_prefix except ValueError: pass self._update_ns_opts(p, names) self._update_ns_ranges(p, ranges) return ( self._node_from_options( p, name, prefix, suffix, cmd, append, options) for name in names ) def _node_from_options(self, p, name, prefix, suffix, cmd, append, options): namespace, nodename, _ = 
build_nodename(name, prefix, suffix) namespace = nodename if nodename in p.namespaces else namespace return Node(nodename, cmd, append, p.optmerge(namespace, options), p.passthrough) def _get_ranges(self, names): noderange = int(names[0]) return [str(n) for n in range(1, noderange + 1)] def _update_ns_opts(self, p, names): # Numbers in args always refers to the index in the list of names. # (e.g., `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). for ns_name, ns_opts in list(items(p.namespaces)): if ns_name.isdigit(): ns_index = int(ns_name) - 1 if ns_index < 0: raise KeyError('Indexes start at 1 got: %r' % (ns_name,)) try: p.namespaces[names[ns_index]].update(ns_opts) except IndexError: raise KeyError('No node at index %r' % (ns_name,)) def _update_ns_ranges(self, p, ranges): for ns_name, ns_opts in list(items(p.namespaces)): if ',' in ns_name or (ranges and '-' in ns_name): for subns in self._parse_ns_range(ns_name, ranges): p.namespaces[subns].update(ns_opts) p.namespaces.pop(ns_name) def _parse_ns_range(self, ns, ranges=False): ret = [] for space in ',' in ns and ns.split(',') or [ns]: if ranges and '-' in space: start, stop = space.split('-') ret.extend( str(n) for n in range(int(start), int(stop) + 1) ) else: ret.append(space) return ret class Cluster(UserList): """Represent a cluster of workers.""" def __init__(self, nodes, cmd=None, env=None, on_stopping_preamble=None, on_send_signal=None, on_still_waiting_for=None, on_still_waiting_progress=None, on_still_waiting_end=None, on_node_start=None, on_node_restart=None, on_node_shutdown_ok=None, on_node_status=None, on_node_signal=None, on_node_signal_dead=None, on_node_down=None, on_child_spawn=None, on_child_signalled=None, on_child_failure=None): self.nodes = nodes self.cmd = cmd or celery_exe('worker') self.env = env self.on_stopping_preamble = on_stopping_preamble self.on_send_signal = on_send_signal self.on_still_waiting_for = on_still_waiting_for self.on_still_waiting_progress = on_still_waiting_progress self.on_still_waiting_end = on_still_waiting_end self.on_node_start = on_node_start self.on_node_restart = on_node_restart self.on_node_shutdown_ok = on_node_shutdown_ok self.on_node_status = on_node_status self.on_node_signal = on_node_signal self.on_node_signal_dead = on_node_signal_dead self.on_node_down = on_node_down self.on_child_spawn = on_child_spawn self.on_child_signalled = on_child_signalled self.on_child_failure = on_child_failure def start(self): return [self.start_node(node) for node in self] def start_node(self, node): maybe_call(self.on_node_start, node) retcode = self._start_node(node) maybe_call(self.on_node_status, node, retcode) return retcode def _start_node(self, node): return node.start( self.env, on_spawn=self.on_child_spawn, on_signalled=self.on_child_signalled, on_failure=self.on_child_failure, ) def send_all(self, sig): for node in self.getpids(on_down=self.on_node_down): maybe_call(self.on_node_signal, node, signal_name(sig)) node.send(sig, self.on_node_signal_dead) def kill(self): return self.send_all(signal.SIGKILL) def restart(self, sig=signal.SIGTERM): retvals = [] def restart_on_down(node): maybe_call(self.on_node_restart, node) retval = self._start_node(node) maybe_call(self.on_node_status, node, retval) retvals.append(retval) self._stop_nodes(retry=2, on_down=restart_on_down, sig=sig) return retvals def stop(self, retry=None, callback=None, sig=signal.SIGTERM): return self._stop_nodes(retry=retry, on_down=callback, sig=sig) def stopwait(self, retry=2, callback=None, 
sig=signal.SIGTERM): return self._stop_nodes(retry=retry, on_down=callback, sig=sig) def _stop_nodes(self, retry=None, on_down=None, sig=signal.SIGTERM): on_down = on_down if on_down is not None else self.on_node_down nodes = list(self.getpids(on_down=on_down)) if nodes: for node in self.shutdown_nodes(nodes, sig=sig, retry=retry): maybe_call(on_down, node) def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None): P = set(nodes) maybe_call(self.on_stopping_preamble, nodes) to_remove = set() for node in P: maybe_call(self.on_send_signal, node, signal_name(sig)) if not node.send(sig, self.on_node_signal_dead): to_remove.add(node) yield node P -= to_remove if retry: maybe_call(self.on_still_waiting_for, P) its = 0 while P: to_remove = set() for node in P: its += 1 maybe_call(self.on_still_waiting_progress, P) if not node.alive(): maybe_call(self.on_node_shutdown_ok, node) to_remove.add(node) yield node maybe_call(self.on_still_waiting_for, P) break P -= to_remove if P and not its % len(P): sleep(float(retry)) maybe_call(self.on_still_waiting_end) def find(self, name): for node in self: if node.name == name: return node raise KeyError(name) def getpids(self, on_down=None): for node in self: if node.pid: yield node else: maybe_call(on_down, node) def __repr__(self): return '<{name}({0}): {1}>'.format( len(self), saferepr([n.name for n in self]), name=type(self).__name__, ) @property def data(self): return self.nodes celery-4.1.0/celery/apps/__init__.py0000644000175000017500000000000013130607475017260 0ustar omeromer00000000000000celery-4.1.0/celery/apps/beat.py0000644000175000017500000001224613130607475016453 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Beat command-line program. This module is the 'program-version' of :mod:`celery.beat`. It does everything necessary to run that module as an actual application, like installing signal handlers and so on. """ from __future__ import absolute_import, print_function, unicode_literals import numbers import socket import sys from datetime import datetime from celery import VERSION_BANNER, platforms, beat from celery.five import text_t from celery.utils.imports import qualname from celery.utils.log import LOG_LEVELS, get_logger from celery.utils.time import humanize_seconds __all__ = ['Beat'] STARTUP_INFO_FMT = """ LocalTime -> {timestamp} Configuration -> . broker -> {conninfo} . loader -> {loader} . scheduler -> {scheduler} {scheduler_info} . logfile -> {logfile}@%{loglevel} . 
maxinterval -> {hmax_interval} ({max_interval}s) """.strip() logger = get_logger('celery.beat') class Beat(object): """Beat as a service.""" Service = beat.Service app = None def __init__(self, max_interval=None, app=None, socket_timeout=30, pidfile=None, no_color=None, loglevel='WARN', logfile=None, schedule=None, scheduler=None, scheduler_cls=None, # XXX use scheduler redirect_stdouts=None, redirect_stdouts_level=None, **kwargs): self.app = app = app or self.app either = self.app.either self.loglevel = loglevel self.logfile = logfile self.schedule = either('beat_schedule_filename', schedule) self.scheduler_cls = either( 'beat_scheduler', scheduler, scheduler_cls) self.redirect_stdouts = either( 'worker_redirect_stdouts', redirect_stdouts) self.redirect_stdouts_level = either( 'worker_redirect_stdouts_level', redirect_stdouts_level) self.max_interval = max_interval self.socket_timeout = socket_timeout self.no_color = no_color self.colored = app.log.colored( self.logfile, enabled=not no_color if no_color is not None else no_color, ) self.pidfile = pidfile if not isinstance(self.loglevel, numbers.Integral): self.loglevel = LOG_LEVELS[self.loglevel.upper()] def run(self): print(str(self.colored.cyan( 'celery beat v{0} is starting.'.format(VERSION_BANNER)))) self.init_loader() self.set_process_title() self.start_scheduler() def setup_logging(self, colorize=None): if colorize is None and self.no_color is not None: colorize = not self.no_color self.app.log.setup(self.loglevel, self.logfile, self.redirect_stdouts, self.redirect_stdouts_level, colorize=colorize) def start_scheduler(self): if self.pidfile: platforms.create_pidlock(self.pidfile) service = self.Service( app=self.app, max_interval=self.max_interval, scheduler_cls=self.scheduler_cls, schedule_filename=self.schedule, ) print(self.banner(service)) self.setup_logging() if self.socket_timeout: logger.debug('Setting default socket timeout to %r', self.socket_timeout) socket.setdefaulttimeout(self.socket_timeout) try: self.install_sync_handler(service) service.start() except Exception as exc: logger.critical('beat raised exception %s: %r', exc.__class__, exc, exc_info=True) raise def banner(self, service): c = self.colored return text_t( # flake8: noqa c.blue('__ ', c.magenta('-'), c.blue(' ... __ '), c.magenta('-'), c.blue(' _\n'), c.reset(self.startup_info(service))), ) def init_loader(self): # Run the worker init handler. # (Usually imports task modules and such.) self.app.loader.init_worker() self.app.finalize() def startup_info(self, service): scheduler = service.get_scheduler(lazy=True) return STARTUP_INFO_FMT.format( conninfo=self.app.connection().as_uri(), timestamp=datetime.now().replace(microsecond=0), logfile=self.logfile or '[stderr]', loglevel=LOG_LEVELS[self.loglevel], loader=qualname(self.app.loader), scheduler=qualname(scheduler), scheduler_info=scheduler.info, hmax_interval=humanize_seconds(scheduler.max_interval), max_interval=scheduler.max_interval, ) def set_process_title(self): arg_start = 'manage' in sys.argv[0] and 2 or 1 platforms.set_process_title( 'celery beat', info=' '.join(sys.argv[arg_start:]), ) def install_sync_handler(self, service): """Install a `SIGTERM` + `SIGINT` handler saving the schedule.""" def _sync(signum, frame): service.sync() raise SystemExit() platforms.signals.update(SIGTERM=_sync, SIGINT=_sync) celery-4.1.0/celery/_state.py0000644000175000017500000001203013130607475016043 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Internal state. 
This is an internal module containing thread state like the ``current_app``, and ``current_task``. This module shouldn't be used directly. """ from __future__ import absolute_import, print_function, unicode_literals import os import sys import threading import weakref from celery.local import Proxy from celery.utils.threads import LocalStack __all__ = [ 'set_default_app', 'get_current_app', 'get_current_task', 'get_current_worker_task', 'current_app', 'current_task', 'connect_on_app_finalize', ] #: Global default app used when no current app. default_app = None #: Function returning the app provided or the default app if none. #: #: The environment variable :envvar:`CELERY_TRACE_APP` is used to #: trace app leaks. When enabled an exception is raised if there #: is no active app. app_or_default = None #: List of all app instances (weakrefs), mustn't be used directly. _apps = weakref.WeakSet() #: Global set of functions to call whenever a new app is finalized. #: Shared tasks, and built-in tasks are created by adding callbacks here. _on_app_finalizers = set() _task_join_will_block = False def connect_on_app_finalize(callback): """Connect callback to be called when any app is finalized.""" _on_app_finalizers.add(callback) return callback def _announce_app_finalized(app): callbacks = set(_on_app_finalizers) for callback in callbacks: callback(app) def _set_task_join_will_block(blocks): global _task_join_will_block _task_join_will_block = blocks def task_join_will_block(): return _task_join_will_block class _TLS(threading.local): #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute #: sets this, so it will always contain the last instantiated app, #: and is the default app returned by :func:`app_or_default`. current_app = None _tls = _TLS() _task_stack = LocalStack() #: Function used to push a task to the thread local stack #: keeping track of the currently executing task. #: You must remember to pop the task after. push_current_task = _task_stack.push #: Function used to pop a task from the thread local stack #: keeping track of the currently executing task. pop_current_task = _task_stack.pop def set_default_app(app): """Set default app.""" global default_app default_app = app def _get_current_app(): if default_app is None: #: creates the global fallback app instance. from celery.app.base import Celery set_default_app(Celery( 'default', fixups=[], set_as_current=False, loader=os.environ.get('CELERY_LOADER') or 'default', )) return _tls.current_app or default_app def _set_current_app(app): _tls.current_app = app if os.environ.get('C_STRICT_APP'): # pragma: no cover def get_current_app(): """Return the current app.""" raise RuntimeError('USES CURRENT APP') elif os.environ.get('C_WARN_APP'): # pragma: no cover def get_current_app(): # noqa import traceback print('-- USES CURRENT_APP', file=sys.stderr) # noqa+ traceback.print_stack(file=sys.stderr) return _get_current_app() else: get_current_app = _get_current_app def get_current_task(): """Currently executing task.""" return _task_stack.top def get_current_worker_task(): """Currently executing task, that was applied by the worker. This is used to differentiate between the actual task executed by the worker and any task that was called within a task (using ``task.__call__`` or ``task.apply``) """ for task in reversed(_task_stack.stack): if not task.request.called_directly: return task #: Proxy to current app. current_app = Proxy(get_current_app) #: Proxy to current task. 
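#: (Usage sketch -- inside an executing task,
#:
#:     from celery import current_task
#:     current_task.request.id
#:
#: resolves against the top of the ``_task_stack`` above.)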
current_task = Proxy(get_current_task) def _register_app(app): _apps.add(app) def _deregister_app(app): _apps.discard(app) def _get_active_apps(): return _apps def _app_or_default(app=None): if app is None: return get_current_app() return app def _app_or_default_trace(app=None): # pragma: no cover from traceback import print_stack try: from billiard.process import current_process except ImportError: current_process = None if app is None: if getattr(_tls, 'current_app', None): print('-- RETURNING TO CURRENT APP --') # noqa+ print_stack() return _tls.current_app if not current_process or current_process()._name == 'MainProcess': raise Exception('DEFAULT APP') print('-- RETURNING TO DEFAULT APP --') # noqa+ print_stack() return default_app return app def enable_trace(): """Enable tracing of app instances.""" global app_or_default app_or_default = _app_or_default_trace def disable_trace(): """Disable tracing of app instances.""" global app_or_default app_or_default = _app_or_default if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover enable_trace() else: disable_trace() celery-4.1.0/celery/events/0000755000175000017500000000000013135426347015524 5ustar omeromer00000000000000celery-4.1.0/celery/events/event.py0000644000175000017500000000311113130607475017211 0ustar omeromer00000000000000"""Creating events, and event exchange definition.""" from __future__ import absolute_import, unicode_literals import time from copy import copy from kombu import Exchange __all__ = [ 'Event', 'event_exchange', 'get_exchange', 'group_from', ] #: Exchange used to send events on. #: Note: Use :func:`get_exchange` instead, as the type of #: exchange will vary depending on the broker connection. event_exchange = Exchange('celeryev', type='topic') def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields): """Create an event. Notes: An event is simply a dictionary: the only required field is ``type``. A ``timestamp`` field will be set to the current time if not provided. """ event = __dict__(_fields, **fields) if _fields else fields if 'timestamp' not in event: event.update(timestamp=__now__(), type=type) else: event['type'] = type return event def group_from(type): """Get the group part of an event type name. Example: >>> group_from('task-sent') 'task' >>> group_from('custom-my-event') 'custom' """ return type.split('-', 1)[0] def get_exchange(conn): """Get exchange used for sending events. Arguments: conn (kombu.Connection): Connection used for sending/receiving events. Note: The event type changes if Redis is used as the transport (from topic -> fanout). """ ex = copy(event_exchange) if conn.transport.driver_type == 'redis': # quick hack for Issue #436 ex.type = 'fanout' return ex celery-4.1.0/celery/events/__init__.py0000644000175000017500000000105513130607475017634 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Monitoring Event Receiver+Dispatcher. Events is a stream of messages sent for certain actions occurring in the worker (and clients if :setting:`task_send_sent_event` is enabled), used for monitoring purposes.
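
Example (a sketch -- assumes ``app`` is a configured :class:`~celery.Celery`
instance and the handler name is illustrative)::

    def on_task_succeeded(event):
        print('task succeeded: %r' % (event,))

    with app.connection() as connection:
        recv = app.events.Receiver(connection, handlers={
            'task-succeeded': on_task_succeeded,
        })
        recv.capture(limit=None, timeout=None)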
""" from __future__ import absolute_import, unicode_literals from .dispatcher import EventDispatcher from .event import Event, event_exchange, get_exchange, group_from from .receiver import EventReceiver __all__ = [ 'Event', 'EventDispatcher', 'EventReceiver', 'event_exchange', 'get_exchange', 'group_from', ] celery-4.1.0/celery/events/cursesmon.py0000644000175000017500000004366013130607475020123 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Graphical monitor of Celery events using curses.""" from __future__ import absolute_import, print_function, unicode_literals import curses import sys import threading from datetime import datetime from itertools import count from textwrap import wrap from time import time from math import ceil from celery import VERSION_BANNER from celery import states from celery.app import app_or_default from celery.five import items, values from celery.utils.text import abbr, abbrtask __all__ = ['CursesMonitor', 'evtop'] BORDER_SPACING = 4 LEFT_BORDER_OFFSET = 3 UUID_WIDTH = 36 STATE_WIDTH = 8 TIMESTAMP_WIDTH = 8 MIN_WORKER_WIDTH = 15 MIN_TASK_WIDTH = 16 # this module is considered experimental # we don't care about coverage. STATUS_SCREEN = """\ events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all} """ class CursesMonitor(object): # pragma: no cover """A curses based Celery task monitor.""" keymap = {} win = None screen_delay = 10 selected_task = None selected_position = 0 selected_str = 'Selected: ' foreground = curses.COLOR_BLACK background = curses.COLOR_WHITE online_str = 'Workers online: ' help_title = 'Keys: ' help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit') greet = 'celery events {0}'.format(VERSION_BANNER) info_str = 'Info: ' def __init__(self, state, app, keymap=None): self.app = app self.keymap = keymap or self.keymap self.state = state default_keymap = { 'J': self.move_selection_down, 'K': self.move_selection_up, 'C': self.revoke_selection, 'T': self.selection_traceback, 'R': self.selection_result, 'I': self.selection_info, 'L': self.selection_rate_limit, } self.keymap = dict(default_keymap, **self.keymap) self.lock = threading.RLock() def format_row(self, uuid, task, worker, timestamp, state): mx = self.display_width # include spacing detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH if uuid_space < UUID_WIDTH: uuid_width = uuid_space else: uuid_width = UUID_WIDTH detail_width = detail_width - uuid_width - 1 task_width = int(ceil(detail_width / 2.0)) worker_width = detail_width - task_width - 1 uuid = abbr(uuid, uuid_width).ljust(uuid_width) worker = abbr(worker, worker_width).ljust(worker_width) task = abbrtask(task, task_width).ljust(task_width) state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH) timestamp = timestamp.ljust(TIMESTAMP_WIDTH) row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task, timestamp, state) if self.screen_width is None: self.screen_width = len(row[:mx]) return row[:mx] @property def screen_width(self): _, mx = self.win.getmaxyx() return mx @property def screen_height(self): my, _ = self.win.getmaxyx() return my @property def display_width(self): _, mx = self.win.getmaxyx() return mx - BORDER_SPACING @property def display_height(self): my, _ = self.win.getmaxyx() return my - 10 @property def limit(self): return self.display_height def find_position(self): if not self.tasks: return 0 for i, e in enumerate(self.tasks): if self.selected_task == e[0]: return i return 0 def move_selection_up(self): 
self.move_selection(-1) def move_selection_down(self): self.move_selection(1) def move_selection(self, direction=1): if not self.tasks: return pos = self.find_position() try: self.selected_task = self.tasks[pos + direction][0] except IndexError: self.selected_task = self.tasks[0][0] keyalias = {curses.KEY_DOWN: 'J', curses.KEY_UP: 'K', curses.KEY_ENTER: 'I'} def handle_keypress(self): try: key = self.win.getkey().upper() except Exception: # pylint: disable=broad-except return key = self.keyalias.get(key) or key handler = self.keymap.get(key) if handler is not None: handler() def alert(self, callback, title=None): self.win.erase() my, mx = self.win.getmaxyx() y = blank_line = count(2) if title: self.win.addstr(next(y), 3, title, curses.A_BOLD | curses.A_UNDERLINE) next(blank_line) callback(my, mx, next(y)) self.win.addstr(my - 1, 0, 'Press any key to continue...', curses.A_BOLD) self.win.refresh() while 1: try: return self.win.getkey().upper() except Exception: # pylint: disable=broad-except pass def selection_rate_limit(self): if not self.selected_task: return curses.beep() task = self.state.tasks[self.selected_task] if not task.name: return curses.beep() my, mx = self.win.getmaxyx() r = 'New rate limit: ' self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE) self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r))) rlimit = self.readline(my - 2, 3 + len(r)) if rlimit: reply = self.app.control.rate_limit(task.name, rlimit.strip(), reply=True) self.alert_remote_control_reply(reply) def alert_remote_control_reply(self, reply): def callback(my, mx, xs): y = count(xs) if not reply: self.win.addstr( next(y), 3, 'No replies received in 1s deadline.', curses.A_BOLD + curses.color_pair(2), ) return for subreply in reply: curline = next(y) host, response = next(items(subreply)) host = '{0}: '.format(host) self.win.addstr(curline, 3, host, curses.A_BOLD) attr = curses.A_NORMAL text = '' if 'error' in response: text = response['error'] attr |= curses.color_pair(2) elif 'ok' in response: text = response['ok'] attr |= curses.color_pair(3) self.win.addstr(curline, 3 + len(host), text, attr) return self.alert(callback, 'Remote Control Command Replies') def readline(self, x, y): buffer = str() curses.echo() try: i = 0 while 1: ch = self.win.getch(x, y + i) if ch != -1: if ch in (10, curses.KEY_ENTER): # enter break if ch in (27,): buffer = str() break buffer += chr(ch) i += 1 finally: curses.noecho() return buffer def revoke_selection(self): if not self.selected_task: return curses.beep() reply = self.app.control.revoke(self.selected_task, reply=True) self.alert_remote_control_reply(reply) def selection_info(self): if not self.selected_task: return def alert_callback(mx, my, xs): my, mx = self.win.getmaxyx() y = count(xs) task = self.state.tasks[self.selected_task] info = task.info(extra=['state']) infoitems = [ ('args', info.pop('args', None)), ('kwargs', info.pop('kwargs', None)) ] + list(info.items()) for key, value in infoitems: if key is None: continue value = str(value) curline = next(y) keys = key + ': ' self.win.addstr(curline, 3, keys, curses.A_BOLD) wrapped = wrap(value, mx - 2) if len(wrapped) == 1: self.win.addstr( curline, len(keys) + 3, abbr(wrapped[0], self.screen_width - (len(keys) + 3))) else: for subline in wrapped: nexty = next(y) if nexty >= my - 1: subline = ' ' * 4 + '[...]' elif nexty >= my: break self.win.addstr( nexty, 3, abbr(' ' * 4 + subline, self.screen_width - 4), curses.A_NORMAL, ) return self.alert( alert_callback, 'Task details for 
{0.selected_task}'.format(self), ) def selection_traceback(self): if not self.selected_task: return curses.beep() task = self.state.tasks[self.selected_task] if task.state not in states.EXCEPTION_STATES: return curses.beep() def alert_callback(my, mx, xs): y = count(xs) for line in task.traceback.split('\n'): self.win.addstr(next(y), 3, line) return self.alert( alert_callback, 'Task Exception Traceback for {0.selected_task}'.format(self), ) def selection_result(self): if not self.selected_task: return def alert_callback(my, mx, xs): y = count(xs) task = self.state.tasks[self.selected_task] result = (getattr(task, 'result', None) or getattr(task, 'exception', None)) for line in wrap(result or '', mx - 2): self.win.addstr(next(y), 3, line) return self.alert( alert_callback, 'Task Result for {0.selected_task}'.format(self), ) def display_task_row(self, lineno, task): state_color = self.state_colors.get(task.state) attr = curses.A_NORMAL if task.uuid == self.selected_task: attr = curses.A_STANDOUT timestamp = datetime.utcfromtimestamp( task.timestamp or time(), ) timef = timestamp.strftime('%H:%M:%S') hostname = task.worker.hostname if task.worker else '*NONE*' line = self.format_row(task.uuid, task.name, hostname, timef, task.state) self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr) if state_color: self.win.addstr(lineno, len(line) - STATE_WIDTH + BORDER_SPACING - 1, task.state, state_color | attr) def draw(self): with self.lock: win = self.win self.handle_keypress() x = LEFT_BORDER_OFFSET y = blank_line = count(2) my, mx = win.getmaxyx() win.erase() win.bkgd(' ', curses.color_pair(1)) win.border() win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) next(blank_line) win.addstr(next(y), x, self.format_row('UUID', 'TASK', 'WORKER', 'TIME', 'STATE'), curses.A_BOLD | curses.A_UNDERLINE) tasks = self.tasks if tasks: for row, (uuid, task) in enumerate(tasks): if row > self.display_height: break if task.uuid: lineno = next(y) self.display_task_row(lineno, task) # -- Footer next(blank_line) win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) # Selected Task Info if self.selected_task: win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) info = 'Missing extended info' detail = '' try: selection = self.state.tasks[self.selected_task] except KeyError: pass else: info = selection.info() if 'runtime' in info: info['runtime'] = '{0:.2f}'.format(info['runtime']) if 'result' in info: info['result'] = abbr(info['result'], 16) info = ' '.join( '{0}={1}'.format(key, value) for key, value in items(info) ) detail = '... 
-> key i' infowin = abbr(info, self.screen_width - len(self.selected_str) - 2, detail) win.addstr(my - 5, x + len(self.selected_str), infowin) # Make ellipsis bold if detail in infowin: detailpos = len(infowin) - len(detail) win.addstr(my - 5, x + len(self.selected_str) + detailpos, detail, curses.A_BOLD) else: win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) # Workers if self.workers: win.addstr(my - 4, x, self.online_str, curses.A_BOLD) win.addstr(my - 4, x + len(self.online_str), ', '.join(sorted(self.workers)), curses.A_NORMAL) else: win.addstr(my - 4, x, 'No workers discovered.') # Info win.addstr(my - 3, x, self.info_str, curses.A_BOLD) win.addstr( my - 3, x + len(self.info_str), STATUS_SCREEN.format( s=self.state, w_alive=len([w for w in values(self.state.workers) if w.alive]), w_all=len(self.state.workers), ), curses.A_DIM, ) # Help self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) self.safe_add_str(my - 2, x + len(self.help_title), self.help, curses.A_DIM) win.refresh() def safe_add_str(self, y, x, string, *args, **kwargs): if x + len(string) > self.screen_width: string = string[:self.screen_width - x] self.win.addstr(y, x, string, *args, **kwargs) def init_screen(self): with self.lock: self.win = curses.initscr() self.win.nodelay(True) self.win.keypad(True) curses.start_color() curses.init_pair(1, self.foreground, self.background) # exception states curses.init_pair(2, curses.COLOR_RED, self.background) # successful state curses.init_pair(3, curses.COLOR_GREEN, self.background) # revoked state curses.init_pair(4, curses.COLOR_MAGENTA, self.background) # greeting curses.init_pair(5, curses.COLOR_BLUE, self.background) # started state curses.init_pair(6, curses.COLOR_YELLOW, self.foreground) self.state_colors = {states.SUCCESS: curses.color_pair(3), states.REVOKED: curses.color_pair(4), states.STARTED: curses.color_pair(6)} for state in states.EXCEPTION_STATES: self.state_colors[state] = curses.color_pair(2) curses.cbreak() def resetscreen(self): with self.lock: curses.nocbreak() self.win.keypad(False) curses.echo() curses.endwin() def nap(self): curses.napms(self.screen_delay) @property def tasks(self): return list(self.state.tasks_by_time(limit=self.limit)) @property def workers(self): return [hostname for hostname, w in items(self.state.workers) if w.alive] class DisplayThread(threading.Thread): # pragma: no cover def __init__(self, display): self.display = display self.shutdown = False threading.Thread.__init__(self) def run(self): while not self.shutdown: self.display.draw() self.display.nap() def capture_events(app, state, display): # pragma: no cover def on_connection_error(exc, interval): print('Connection Error: {0!r}. 
Retry in {1}s.'.format( exc, interval), file=sys.stderr) while 1: print('-> evtop: starting capture...', file=sys.stderr) with app.connection_for_read() as conn: try: conn.ensure_connection(on_connection_error, app.conf.broker_connection_max_retries) recv = app.events.Receiver(conn, handlers={'*': state.event}) display.resetscreen() display.init_screen() recv.capture() except conn.connection_errors + conn.channel_errors as exc: print('Connection lost: {0!r}'.format(exc), file=sys.stderr) def evtop(app=None): # pragma: no cover """Start curses monitor.""" app = app_or_default(app) state = app.events.State() display = CursesMonitor(state, app) display.init_screen() refresher = DisplayThread(display) refresher.start() try: capture_events(app, state, display) except Exception: refresher.shutdown = True refresher.join() display.resetscreen() raise except (KeyboardInterrupt, SystemExit): refresher.shutdown = True refresher.join() display.resetscreen() if __name__ == '__main__': # pragma: no cover evtop() celery-4.1.0/celery/events/dispatcher.py0000644000175000017500000002146113130607475020226 0ustar omeromer00000000000000"""Event dispatcher sends events.""" from __future__ import absolute_import, unicode_literals import os import threading import time from collections import defaultdict, deque from kombu import Producer from celery.app import app_or_default from celery.five import items from celery.utils.nodenames import anon_nodename from celery.utils.time import utcoffset from .event import Event, get_exchange, group_from __all__ = ['EventDispatcher'] class EventDispatcher(object): """Dispatches event messages. Arguments: connection (kombu.Connection): Connection to the broker. hostname (str): Hostname to identify ourselves as, by default uses the hostname returned by :func:`~celery.utils.anon_nodename`. groups (Sequence[str]): List of groups to send events for. :meth:`send` will ignore send requests to groups not in this list. If this is :const:`None`, all events will be sent. Example groups include ``"task"`` and ``"worker"``. enabled (bool): Set to :const:`False` to not actually publish any events, making :meth:`send` a no-op. channel (kombu.Channel): Can be used instead of `connection` to specify an exact channel to use when sending events. buffer_while_offline (bool): If enabled events will be buffered while the connection is down. :meth:`flush` must be called as soon as the connection is re-established. Note: You need to :meth:`close` this after use. """ DISABLED_TRANSPORTS = {'sql'} app = None # set of callbacks to be called when :meth:`enabled`. on_enabled = None # set of callbacks to be called when :meth:`disabled`. 
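    # (Usage sketch, assuming a configured app:
    #
    #     with app.events.default_dispatcher() as dispatcher:
    #         dispatcher.send('worker-custom-event', value=42)
    #
    # ``app.events.default_dispatcher()`` yields an instance of this class
    # bound to a producer acquired from the connection pool.)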
on_disabled = None def __init__(self, connection=None, hostname=None, enabled=True, channel=None, buffer_while_offline=True, app=None, serializer=None, groups=None, delivery_mode=1, buffer_group=None, buffer_limit=24, on_send_buffered=None): self.app = app_or_default(app or self.app) self.connection = connection self.channel = channel self.hostname = hostname or anon_nodename() self.buffer_while_offline = buffer_while_offline self.buffer_group = buffer_group or frozenset() self.buffer_limit = buffer_limit self.on_send_buffered = on_send_buffered self._group_buffer = defaultdict(list) self.mutex = threading.Lock() self.producer = None self._outbound_buffer = deque() self.serializer = serializer or self.app.conf.event_serializer self.on_enabled = set() self.on_disabled = set() self.groups = set(groups or []) self.tzoffset = [-time.timezone, -time.altzone] self.clock = self.app.clock self.delivery_mode = delivery_mode if not connection and channel: self.connection = channel.connection.client self.enabled = enabled conninfo = self.connection or self.app.connection_for_write() self.exchange = get_exchange(conninfo) if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS: self.enabled = False if self.enabled: self.enable() self.headers = {'hostname': self.hostname} self.pid = os.getpid() def __enter__(self): return self def __exit__(self, *exc_info): self.close() def enable(self): self.producer = Producer(self.channel or self.connection, exchange=self.exchange, serializer=self.serializer, auto_declare=False) self.enabled = True for callback in self.on_enabled: callback() def disable(self): if self.enabled: self.enabled = False self.close() for callback in self.on_disabled: callback() def publish(self, type, fields, producer, blind=False, Event=Event, **kwargs): """Publish event using custom :class:`~kombu.Producer`. Arguments: type (str): Event type name, with group separated by dash (`-`). fields: Dictionary of event fields, must be json serializable. producer (kombu.Producer): Producer instance to use: only the ``publish`` method will be called. retry (bool): Retry in the event of connection failure. retry_policy (Mapping): Map of custom retry policy options. See :meth:`~kombu.Connection.ensure`. blind (bool): Don't set logical clock value (also don't forward the internal logical clock). Event (Callable): Event type used to create event. Defaults to :func:`Event`. utcoffset (Callable): Function returning the current utc offset in hours. """ clock = None if blind else self.clock.forward() event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), pid=self.pid, clock=clock, **fields) with self.mutex: return self._publish(event, producer, routing_key=type.replace('-', '.'), **kwargs) def _publish(self, event, producer, routing_key, retry=False, retry_policy=None, utcoffset=utcoffset): exchange = self.exchange try: producer.publish( event, routing_key=routing_key, exchange=exchange.name, retry=retry, retry_policy=retry_policy, declare=[exchange], serializer=self.serializer, headers=self.headers, delivery_mode=self.delivery_mode, ) except Exception as exc: # pylint: disable=broad-except if not self.buffer_while_offline: raise self._outbound_buffer.append((event, routing_key, exc)) def send(self, type, blind=False, utcoffset=utcoffset, retry=False, retry_policy=None, Event=Event, **fields): """Send event. Arguments: type (str): Event type name, with group separated by dash (`-`). retry (bool): Retry in the event of connection failure. 
retry_policy (Mapping): Map of custom retry policy options. See :meth:`~kombu.Connection.ensure`. blind (bool): Don't set logical clock value (also don't forward the internal logical clock). Event (Callable): Event type used to create event, defaults to :func:`Event`. utcoffset (Callable): Function returning the current utc offset in hours. **fields (Any): Event fields -- must be json serializable. """ if self.enabled: groups, group = self.groups, group_from(type) if groups and group not in groups: return if group in self.buffer_group: clock = self.clock.forward() event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), pid=self.pid, clock=clock, **fields) buf = self._group_buffer[group] buf.append(event) if len(buf) >= self.buffer_limit: self.flush() elif self.on_send_buffered: self.on_send_buffered() else: return self.publish(type, fields, self.producer, blind=blind, Event=Event, retry=retry, retry_policy=retry_policy) def flush(self, errors=True, groups=True): """Flush the outbound buffer.""" if errors: buf = list(self._outbound_buffer) try: with self.mutex: for event, routing_key, _ in buf: self._publish(event, self.producer, routing_key) finally: self._outbound_buffer.clear() if groups: with self.mutex: for group, events in items(self._group_buffer): self._publish(events, self.producer, '%s.multi' % group) events[:] = [] # list.clear def extend_buffer(self, other): """Copy the outbound buffer of another instance.""" self._outbound_buffer.extend(other._outbound_buffer) def close(self): """Close the event dispatcher.""" self.mutex.locked() and self.mutex.release() self.producer = None def _get_publisher(self): return self.producer def _set_publisher(self, producer): self.producer = producer publisher = property(_get_publisher, _set_publisher) # XXX compat celery-4.1.0/celery/events/state.py0000644000175000017500000006252513130607475017226 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """In-memory representation of cluster state. This module implements a data-structure used to keep track of the state of a cluster of workers and the tasks it is working on (by consuming events). For every event consumed the state is updated, so the state represents the state of the cluster at the time of the last event. Snapshots (:mod:`celery.events.snapshot`) can be used to take "pictures" of this state at regular intervals to, for example, store that in a database. """ from __future__ import absolute_import, unicode_literals import bisect import sys import threading from collections import Callable, defaultdict from datetime import datetime from decimal import Decimal from itertools import islice from operator import itemgetter from time import time from weakref import WeakSet, ref from kombu.clocks import timetuple from kombu.utils.objects import cached_property from celery import states from celery.five import items, python_2_unicode_compatible, values from celery.utils.functional import LRUCache, memoize, pass1 from celery.utils.log import get_logger __all__ = ['Worker', 'Task', 'State', 'heartbeat_expires'] # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. # pylint: disable=too-many-function-args # For some reason pylint thinks ._event is a method, when it's a property. #: Set if running PyPy PYPY = hasattr(sys, 'pypy_version_info') #: The window (in percentage) is added to the worker's heartbeat #: frequency. If the time between updates exceeds this window, #: then the worker is considered to be offline.
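#: (Worked example: with the default ``freq=60`` and this 200% window,
#: :func:`heartbeat_expires` below yields ``60 * (200 / 100.0) == 120``,
#: i.e. a worker is considered offline 120 seconds after its last
#: heartbeat.)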
HEARTBEAT_EXPIRE_WINDOW = 200 #: Max drift between event timestamp and time of event received #: before we alert that clocks may be unsynchronized. HEARTBEAT_DRIFT_MAX = 16 DRIFT_WARNING = """\ Substantial drift from %s may mean clocks are out of sync. Current drift is %s seconds. [orig: %s recv: %s] """ logger = get_logger(__name__) warn = logger.warning R_STATE = '<State: events={0.event_count} tasks={0.task_count}>' R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})' R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>' #: Mapping of task event names to task state. TASK_EVENT_TO_STATE = { 'sent': states.PENDING, 'received': states.RECEIVED, 'started': states.STARTED, 'failed': states.FAILURE, 'retried': states.RETRY, 'succeeded': states.SUCCESS, 'revoked': states.REVOKED, 'rejected': states.REJECTED, } class CallableDefaultdict(defaultdict): """:class:`~collections.defaultdict` with configurable missing callable. This makes it possible to do:: >>> add_tasks = state.tasks_by_type['proj.tasks.add'] while still supporting the method call:: >>> add_tasks = list(state.tasks_by_type( ... 'proj.tasks.add', reverse=True)) """ def __init__(self, fun, *args, **kwargs): self.fun = fun super(CallableDefaultdict, self).__init__(*args, **kwargs) def __call__(self, *args, **kwargs): return self.fun(*args, **kwargs) Callable.register(CallableDefaultdict) # noqa: E305 @memoize(maxsize=1000, keyfun=lambda a, _: a[0]) def _warn_drift(hostname, drift, local_received, timestamp): # we use memoize here so the warning is only logged once per hostname warn(DRIFT_WARNING, hostname, drift, datetime.fromtimestamp(local_received), datetime.fromtimestamp(timestamp)) def heartbeat_expires(timestamp, freq=60, expire_window=HEARTBEAT_EXPIRE_WINDOW, Decimal=Decimal, float=float, isinstance=isinstance): """Return time when heartbeat expires.""" # some json implementations return decimal.Decimal objects, # which aren't compatible with float. freq = float(freq) if isinstance(freq, Decimal) else freq if isinstance(timestamp, Decimal): timestamp = float(timestamp) return timestamp + (freq * (expire_window / 1e2)) def _depickle_task(cls, fields): return cls(**fields) def with_unique_field(attr): def _decorate_cls(cls): def __eq__(this, other): if isinstance(other, this.__class__): return getattr(this, attr) == getattr(other, attr) return NotImplemented cls.__eq__ = __eq__ def __ne__(this, other): res = this.__eq__(other) return True if res is NotImplemented else not res cls.__ne__ = __ne__ def __hash__(this): return hash(getattr(this, attr)) cls.__hash__ = __hash__ return cls return _decorate_cls @with_unique_field('hostname') @python_2_unicode_compatible class Worker(object): """Worker State.""" heartbeat_max = 4 expire_window = HEARTBEAT_EXPIRE_WINDOW _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock', 'active', 'processed', 'loadavg', 'sw_ident', 'sw_ver', 'sw_sys') if not PYPY: # pragma: no cover __slots__ = _fields + ('event', '__dict__', '__weakref__') def __init__(self, hostname=None, pid=None, freq=60, heartbeats=None, clock=0, active=None, processed=None, loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None): self.hostname = hostname self.pid = pid self.freq = freq self.heartbeats = [] if heartbeats is None else heartbeats self.clock = clock or 0 self.active = active self.processed = processed self.loadavg = loadavg self.sw_ident = sw_ident self.sw_ver = sw_ver self.sw_sys = sw_sys self.event = self._create_event_handler() def __reduce__(self): return self.__class__, (self.hostname, self.pid, self.freq, self.heartbeats, self.clock, self.active, self.processed, self.loadavg, self.sw_ident, self.sw_ver, self.sw_sys) def _create_event_handler(self): _set = object.__setattr__ hbmax = self.heartbeat_max heartbeats = self.heartbeats hb_pop = self.heartbeats.pop hb_append = self.heartbeats.append def event(type_, timestamp=None, local_received=None, fields=None, max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int, insort=bisect.insort, len=len): fields = fields or {} for k, v in items(fields): _set(self, k, v) if type_ == 'offline': heartbeats[:] = [] else: if not
def _depickle_task(cls, fields):
    return cls(**fields)


def with_unique_field(attr):

    def _decorate_cls(cls):

        def __eq__(this, other):
            if isinstance(other, this.__class__):
                return getattr(this, attr) == getattr(other, attr)
            return NotImplemented
        cls.__eq__ = __eq__

        def __ne__(this, other):
            res = this.__eq__(other)
            return True if res is NotImplemented else not res
        cls.__ne__ = __ne__

        def __hash__(this):
            return hash(getattr(this, attr))
        cls.__hash__ = __hash__

        return cls
    return _decorate_cls


@with_unique_field('hostname')
@python_2_unicode_compatible
class Worker(object):
    """Worker State."""

    heartbeat_max = 4
    expire_window = HEARTBEAT_EXPIRE_WINDOW

    _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
               'active', 'processed', 'loadavg', 'sw_ident',
               'sw_ver', 'sw_sys')
    if not PYPY:  # pragma: no cover
        __slots__ = _fields + ('event', '__dict__', '__weakref__')

    def __init__(self, hostname=None, pid=None, freq=60,
                 heartbeats=None, clock=0, active=None, processed=None,
                 loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
        self.hostname = hostname
        self.pid = pid
        self.freq = freq
        self.heartbeats = [] if heartbeats is None else heartbeats
        self.clock = clock or 0
        self.active = active
        self.processed = processed
        self.loadavg = loadavg
        self.sw_ident = sw_ident
        self.sw_ver = sw_ver
        self.sw_sys = sw_sys
        self.event = self._create_event_handler()

    def __reduce__(self):
        return self.__class__, (self.hostname, self.pid, self.freq,
                                self.heartbeats, self.clock, self.active,
                                self.processed, self.loadavg, self.sw_ident,
                                self.sw_ver, self.sw_sys)

    def _create_event_handler(self):
        _set = object.__setattr__
        hbmax = self.heartbeat_max
        heartbeats = self.heartbeats
        hb_pop = self.heartbeats.pop
        hb_append = self.heartbeats.append

        def event(type_, timestamp=None,
                  local_received=None, fields=None,
                  max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs,
                  int=int, insort=bisect.insort, len=len):
            fields = fields or {}
            for k, v in items(fields):
                _set(self, k, v)
            if type_ == 'offline':
                heartbeats[:] = []
            else:
                if not local_received or not timestamp:
                    return
                drift = abs(int(local_received) - int(timestamp))
                if drift > max_drift:
                    _warn_drift(self.hostname, drift,
                                local_received, timestamp)
                if local_received:  # pragma: no cover
                    hearts = len(heartbeats)
                    if hearts > hbmax - 1:
                        hb_pop(0)
                    if hearts and local_received > heartbeats[-1]:
                        hb_append(local_received)
                    else:
                        insort(heartbeats, local_received)
        return event

    def update(self, f, **kw):
        for k, v in items(dict(f, **kw) if kw else f):
            setattr(self, k, v)

    def __repr__(self):
        return R_WORKER.format(self)

    @property
    def status_string(self):
        return 'ONLINE' if self.alive else 'OFFLINE'

    @property
    def heartbeat_expires(self):
        return heartbeat_expires(self.heartbeats[-1],
                                 self.freq, self.expire_window)

    @property
    def alive(self, nowfun=time):
        return bool(self.heartbeats and nowfun() < self.heartbeat_expires)

    @property
    def id(self):
        return '{0.hostname}.{0.pid}'.format(self)


@with_unique_field('uuid')
@python_2_unicode_compatible
class Task(object):
    """Task State."""

    name = received = sent = started = succeeded = failed = retried = \
        revoked = rejected = args = kwargs = eta = expires = retries = \
        worker = result = exception = timestamp = runtime = traceback = \
        exchange = routing_key = root_id = parent_id = client = None
    state = states.PENDING
    clock = 0

    _fields = (
        'uuid', 'name', 'state', 'received', 'sent', 'started', 'rejected',
        'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
        'eta', 'expires', 'retries', 'worker', 'result', 'exception',
        'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
        'clock', 'client', 'root', 'root_id', 'parent', 'parent_id',
        'children',
    )
    if not PYPY:  # pragma: no cover
        __slots__ = ('__dict__', '__weakref__')

    #: How to merge out of order events.
    #: Disorder is detected by logical ordering (e.g., :event:`task-received`
    #: must've happened before a :event:`task-failed` event).
    #:
    #: A merge rule consists of a state and a list of fields to keep from
    #: that state.  ``(RECEIVED, ('name', 'args'))`` means the name and args
    #: fields are always taken from the RECEIVED state, and any values for
    #: these fields received before or after are simply ignored.
    merge_rules = {
        states.RECEIVED: (
            'name', 'args', 'kwargs', 'parent_id', 'root_id',
            'retries', 'eta', 'expires',
        ),
    }

    #: :meth:`info` displays these fields by default.
    _info_fields = (
        'args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
        'expires', 'exception', 'exchange', 'routing_key',
        'root_id', 'parent_id',
    )

    def __init__(self, uuid=None, cluster_state=None, children=None,
                 **kwargs):
        self.uuid = uuid
        self.cluster_state = cluster_state
        if self.cluster_state is not None:
            self.children = WeakSet(
                self.cluster_state.tasks.get(task_id)
                for task_id in children or ()
                if task_id in self.cluster_state.tasks
            )
        else:
            self.children = WeakSet()
        self._serializer_handlers = {
            'children': self._serializable_children,
            'root': self._serializable_root,
            'parent': self._serializable_parent,
        }
        if kwargs:
            self.__dict__.update(kwargs)

    def event(self, type_, timestamp=None, local_received=None, fields=None,
              precedence=states.precedence, items=items,
              setattr=setattr, task_event_to_state=TASK_EVENT_TO_STATE.get,
              RETRY=states.RETRY):
        fields = fields or {}

        # using .get is faster than catching KeyError in this case.
        state = task_event_to_state(type_)
        if state is not None:
            # sets, for example, self.succeeded to the timestamp.
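            # (e.g. a 'task-succeeded' event both records
            # ``self.succeeded = timestamp`` here and, further below,
            # merges fields according to ``merge_rules`` when events
            # arrive out of order.)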
setattr(self, type_, timestamp) else: state = type_.upper() # custom state # note that precedence here is reversed # see implementation in celery.states.state.__lt__ if state != RETRY and self.state != RETRY and \ precedence(state) > precedence(self.state): # this state logically happens-before the current state, so merge. keep = self.merge_rules.get(state) if keep is not None: fields = { k: v for k, v in items(fields) if k in keep } else: fields.update(state=state, timestamp=timestamp) # update current state with info from this event. self.__dict__.update(fields) def info(self, fields=None, extra=[]): """Information about this task suitable for on-screen display.""" fields = self._info_fields if fields is None else fields def _keys(): for key in list(fields) + list(extra): value = getattr(self, key, None) if value is not None: yield key, value return dict(_keys()) def __repr__(self): return R_TASK.format(self) def as_dict(self): get = object.__getattribute__ handler = self._serializer_handlers.get return { k: handler(k, pass1)(get(self, k)) for k in self._fields } def _serializable_children(self, value): return [task.id for task in self.children] def _serializable_root(self, value): return self.root_id def _serializable_parent(self, value): return self.parent_id def __reduce__(self): return _depickle_task, (self.__class__, self.as_dict()) @property def id(self): return self.uuid @property def origin(self): return self.client if self.worker is None else self.worker.id @property def ready(self): return self.state in states.READY_STATES @cached_property def parent(self): # issue github.com/mher/flower/issues/648 try: return self.parent_id and self.cluster_state.tasks[self.parent_id] except KeyError: return None @cached_property def root(self): # issue github.com/mher/flower/issues/648 try: return self.root_id and self.cluster_state.tasks[self.root_id] except KeyError: return None class State(object): """Records clusters state.""" Worker = Worker Task = Task event_count = 0 task_count = 0 heap_multiplier = 4 def __init__(self, callback=None, workers=None, tasks=None, taskheap=None, max_workers_in_memory=5000, max_tasks_in_memory=10000, on_node_join=None, on_node_leave=None, tasks_by_type=None, tasks_by_worker=None): self.event_callback = callback self.workers = (LRUCache(max_workers_in_memory) if workers is None else workers) self.tasks = (LRUCache(max_tasks_in_memory) if tasks is None else tasks) self._taskheap = [] if taskheap is None else taskheap self.max_workers_in_memory = max_workers_in_memory self.max_tasks_in_memory = max_tasks_in_memory self.on_node_join = on_node_join self.on_node_leave = on_node_leave self._mutex = threading.Lock() self.handlers = {} self._seen_types = set() self._tasks_to_resolve = {} self.rebuild_taskheap() # type: Mapping[TaskName, WeakSet[Task]] self.tasks_by_type = CallableDefaultdict( self._tasks_by_type, WeakSet) self.tasks_by_type.update( _deserialize_Task_WeakSet_Mapping(tasks_by_type, self.tasks)) # type: Mapping[Hostname, WeakSet[Task]] self.tasks_by_worker = CallableDefaultdict( self._tasks_by_worker, WeakSet) self.tasks_by_worker.update( _deserialize_Task_WeakSet_Mapping(tasks_by_worker, self.tasks)) @cached_property def _event(self): return self._create_dispatcher() def freeze_while(self, fun, *args, **kwargs): clear_after = kwargs.pop('clear_after', False) with self._mutex: try: return fun(*args, **kwargs) finally: if clear_after: self._clear() def clear_tasks(self, ready=True): with self._mutex: return self._clear_tasks(ready) def 
_clear_tasks(self, ready=True): if ready: in_progress = { uuid: task for uuid, task in self.itertasks() if task.state not in states.READY_STATES } self.tasks.clear() self.tasks.update(in_progress) else: self.tasks.clear() self._taskheap[:] = [] def _clear(self, ready=True): self.workers.clear() self._clear_tasks(ready) self.event_count = 0 self.task_count = 0 def clear(self, ready=True): with self._mutex: return self._clear(ready) def get_or_create_worker(self, hostname, **kwargs): """Get or create worker by hostname. Returns: Tuple: of ``(worker, was_created)`` pairs. """ try: worker = self.workers[hostname] if kwargs: worker.update(kwargs) return worker, False except KeyError: worker = self.workers[hostname] = self.Worker( hostname, **kwargs) return worker, True def get_or_create_task(self, uuid): """Get or create task by uuid.""" try: return self.tasks[uuid], False except KeyError: task = self.tasks[uuid] = self.Task(uuid, cluster_state=self) return task, True def event(self, event): with self._mutex: return self._event(event) def task_event(self, type_, fields): """Deprecated, use :meth:`event`.""" return self._event(dict(fields, type='-'.join(['task', type_])))[0] def worker_event(self, type_, fields): """Deprecated, use :meth:`event`.""" return self._event(dict(fields, type='-'.join(['worker', type_])))[0] def _create_dispatcher(self): # noqa: C901 # pylint: disable=too-many-statements # This code is highly optimized, but not for reusability. get_handler = self.handlers.__getitem__ event_callback = self.event_callback wfields = itemgetter('hostname', 'timestamp', 'local_received') tfields = itemgetter('uuid', 'hostname', 'timestamp', 'local_received', 'clock') taskheap = self._taskheap th_append = taskheap.append th_pop = taskheap.pop # Removing events from task heap is an O(n) operation, # so easier to just account for the common number of events # for each task (PENDING->RECEIVED->STARTED->final) #: an O(n) operation max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier add_type = self._seen_types.add on_node_join, on_node_leave = self.on_node_join, self.on_node_leave tasks, Task = self.tasks, self.Task workers, Worker = self.workers, self.Worker # avoid updating LRU entry at getitem get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__ get_task_by_type_set = self.tasks_by_type.__getitem__ get_task_by_worker_set = self.tasks_by_worker.__getitem__ def _event(event, timetuple=timetuple, KeyError=KeyError, insort=bisect.insort, created=True): self.event_count += 1 if event_callback: event_callback(self, event) group, _, subject = event['type'].partition('-') try: handler = get_handler(group) except KeyError: pass else: return handler(subject, event), subject if group == 'worker': try: hostname, timestamp, local_received = wfields(event) except KeyError: pass else: is_offline = subject == 'offline' try: worker, created = get_worker(hostname), False except KeyError: if is_offline: worker, created = Worker(hostname), False else: worker = workers[hostname] = Worker(hostname) worker.event(subject, timestamp, local_received, event) if on_node_join and (created or subject == 'online'): on_node_join(worker) if on_node_leave and is_offline: on_node_leave(worker) workers.pop(hostname, None) return (worker, created), subject elif group == 'task': (uuid, hostname, timestamp, local_received, clock) = tfields(event) # task-sent event is sent by client, not worker is_client_event = subject == 'sent' try: task, task_created = get_task(uuid), False except KeyError: 
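                    # first event seen for this task id: create the entry.
                    # (``tasks`` is an LRUCache bounded by
                    # ``max_tasks_in_memory``, so the oldest tasks are
                    # evicted once the limit is reached.)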
task = tasks[uuid] = Task(uuid, cluster_state=self) task_created = True if is_client_event: task.client = hostname else: try: worker = get_worker(hostname) except KeyError: worker = workers[hostname] = Worker(hostname) task.worker = worker if worker is not None and local_received: worker.event(None, local_received, timestamp) origin = hostname if is_client_event else worker.id # remove oldest event if exceeding the limit. heaps = len(taskheap) if heaps + 1 > max_events_in_heap: th_pop(0) # most events will be dated later than the previous. timetup = timetuple(clock, timestamp, origin, ref(task)) if heaps and timetup > taskheap[-1]: th_append(timetup) else: insort(taskheap, timetup) if subject == 'received': self.task_count += 1 task.event(subject, timestamp, local_received, event) task_name = task.name if task_name is not None: add_type(task_name) if task_created: # add to tasks_by_type index get_task_by_type_set(task_name).add(task) get_task_by_worker_set(hostname).add(task) if task.parent_id: try: parent_task = self.tasks[task.parent_id] except KeyError: self._add_pending_task_child(task) else: parent_task.children.add(task) try: _children = self._tasks_to_resolve.pop(uuid) except KeyError: pass else: task.children.update(_children) return (task, task_created), subject return _event def _add_pending_task_child(self, task): try: ch = self._tasks_to_resolve[task.parent_id] except KeyError: ch = self._tasks_to_resolve[task.parent_id] = WeakSet() ch.add(task) def rebuild_taskheap(self, timetuple=timetuple): heap = self._taskheap[:] = [ timetuple(t.clock, t.timestamp, t.origin, ref(t)) for t in values(self.tasks) ] heap.sort() def itertasks(self, limit=None): for index, row in enumerate(items(self.tasks)): yield row if limit and index + 1 >= limit: break def tasks_by_time(self, limit=None, reverse=True): """Generator yielding tasks ordered by time. Yields: Tuples of ``(uuid, Task)``. """ _heap = self._taskheap if reverse: _heap = reversed(_heap) seen = set() for evtup in islice(_heap, 0, limit): task = evtup[3]() if task is not None: uuid = task.uuid if uuid not in seen: yield uuid, task seen.add(uuid) tasks_by_timestamp = tasks_by_time def _tasks_by_type(self, name, limit=None, reverse=True): """Get all tasks by type. This is slower than accessing :attr:`tasks_by_type`, but will be ordered by time. Returns: Generator: giving ``(uuid, Task)`` pairs. """ return islice( ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse) if task.name == name), 0, limit, ) def _tasks_by_worker(self, hostname, limit=None, reverse=True): """Get all tasks by worker. Slower than accessing :attr:`tasks_by_worker`, but ordered by time. 
""" return islice( ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse) if task.worker.hostname == hostname), 0, limit, ) def task_types(self): """Return a list of all seen task types.""" return sorted(self._seen_types) def alive_workers(self): """Return a list of (seemingly) alive workers.""" return (w for w in values(self.workers) if w.alive) def __repr__(self): return R_STATE.format(self) def __reduce__(self): return self.__class__, ( self.event_callback, self.workers, self.tasks, None, self.max_workers_in_memory, self.max_tasks_in_memory, self.on_node_join, self.on_node_leave, _serialize_Task_WeakSet_Mapping(self.tasks_by_type), _serialize_Task_WeakSet_Mapping(self.tasks_by_worker), ) def _serialize_Task_WeakSet_Mapping(mapping): return {name: [t.id for t in tasks] for name, tasks in items(mapping)} def _deserialize_Task_WeakSet_Mapping(mapping, tasks): return {name: WeakSet(tasks[i] for i in ids if i in tasks) for name, ids in items(mapping or {})} celery-4.1.0/celery/events/dumper.py0000644000175000017500000000641313130607475017374 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Utility to dump events to screen. This is a simple program that dumps events to the console as they happen. Think of it like a `tcpdump` for Celery events. """ from __future__ import absolute_import, print_function, unicode_literals import sys from datetime import datetime from celery.app import app_or_default from celery.utils.functional import LRUCache from celery.utils.time import humanize_seconds __all__ = ['Dumper', 'evdump'] TASK_NAMES = LRUCache(limit=0xFFF) HUMAN_TYPES = { 'worker-offline': 'shutdown', 'worker-online': 'started', 'worker-heartbeat': 'heartbeat', } CONNECTION_ERROR = """\ -> Cannot connect to %s: %s. Trying again %s """ def humanize_type(type): try: return HUMAN_TYPES[type.lower()] except KeyError: return type.lower().replace('-', ' ') class Dumper(object): """Monitor events.""" def __init__(self, out=sys.stdout): self.out = out def say(self, msg): print(msg, file=self.out) # need to flush so that output can be piped. 
try: self.out.flush() except AttributeError: # pragma: no cover pass def on_event(self, ev): timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) type = ev.pop('type').lower() hostname = ev.pop('hostname') if type.startswith('task-'): uuid = ev.pop('uuid') if type in ('task-received', 'task-sent'): task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \ .format(ev.pop('name'), uuid, ev.pop('args'), ev.pop('kwargs')) else: task = TASK_NAMES.get(uuid, '') return self.format_task_event(hostname, timestamp, type, task, ev) fields = ', '.join( '{0}={1}'.format(key, ev[key]) for key in sorted(ev) ) sep = fields and ':' or '' self.say('{0} [{1}] {2}{3} {4}'.format( hostname, timestamp, humanize_type(type), sep, fields), ) def format_task_event(self, hostname, timestamp, type, task, event): fields = ', '.join( '{0}={1}'.format(key, event[key]) for key in sorted(event) ) sep = fields and ':' or '' self.say('{0} [{1}] {2}{3} {4} {5}'.format( hostname, timestamp, humanize_type(type), sep, task, fields), ) def evdump(app=None, out=sys.stdout): """Start event dump.""" app = app_or_default(app) dumper = Dumper(out=out) dumper.say('-> evdump: starting capture...') conn = app.connection_for_read().clone() def _error_handler(exc, interval): dumper.say(CONNECTION_ERROR % ( conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ') )) while 1: try: conn.ensure_connection(_error_handler) recv = app.events.Receiver(conn, handlers={'*': dumper.on_event}) recv.capture() except (KeyboardInterrupt, SystemExit): return conn and conn.close() except conn.connection_errors + conn.channel_errors: dumper.say('-> Connection lost, attempting reconnect') if __name__ == '__main__': # pragma: no cover evdump() celery-4.1.0/celery/events/snapshot.py0000644000175000017500000000650713130607475017743 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Periodically store events in a database. Consuming the events as a stream isn't always suitable so this module implements a system to take snapshots of the state of a cluster at regular intervals. There's a full implementation of this writing the snapshots to a database in :mod:`djcelery.snapshots` in the `django-celery` distribution. 
""" from __future__ import absolute_import, print_function, unicode_literals from kombu.utils.limits import TokenBucket from celery import platforms from celery.app import app_or_default from celery.utils.timer2 import Timer from celery.utils.dispatch import Signal from celery.utils.imports import instantiate from celery.utils.log import get_logger from celery.utils.time import rate __all__ = ['Polaroid', 'evcam'] logger = get_logger('celery.evcam') class Polaroid(object): """Record event snapshots.""" timer = None shutter_signal = Signal(name='shutter_signal', providing_args={'state'}) cleanup_signal = Signal(name='cleanup_signal') clear_after = False _tref = None _ctref = None def __init__(self, state, freq=1.0, maxrate=None, cleanup_freq=3600.0, timer=None, app=None): self.app = app_or_default(app) self.state = state self.freq = freq self.cleanup_freq = cleanup_freq self.timer = timer or self.timer or Timer() self.logger = logger self.maxrate = maxrate and TokenBucket(rate(maxrate)) def install(self): self._tref = self.timer.call_repeatedly(self.freq, self.capture) self._ctref = self.timer.call_repeatedly( self.cleanup_freq, self.cleanup, ) def on_shutter(self, state): pass def on_cleanup(self): pass def cleanup(self): logger.debug('Cleanup: Running...') self.cleanup_signal.send(sender=self.state) self.on_cleanup() def shutter(self): if self.maxrate is None or self.maxrate.can_consume(): logger.debug('Shutter: %s', self.state) self.shutter_signal.send(sender=self.state) self.on_shutter(self.state) def capture(self): self.state.freeze_while(self.shutter, clear_after=self.clear_after) def cancel(self): if self._tref: self._tref() # flush all received events. self._tref.cancel() if self._ctref: self._ctref.cancel() def __enter__(self): self.install() return self def __exit__(self, *exc_info): self.cancel() def evcam(camera, freq=1.0, maxrate=None, loglevel=0, logfile=None, pidfile=None, timer=None, app=None): """Start snapshot recorder.""" app = app_or_default(app) if pidfile: platforms.create_pidlock(pidfile) app.log.setup_logging_subsystem(loglevel, logfile) print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format( camera, freq)) state = app.events.State() cam = instantiate(camera, state, app=app, freq=freq, maxrate=maxrate, timer=timer) cam.install() conn = app.connection_for_read() recv = app.events.Receiver(conn, handlers={'*': state.event}) try: try: recv.capture(limit=None) except KeyboardInterrupt: raise SystemExit finally: cam.cancel() conn.close() celery-4.1.0/celery/events/receiver.py0000644000175000017500000001160213130607475017700 0ustar omeromer00000000000000"""Event receiver implementation.""" from __future__ import absolute_import, unicode_literals import time from operator import itemgetter from kombu import Queue from kombu.connection import maybe_channel from kombu.mixins import ConsumerMixin from celery import uuid from celery.app import app_or_default from celery.utils.time import adjust_timestamp from .event import get_exchange __all__ = ['EventReceiver'] CLIENT_CLOCK_SKEW = -1 _TZGETTER = itemgetter('utcoffset', 'timestamp') class EventReceiver(ConsumerMixin): """Capture events. Arguments: connection (kombu.Connection): Connection to the broker. handlers (Mapping[Callable]): Event handlers. This is a map of event type names and their handlers. The special handler `"*"` captures all events that don't have a handler. 
""" app = None def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix=None, accept=None, queue_ttl=None, queue_expires=None): self.app = app_or_default(app or self.app) self.channel = maybe_channel(channel) self.handlers = {} if handlers is None else handlers self.routing_key = routing_key self.node_id = node_id or uuid() self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix self.exchange = get_exchange( self.connection or self.app.connection_for_write()) if queue_ttl is None: queue_ttl = self.app.conf.event_queue_ttl if queue_expires is None: queue_expires = self.app.conf.event_queue_expires self.queue = Queue( '.'.join([self.queue_prefix, self.node_id]), exchange=self.exchange, routing_key=self.routing_key, auto_delete=True, durable=False, message_ttl=queue_ttl, expires=queue_expires, ) self.clock = self.app.clock self.adjust_clock = self.clock.adjust self.forward_clock = self.clock.forward if accept is None: accept = {self.app.conf.event_serializer, 'json'} self.accept = accept def process(self, type, event): """Process event by dispatching to configured handler.""" handler = self.handlers.get(type) or self.handlers.get('*') handler and handler(event) def get_consumers(self, Consumer, channel): return [Consumer(queues=[self.queue], callbacks=[self._receive], no_ack=True, accept=self.accept)] def on_consume_ready(self, connection, channel, consumers, wakeup=True, **kwargs): if wakeup: self.wakeup_workers(channel=channel) def itercapture(self, limit=None, timeout=None, wakeup=True): return self.consume(limit=limit, timeout=timeout, wakeup=wakeup) def capture(self, limit=None, timeout=None, wakeup=True): """Open up a consumer capturing events. This has to run in the main process, and it will never stop unless :attr:`EventDispatcher.should_stop` is set to True, or forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`. """ return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup)) def wakeup_workers(self, channel=None): self.app.control.broadcast('heartbeat', connection=self.connection, channel=channel) def event_from_message(self, body, localize=True, now=time.time, tzfields=_TZGETTER, adjust_timestamp=adjust_timestamp, CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW): type = body['type'] if type == 'task-sent': # clients never sync so cannot use their clock value _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW self.adjust_clock(_c) else: try: clock = body['clock'] except KeyError: body['clock'] = self.forward_clock() else: self.adjust_clock(clock) if localize: try: offset, timestamp = tzfields(body) except KeyError: pass else: body['timestamp'] = adjust_timestamp(timestamp, offset) body['local_received'] = now() return type, body def _receive(self, body, message, list=list, isinstance=isinstance): if isinstance(body, list): # celery 4.0: List of events process, from_message = self.process, self.event_from_message [process(*from_message(event)) for event in body] else: self.process(*self.event_from_message(body)) @property def connection(self): return self.channel.connection.client if self.channel else None celery-4.1.0/celery/canvas.py0000644000175000017500000014115213130607475016047 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Composing task work-flows. .. seealso: You should import these from :mod:`celery` and not this module. 
""" from __future__ import absolute_import, unicode_literals import itertools import operator import sys from collections import MutableSequence, deque from copy import deepcopy from functools import partial as _partial, reduce from operator import itemgetter from kombu.utils.functional import fxrange, reprcall from kombu.utils.objects import cached_property from kombu.utils.uuid import uuid from vine import barrier from celery._state import current_app from celery.five import python_2_unicode_compatible from celery.local import try_import from celery.result import GroupResult from celery.utils import abstract from celery.utils.functional import ( maybe_list, is_list, _regen, regen, chunks as _chunks, seq_concat_seq, seq_concat_item, ) from celery.utils.objects import getitem_property from celery.utils.text import truncate, remove_repeating_from_task __all__ = [ 'Signature', 'chain', 'xmap', 'xstarmap', 'chunks', 'group', 'chord', 'signature', 'maybe_signature', ] PY3 = sys.version_info[0] == 3 # json in Python 2.7 borks if dict contains byte keys. JSON_NEEDS_UNICODE_KEYS = PY3 and not try_import('simplejson') def maybe_unroll_group(g): """Unroll group with only one member.""" # Issue #1656 try: size = len(g.tasks) except TypeError: try: size = g.tasks.__length_hint__() except (AttributeError, TypeError): return g else: return list(g.tasks)[0] if size == 1 else g else: return g.tasks[0] if size == 1 else g def task_name_from(task): return getattr(task, 'name', task) def _upgrade(fields, sig): """Used by custom signatures in .from_dict, to keep common fields.""" sig.update(chord_size=fields.get('chord_size')) return sig @abstract.CallableSignature.register @python_2_unicode_compatible class Signature(dict): """Task Signature. Class that wraps the arguments and execution options for a single task invocation. Used as the parts in a :class:`group` and other constructs, or to pass tasks around as callbacks while being compatible with serializers with a strict type subset. Signatures can also be created from tasks: - Using the ``.signature()`` method that has the same signature as ``Task.apply_async``: .. code-block:: pycon >>> add.signature(args=(1,), kwargs={'kw': 2}, options={}) - or the ``.s()`` shortcut that works for star arguments: .. code-block:: pycon >>> add.s(1, kw=2) - the ``.s()`` shortcut does not allow you to specify execution options but there's a chaning `.set` method that returns the signature: .. code-block:: pycon >>> add.s(2, 2).set(countdown=10).set(expires=30).delay() Note: You should use :func:`~celery.signature` to create new signatures. The ``Signature`` class is the type returned by that function and should be used for ``isinstance`` checks for signatures. See Also: :ref:`guide-canvas` for the complete guide. Arguments: task (Task, str): Either a task class/instance, or the name of a task. args (Tuple): Positional arguments to apply. kwargs (Dict): Keyword arguments to apply. options (Dict): Additional options to :meth:`Task.apply_async`. 
Note: If the first argument is a :class:`dict`, the other arguments will be ignored and the values in the dict will be used instead:: >>> s = signature('tasks.add', args=(2, 2)) >>> signature(s) {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}} """ TYPES = {} _app = _type = None @classmethod def register_type(cls, name=None): def _inner(subclass): cls.TYPES[name or subclass.__name__] = subclass return subclass return _inner @classmethod def from_dict(cls, d, app=None): typ = d.get('subtask_type') if typ: target_cls = cls.TYPES[typ] if target_cls is not cls: return target_cls.from_dict(d, app=app) return Signature(d, app=app) def __init__(self, task=None, args=None, kwargs=None, options=None, type=None, subtask_type=None, immutable=False, app=None, **ex): self._app = app if isinstance(task, dict): super(Signature, self).__init__(task) # works like dict(d) else: # Also supports using task class/instance instead of string name. try: task_name = task.name except AttributeError: task_name = task else: self._type = task super(Signature, self).__init__( task=task_name, args=tuple(args or ()), kwargs=kwargs or {}, options=dict(options or {}, **ex), subtask_type=subtask_type, immutable=immutable, chord_size=None, ) def __call__(self, *partial_args, **partial_kwargs): """Call the task directly (in the current process).""" args, kwargs, _ = self._merge(partial_args, partial_kwargs, None) return self.type(*args, **kwargs) def delay(self, *partial_args, **partial_kwargs): """Shortcut to :meth:`apply_async` using star arguments.""" return self.apply_async(partial_args, partial_kwargs) def apply(self, args=(), kwargs={}, **options): """Call task locally. Same as :meth:`apply_async` but executed the task inline instead of sending a task message. """ # For callbacks: extra args are prepended to the stored args. args, kwargs, options = self._merge(args, kwargs, options) return self.type.apply(args, kwargs, **options) def apply_async(self, args=(), kwargs={}, route_name=None, **options): """Apply this task asynchronously. Arguments: args (Tuple): Partial args to be prepended to the existing args. kwargs (Dict): Partial kwargs to be merged with existing kwargs. options (Dict): Partial options to be merged with existing options. Returns: ~@AsyncResult: promise of future evaluation. See also: :meth:`~@Task.apply_async` and the :ref:`guide-calling` guide. """ try: _apply = self._apply_async except IndexError: # pragma: no cover # no tasks for chain, etc to find type return # For callbacks: extra args are prepended to the stored args. if args or kwargs or options: args, kwargs, options = self._merge(args, kwargs, options) else: args, kwargs, options = self.args, self.kwargs, self.options # pylint: disable=too-many-function-args # Borks on this, as it's a property return _apply(args, kwargs, **options) def _merge(self, args=(), kwargs={}, options={}, force=False): if self.immutable and not force: return (self.args, self.kwargs, dict(self.options, **options) if options else self.options) return (tuple(args) + tuple(self.args) if args else self.args, dict(self.kwargs, **kwargs) if kwargs else self.kwargs, dict(self.options, **options) if options else self.options) def clone(self, args=(), kwargs={}, **opts): """Create a copy of this signature. Arguments: args (Tuple): Partial args to be prepended to the existing args. kwargs (Dict): Partial kwargs to be merged with existing kwargs. options (Dict): Partial options to be merged with existing options. """ # need to deepcopy options so origins links etc. 
is not modified. if args or kwargs or opts: args, kwargs, opts = self._merge(args, kwargs, opts) else: args, kwargs, opts = self.args, self.kwargs, self.options s = Signature.from_dict({'task': self.task, 'args': tuple(args), 'kwargs': kwargs, 'options': deepcopy(opts), 'subtask_type': self.subtask_type, 'chord_size': self.chord_size, 'immutable': self.immutable}, app=self._app) s._type = self._type return s partial = clone def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None): """Finalize the signature by adding a concrete task id. The task won't be called and you shouldn't call the signature twice after freezing it as that'll result in two task messages using the same task id. Returns: ~@AsyncResult: promise of future evaluation. """ # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. opts = self.options try: tid = opts['task_id'] except KeyError: tid = opts['task_id'] = _id or uuid() if root_id: opts['root_id'] = root_id if parent_id: opts['parent_id'] = parent_id if 'reply_to' not in opts: opts['reply_to'] = self.app.oid if group_id: opts['group_id'] = group_id if chord: opts['chord'] = chord # pylint: disable=too-many-function-args # Borks on this, as it's a property. return self.AsyncResult(tid) _freeze = freeze def replace(self, args=None, kwargs=None, options=None): """Replace the args, kwargs or options set for this signature. These are only replaced if the argument for the section is not :const:`None`. """ s = self.clone() if args is not None: s.args = args if kwargs is not None: s.kwargs = kwargs if options is not None: s.options = options return s def set(self, immutable=None, **options): """Set arbitrary execution options (same as ``.options.update(…)``). Returns: Signature: This is a chaining method call (i.e., it will return ``self``). """ if immutable is not None: self.set_immutable(immutable) self.options.update(options) return self def set_immutable(self, immutable): self.immutable = immutable def _with_list_option(self, key): items = self.options.setdefault(key, []) if not isinstance(items, MutableSequence): items = self.options[key] = [items] return items def append_to_list_option(self, key, value): items = self._with_list_option(key) if value not in items: items.append(value) return value def extend_list_option(self, key, value): items = self._with_list_option(key) items.extend(maybe_list(value)) def link(self, callback): """Add callback task to be applied if this task succeeds. Returns: Signature: the argument passed, for chaining or use with :func:`~functools.reduce`. """ return self.append_to_list_option('link', callback) def link_error(self, errback): """Add callback task to be applied on error in task execution. Returns: Signature: the argument passed, for chaining or use with :func:`~functools.reduce`. """ return self.append_to_list_option('link_error', errback) def on_error(self, errback): """Version of :meth:`link_error` that supports chaining. on_error chains the original signature, not the errback so:: >>> add.s(2, 2).on_error(errback.s()).delay() calls the ``add`` task, not the ``errback`` task, but the reverse is true for :meth:`link_error`. """ self.link_error(errback) return self def flatten_links(self): """Return a recursive list of dependencies. "unchain" if you will, but with links intact. 
""" return list(itertools.chain.from_iterable(itertools.chain( [[self]], (link.flatten_links() for link in maybe_list(self.options.get('link')) or []) ))) def __or__(self, other): # These could be implemented in each individual class, # I'm sure, but for now we have this. if isinstance(other, chord) and len(other.tasks) == 1: # chord with one header -> header[0] | body other = other.tasks[0] | other.body if isinstance(self, group): if isinstance(other, group): # group() | group() -> single group return group( itertools.chain(self.tasks, other.tasks), app=self.app) # group() | task -> chord if len(self.tasks) == 1: # group(ONE.s()) | other -> ONE.s() | other # Issue #3323 return self.tasks[0] | other return chord(self, body=other, app=self._app) elif isinstance(other, group): # unroll group with one member other = maybe_unroll_group(other) if isinstance(self, _chain): # chain | group() -> chain sig = self.clone() sig.tasks.append(other) return sig # task | group() -> chain return _chain(self, other, app=self.app) if not isinstance(self, _chain) and isinstance(other, _chain): # task | chain -> chain return _chain( seq_concat_seq((self,), other.tasks), app=self._app) elif isinstance(other, _chain): # chain | chain -> chain sig = self.clone() if isinstance(sig.tasks, tuple): sig.tasks = list(sig.tasks) sig.tasks.extend(other.tasks) return sig elif isinstance(self, chord): # chord(ONE, body) | other -> ONE | body | other # chord with one header task is unecessary. if len(self.tasks) == 1: return self.tasks[0] | self.body | other # chord | task -> attach to body sig = self.clone() sig.body = sig.body | other return sig elif isinstance(other, Signature): if isinstance(self, _chain): if isinstance(self.tasks[-1], group): # CHAIN [last item is group] | TASK -> chord sig = self.clone() sig.tasks[-1] = chord( sig.tasks[-1], other, app=self._app) return sig elif isinstance(self.tasks[-1], chord): # CHAIN [last item is chord] -> chain with chord body. sig = self.clone() sig.tasks[-1].body = sig.tasks[-1].body | other return sig else: # chain | task -> chain return _chain( seq_concat_item(self.tasks, other), app=self._app) # task | task -> chain return _chain(self, other, app=self._app) return NotImplemented def election(self): type = self.type app = type.app tid = self.options.get('task_id') or uuid() with app.producer_or_acquire(None) as P: props = type.backend.on_task_call(P, tid) app.control.election(tid, 'task', self.clone(task_id=tid, **props), connection=P.connection) return type.AsyncResult(tid) def reprcall(self, *args, **kwargs): args, kwargs, _ = self._merge(args, kwargs, {}, force=True) return reprcall(self['task'], args, kwargs) def __deepcopy__(self, memo): memo[id(self)] = self return dict(self) def __invert__(self): return self.apply_async().get() def __reduce__(self): # for serialization, the task type is lazily loaded, # and not stored in the dict itself. 
return signature, (dict(self),) def __json__(self): return dict(self) def __repr__(self): return self.reprcall() if JSON_NEEDS_UNICODE_KEYS: # pragma: no cover def items(self): for k, v in dict.items(self): yield k.decode() if isinstance(k, bytes) else k, v @property def name(self): # for duck typing compatibility with Task.name return self.task @cached_property def type(self): return self._type or self.app.tasks[self['task']] @cached_property def app(self): return self._app or current_app @cached_property def AsyncResult(self): try: return self.type.AsyncResult except KeyError: # task not registered return self.app.AsyncResult @cached_property def _apply_async(self): try: return self.type.apply_async except KeyError: return _partial(self.app.send_task, self['task']) id = getitem_property('options.task_id', 'Task UUID') parent_id = getitem_property('options.parent_id', 'Task parent UUID.') root_id = getitem_property('options.root_id', 'Task root UUID.') task = getitem_property('task', 'Name of task.') args = getitem_property('args', 'Positional arguments to task.') kwargs = getitem_property('kwargs', 'Keyword arguments to task.') options = getitem_property('options', 'Task execution options.') subtask_type = getitem_property('subtask_type', 'Type of signature') chord_size = getitem_property( 'chord_size', 'Size of chord (if applicable)') immutable = getitem_property( 'immutable', 'Flag set if no longer accepts new arguments') @Signature.register_type(name='chain') @python_2_unicode_compatible class _chain(Signature): tasks = getitem_property('kwargs.tasks', 'Tasks in chain.') @classmethod def from_dict(cls, d, app=None): tasks = d['kwargs']['tasks'] if tasks: if isinstance(tasks, tuple): # aaaargh tasks = d['kwargs']['tasks'] = list(tasks) tasks = [maybe_signature(task, app=app) for task in tasks] return _upgrade(d, _chain(tasks, app=app, **d['options'])) def __init__(self, *tasks, **options): tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0]) else tasks) Signature.__init__( self, 'celery.chain', (), {'tasks': tasks}, **options ) self._use_link = options.pop('use_link', None) self.subtask_type = 'chain' self._frozen = None def __call__(self, *args, **kwargs): if self.tasks: return self.apply_async(args, kwargs) def clone(self, *args, **kwargs): to_signature = maybe_signature s = Signature.clone(self, *args, **kwargs) s.kwargs['tasks'] = [ to_signature(sig, app=self._app, clone=True) for sig in s.kwargs['tasks'] ] return s def apply_async(self, args=(), kwargs={}, **options): # python is best at unpacking kwargs, so .run is here to do that. app = self.app if app.conf.task_always_eager: return self.apply(args, kwargs, **options) return self.run(args, kwargs, app=app, **( dict(self.options, **options) if options else self.options)) def run(self, args=(), kwargs={}, group_id=None, chord=None, task_id=None, link=None, link_error=None, publisher=None, producer=None, root_id=None, parent_id=None, app=None, **options): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. 
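        # Overall flow: freeze/prepare the chain steps, then send only
        # the first task -- the remaining tasks travel with the message
        # (in the ``chain`` field for protocol 2, or as result links
        # for protocol 1).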
app = app or self.app use_link = self._use_link if use_link is None and app.conf.task_protocol == 1: use_link = True args = (tuple(args) + tuple(self.args) if args and not self.immutable else self.args) if self._frozen: tasks, results = self._frozen else: tasks, results = self.prepare_steps( args, self.tasks, root_id, parent_id, link_error, app, task_id, group_id, chord, ) if results: if link: tasks[0].extend_list_option('link', link) first_task = tasks.pop() # chain option may already be set, resulting in # "multiple values for keyword argument 'chain'" error. # Issue #3379. options['chain'] = tasks if not use_link else None first_task.apply_async(**options) return results[0] def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. _, results = self._frozen = self.prepare_steps( self.args, self.tasks, root_id, parent_id, None, self.app, _id, group_id, chord, clone=False, ) return results[0] def prepare_steps(self, args, tasks, root_id=None, parent_id=None, link_error=None, app=None, last_task_id=None, group_id=None, chord_body=None, clone=True, from_dict=Signature.from_dict): app = app or self.app # use chain message field for protocol 2 and later. # this avoids pickle blowing the stack on the recursion # required by linking task together in a tree structure. # (why is pickle using recursion? or better yet why cannot python # do tail call optimization making recursion actually useful?) use_link = self._use_link if use_link is None and app.conf.task_protocol == 1: use_link = True steps = deque(tasks) steps_pop = steps.pop steps_extend = steps.extend prev_task = None prev_res = None tasks, results = [], [] i = 0 # NOTE: We are doing this in reverse order. # The result is a list of tasks in reverse order, that is # passed as the ``chain`` message field. # As it's reversed the worker can just do ``chain.pop()`` to # get the next task in the chain. while steps: task = steps_pop() is_first_task, is_last_task = not steps, not i if not isinstance(task, abstract.CallableSignature): task = from_dict(task, app=app) if isinstance(task, group): task = maybe_unroll_group(task) # first task gets partial args from chain if clone: task = task.clone(args) if is_first_task else task.clone() elif is_first_task: task.args = tuple(args) + tuple(task.args) if isinstance(task, _chain): # splice the chain steps_extend(task.tasks) continue if isinstance(task, group) and prev_task: # automatically upgrade group(...) | s to chord(group, s) # for chords we freeze by pretending it's a normal # signature instead of a group. tasks.pop() results.pop() task = chord( task, body=prev_task, task_id=prev_res.task_id, root_id=root_id, app=app, ) if is_last_task: # chain(task_id=id) means task id is set for the last task # in the chain. If the chord is part of a chord/group # then that chord/group must synchronize based on the # last task in the chain, so we only set the group_id and # chord callback for the last task. res = task.freeze( last_task_id, root_id=root_id, group_id=group_id, chord=chord_body, ) else: res = task.freeze(root_id=root_id) i += 1 if prev_task: if use_link: # link previous task to this task. 
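                    # (the steps are walked in reverse, so ``prev_task``
                    #  is actually the task that runs *after* ``task``
                    #  in the chain.)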
task.link(prev_task) if prev_res and not prev_res.parent: prev_res.parent = res if link_error: for errback in maybe_list(link_error): task.link_error(errback) tasks.append(task) results.append(res) prev_task, prev_res = task, res if isinstance(task, chord): app.backend.ensure_chords_allowed() # If the task is a chord, and the body is a chain # the chain has already been prepared, and res is # set to the last task in the callback chain. # We need to change that so that it points to the # group result object. node = res while node.parent: node = node.parent prev_res = node return tasks, results def apply(self, args=(), kwargs={}, **options): last, fargs = None, args for task in self.tasks: res = task.clone(fargs).apply( last and (last.get(),), **dict(self.options, **options)) res.parent, last, fargs = last, res, None return last @property def app(self): app = self._app if app is None: try: app = self.tasks[0]._app except LookupError: pass return app or current_app def __repr__(self): if not self.tasks: return '<{0}@{1:#x}: empty>'.format( type(self).__name__, id(self)) return remove_repeating_from_task( self.tasks[0]['task'], ' | '.join(repr(t) for t in self.tasks)) class chain(_chain): """Chain tasks together. Each tasks follows one another, by being applied as a callback of the previous task. Note: If called with only one argument, then that argument must be an iterable of tasks to chain: this allows us to use generator expressions. Example: This is effectively :math:`((2 + 2) + 4)`: .. code-block:: pycon >>> res = chain(add.s(2, 2), add.s(4))() >>> res.get() 8 Calling a chain will return the result of the last task in the chain. You can get to the other tasks by following the ``result.parent``'s: .. code-block:: pycon >>> res.parent.get() 4 Using a generator expression: .. code-block:: pycon >>> lazy_chain = chain(add.s(i) for i in range(10)) >>> res = lazy_chain(3) Arguments: *tasks (Signature): List of task signatures to chain. If only one argument is passed and that argument is an iterable, then that'll be used as the list of signatures to chain instead. This means that you can use a generator expression. Returns: ~celery.chain: A lazy signature that can be called to apply the first task in the chain. When that task succeeed the next task in the chain is applied, and so on. """ # could be function, but must be able to reference as :class:`chain`. def __new__(cls, *tasks, **kwargs): # This forces `chain(X, Y, Z)` to work the same way as `X | Y | Z` if not kwargs and tasks: if len(tasks) == 1 and is_list(tasks[0]): # ensure chain(generator_expression) works. tasks = tasks[0] return reduce(operator.or_, tasks) return super(chain, cls).__new__(cls, *tasks, **kwargs) class _basemap(Signature): _task_name = None _unpack_args = itemgetter('task', 'it') @classmethod def from_dict(cls, d, app=None): return _upgrade( d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']), ) def __init__(self, task, it, **options): Signature.__init__( self, self._task_name, (), {'task': task, 'it': regen(it)}, immutable=True, **options ) def apply_async(self, args=(), kwargs={}, **opts): # need to evaluate generators task, it = self._unpack_args(self.kwargs) return self.type.apply_async( (), {'task': task, 'it': list(it)}, route_name=task_name_from(self.kwargs.get('task')), **opts ) @Signature.register_type() @python_2_unicode_compatible class xmap(_basemap): """Map operation for tasks. Note: Tasks executed sequentially in process, this is not a parallel operation like :class:`group`. 
""" _task_name = 'celery.map' def __repr__(self): task, it = self._unpack_args(self.kwargs) return '[{0}(x) for x in {1}]'.format( task.task, truncate(repr(it), 100)) @Signature.register_type() @python_2_unicode_compatible class xstarmap(_basemap): """Map operation for tasks, using star arguments.""" _task_name = 'celery.starmap' def __repr__(self): task, it = self._unpack_args(self.kwargs) return '[{0}(*x) for x in {1}]'.format( task.task, truncate(repr(it), 100)) @Signature.register_type() class chunks(Signature): """Partition of tasks in n chunks.""" _unpack_args = itemgetter('task', 'it', 'n') @classmethod def from_dict(cls, d, app=None): return _upgrade( d, chunks(*cls._unpack_args( d['kwargs']), app=app, **d['options']), ) def __init__(self, task, it, n, **options): Signature.__init__( self, 'celery.chunks', (), {'task': task, 'it': regen(it), 'n': n}, immutable=True, **options ) def __call__(self, **options): return self.apply_async(**options) def apply_async(self, args=(), kwargs={}, **opts): return self.group().apply_async( args, kwargs, route_name=task_name_from(self.kwargs.get('task')), **opts ) def group(self): # need to evaluate generators task, it, n = self._unpack_args(self.kwargs) return group((xstarmap(task, part, app=self._app) for part in _chunks(iter(it), n)), app=self._app) @classmethod def apply_chunks(cls, task, it, n, app=None): return cls(task, it, n, app=app)() def _maybe_group(tasks, app): if isinstance(tasks, dict): tasks = signature(tasks, app=app) if isinstance(tasks, (group, _chain)): tasks = tasks.tasks elif isinstance(tasks, abstract.CallableSignature): tasks = [tasks] else: tasks = [signature(t, app=app) for t in tasks] return tasks @Signature.register_type() @python_2_unicode_compatible class group(Signature): """Creates a group of tasks to be executed in parallel. A group is lazy so you must call it to take action and evaluate the group. Note: If only one argument is passed, and that argument is an iterable then that'll be used as the list of tasks instead: this allows us to use ``group`` with generator expressions. Example: >>> lazy_group = group([add.s(2, 2), add.s(4, 4)]) >>> promise = lazy_group() # <-- evaluate: returns lazy result. >>> promise.get() # <-- will wait for the task to return [4, 8] Arguments: *tasks (List[Signature]): A list of signatures that this group will call. If there's only one argument, and that argument is an iterable, then that'll define the list of signatures instead. **options (Any): Execution options applied to all tasks in the group. Returns: ~celery.group: signature that when called will then call all of the tasks in the group (and return a :class:`GroupResult` instance that can be used to inspect the state of the group). 
""" tasks = getitem_property('kwargs.tasks', 'Tasks in group.') @classmethod def from_dict(cls, d, app=None): return _upgrade( d, group(d['kwargs']['tasks'], app=app, **d['options']), ) def __init__(self, *tasks, **options): if len(tasks) == 1: tasks = tasks[0] if isinstance(tasks, group): tasks = tasks.tasks if not isinstance(tasks, _regen): tasks = regen(tasks) Signature.__init__( self, 'celery.group', (), {'tasks': tasks}, **options ) self.subtask_type = 'group' def __call__(self, *partial_args, **options): return self.apply_async(partial_args, **options) def skew(self, start=1.0, stop=None, step=1.0): it = fxrange(start, stop, step, repeatlast=True) for task in self.tasks: task.set(countdown=next(it)) return self def apply_async(self, args=(), kwargs=None, add_to_parent=True, producer=None, link=None, link_error=None, **options): if link is not None: raise TypeError('Cannot add link to group: use a chord') if link_error is not None: raise TypeError( 'Cannot add link to group: do that on individual tasks') app = self.app if app.conf.task_always_eager: return self.apply(args, kwargs, **options) if not self.tasks: return self.freeze() options, group_id, root_id = self._freeze_gid(options) tasks = self._prepared(self.tasks, [], group_id, root_id, app) p = barrier() results = list(self._apply_tasks(tasks, producer, app, p, args=args, kwargs=kwargs, **options)) result = self.app.GroupResult(group_id, results, ready_barrier=p) p.finalize() # - Special case of group(A.s() | group(B.s(), C.s())) # That is, group with single item that's a chain but the # last task in that chain is a group. # # We cannot actually support arbitrary GroupResults in chains, # but this special case we can. if len(result) == 1 and isinstance(result[0], GroupResult): result = result[0] parent_task = app.current_worker_task if add_to_parent and parent_task: parent_task.add_trail(result) return result def apply(self, args=(), kwargs={}, **options): app = self.app if not self.tasks: return self.freeze() # empty group returns GroupResult options, group_id, root_id = self._freeze_gid(options) tasks = self._prepared(self.tasks, [], group_id, root_id, app) return app.GroupResult(group_id, [ sig.apply(args=args, kwargs=kwargs, **options) for sig, _ in tasks ]) def set_immutable(self, immutable): for task in self.tasks: task.set_immutable(immutable) def link(self, sig): # Simply link to first task sig = sig.clone().set(immutable=True) return self.tasks[0].link(sig) def link_error(self, sig): sig = sig.clone().set(immutable=True) return self.tasks[0].link_error(sig) def _prepared(self, tasks, partial_args, group_id, root_id, app, CallableSignature=abstract.CallableSignature, from_dict=Signature.from_dict, isinstance=isinstance, tuple=tuple): for task in tasks: if isinstance(task, CallableSignature): # local sigs are always of type Signature, and we # clone them to make sure we don't modify the originals. task = task.clone() else: # serialized sigs must be converted to Signature. 
task = from_dict(task, app=app) if isinstance(task, group): # needs yield_from :( unroll = task._prepared( task.tasks, partial_args, group_id, root_id, app, ) for taskN, resN in unroll: yield taskN, resN else: if partial_args and not task.immutable: task.args = tuple(partial_args) + tuple(task.args) yield task, task.freeze(group_id=group_id, root_id=root_id) def _apply_tasks(self, tasks, producer=None, app=None, p=None, add_to_parent=None, chord=None, args=None, kwargs=None, **options): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. app = app or self.app with app.producer_or_acquire(producer) as producer: for sig, res in tasks: sig.apply_async(producer=producer, add_to_parent=False, chord=sig.options.get('chord') or chord, args=args, kwargs=kwargs, **options) # adding callback to result, such that it will gradually # fulfill the barrier. # # Using barrier.add would use result.then, but we need # to add the weak argument here to only create a weak # reference to the object. if p and not p.cancelled and not p.ready: p.size += 1 res.then(p, weak=True) yield res # <-- r.parent, etc set in the frozen result. def _freeze_gid(self, options): # remove task_id and use that as the group_id, # if we don't remove it then every task will have the same id... options = dict(self.options, **options) options['group_id'] = group_id = ( options.pop('task_id', uuid())) return options, group_id, options.get('root_id') def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. opts = self.options try: gid = opts['task_id'] except KeyError: gid = opts['task_id'] = uuid() if group_id: opts['group_id'] = group_id if chord: opts['chord'] = chord root_id = opts.setdefault('root_id', root_id) parent_id = opts.setdefault('parent_id', parent_id) new_tasks = [] # Need to unroll subgroups early so that chord gets the # right result instance for chord_unlock etc. results = list(self._freeze_unroll( new_tasks, group_id, chord, root_id, parent_id, )) if isinstance(self.tasks, MutableSequence): self.tasks[:] = new_tasks else: self.tasks = new_tasks return self.app.GroupResult(gid, results) _freeze = freeze def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. stack = deque(self.tasks) while stack: task = maybe_signature(stack.popleft(), app=self._app).clone() if isinstance(task, group): stack.extendleft(task.tasks) else: new_tasks.append(task) yield task.freeze(group_id=group_id, chord=chord, root_id=root_id, parent_id=parent_id) def __repr__(self): if self.tasks: return remove_repeating_from_task( self.tasks[0]['task'], 'group({0.tasks!r})'.format(self)) return 'group()' def __len__(self): return len(self.tasks) @property def app(self): app = self._app if app is None: try: app = self.tasks[0].app except LookupError: pass return app if app is not None else current_app @Signature.register_type() @python_2_unicode_compatible class chord(Signature): r"""Barrier synchronization primitive. A chord consists of a header and a body. The header is a group of tasks that must complete before the callback is called. A chord is essentially a callback for a group of tasks. The body is applied with the return values of all the header tasks as a list. Example: The chord: .. 
code-block:: pycon >>> res = chord([add.s(2, 2), add.s(4, 4)])(sum_task.s()) is effectively :math:`\Sigma ((2 + 2) + (4 + 4))`: .. code-block:: pycon >>> res.get() 12 """ @classmethod def from_dict(cls, d, app=None): args, d['kwargs'] = cls._unpack_args(**d['kwargs']) return _upgrade(d, cls(*args, app=app, **d)) @staticmethod def _unpack_args(header=None, body=None, **kwargs): # Python signatures are better at extracting keys from dicts # than manually popping things off. return (header, body), kwargs def __init__(self, header, body=None, task='celery.chord', args=(), kwargs={}, app=None, **options): Signature.__init__( self, task, args, dict(kwargs=kwargs, header=_maybe_group(header, app), body=maybe_signature(body, app=app)), app=app, **options ) self.subtask_type = 'chord' def __call__(self, body=None, **options): return self.apply_async((), {'body': body} if body else {}, **options) def freeze(self, _id=None, group_id=None, chord=None, root_id=None, parent_id=None): # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. if not isinstance(self.tasks, group): self.tasks = group(self.tasks, app=self.app) header_result = self.tasks.freeze( parent_id=parent_id, root_id=root_id, chord=self.body) bodyres = self.body.freeze(_id, root_id=root_id) # we need to link the body result back to the group result, # but the body may actually be a chain, # so find the first result without a parent node = bodyres seen = set() while node: if node.id in seen: raise RuntimeError('Recursive result parents') seen.add(node.id) if node.parent is None: node.parent = header_result break node = node.parent self.id = self.tasks.id return bodyres def apply_async(self, args=(), kwargs={}, task_id=None, producer=None, publisher=None, connection=None, router=None, result_cls=None, **options): kwargs = kwargs or {} args = (tuple(args) + tuple(self.args) if args and not self.immutable else self.args) body = kwargs.pop('body', None) or self.kwargs['body'] kwargs = dict(self.kwargs['kwargs'], **kwargs) body = body.clone(**options) app = self._get_app(body) tasks = (self.tasks.clone() if isinstance(self.tasks, group) else group(self.tasks, app=app)) if app.conf.task_always_eager: return self.apply(args, kwargs, body=body, task_id=task_id, **options) if len(self.tasks) == 1: # chord([A], B) can be optimized as A | B # - Issue #3323 return (self.tasks[0] | body).set(task_id=task_id).apply_async( args, kwargs, **options) # chord([A, B, ...], C) return self.run(tasks, body, args, task_id=task_id, **options) def apply(self, args=(), kwargs={}, propagate=True, body=None, **options): body = self.body if body is None else body tasks = (self.tasks.clone() if isinstance(self.tasks, group) else group(self.tasks, app=self.app)) return body.apply( args=(tasks.apply(args, kwargs).get(propagate=propagate),), ) def _traverse_tasks(self, tasks, value=None): stack = deque(tasks) while stack: task = stack.popleft() if isinstance(task, group): stack.extend(task.tasks) else: yield task if value is None else value def __length_hint__(self): tasks = (self.tasks.tasks if isinstance(self.tasks, group) else self.tasks) return sum(self._traverse_tasks(tasks, 1)) def run(self, header, body, partial_args, app=None, interval=None, countdown=1, max_retries=None, eager=False, task_id=None, **options): app = app or self._get_app(body) group_id = header.options.get('task_id') or uuid() root_id = body.options.get('root_id') body.chord_size = self.__length_hint__() options = dict(self.options, **options) if options else self.options 
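        # task_id is popped below so it can't override the body id that
        # ``body.freeze(task_id, ...)`` pins a few lines further down.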
        if options:
            options.pop('task_id', None)
            body.options.update(options)

        results = header.freeze(
            group_id=group_id, chord=body, root_id=root_id).results
        bodyres = body.freeze(task_id, root_id=root_id)

        # Chains should not be passed to the header tasks. See #3771
        options.pop('chain', None)

        parent = app.backend.apply_chord(
            header, partial_args, group_id, body,
            interval=interval, countdown=countdown,
            options=options, max_retries=max_retries,
            result=results)
        bodyres.parent = parent
        return bodyres

    def clone(self, *args, **kwargs):
        s = Signature.clone(self, *args, **kwargs)
        # need to make copy of body
        try:
            s.kwargs['body'] = maybe_signature(s.kwargs['body'], clone=True)
        except (AttributeError, KeyError):
            pass
        return s

    def link(self, callback):
        self.body.link(callback)
        return callback

    def link_error(self, errback):
        self.body.link_error(errback)
        return errback

    def set_immutable(self, immutable):
        # changes mutability of header only, not callback.
        for task in self.tasks:
            task.set_immutable(immutable)

    def __repr__(self):
        if self.body:
            if isinstance(self.body, _chain):
                return remove_repeating_from_task(
                    self.body.tasks[0]['task'],
                    '%({0} | {1!r})'.format(
                        self.body.tasks[0].reprcall(self.tasks),
                        chain(self.body.tasks[1:], app=self._app),
                    ),
                )
            return '%' + remove_repeating_from_task(
                self.body['task'], self.body.reprcall(self.tasks))
        return '<chord without body: {0.tasks!r}>'.format(self)

    @cached_property
    def app(self):
        return self._get_app(self.body)

    def _get_app(self, body=None):
        app = self._app
        if app is None:
            try:
                tasks = self.tasks.tasks  # is a group
            except AttributeError:
                tasks = self.tasks
            if len(tasks):
                app = tasks[0]._app
            if app is None and body is not None:
                app = body._app
        return app if app is not None else current_app

    tasks = getitem_property('kwargs.header', 'Tasks in chord header.')
    body = getitem_property('kwargs.body', 'Body task of chord.')


def signature(varies, *args, **kwargs):
    """Create new signature.

    - if the first argument is a signature already then it's cloned.
    - if the first argument is a dict, then a Signature version is returned.

    Returns:
        Signature: The resulting signature.
    """
    app = kwargs.get('app')
    if isinstance(varies, dict):
        if isinstance(varies, abstract.CallableSignature):
            return varies.clone()
        return Signature.from_dict(varies, app=app)
    return Signature(varies, *args, **kwargs)


subtask = signature  # noqa: E305 XXX compat


def maybe_signature(d, app=None, clone=False):
    """Ensure *d* is a signature, or None.

    Arguments:
        d (Optional[Union[abstract.CallableSignature, Mapping]]):
            Signature or dict-serialized signature.
        app (celery.Celery): App to bind signature to.
        clone (bool): If *d* is already a signature, the signature
           will be cloned when this flag is enabled.

    Returns:
        Optional[abstract.CallableSignature]
    """
    if d is not None:
        if isinstance(d, abstract.CallableSignature):
            if clone:
                d = d.clone()
        elif isinstance(d, dict):
            d = signature(d)
        if app is not None:
            d._app = app
    return d


maybe_subtask = maybe_signature  # noqa: E305 XXX compat
celery-4.1.0/celery/__init__.py0000644000175000017500000001427113135426314016330 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Distributed Task Queue."""
# :copyright: (c) 2015-2016 Ask Solem.  All rights reserved.
# :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved.
# :copyright: (c) 2009 - 2012 Ask Solem and individual contributors,
#                 All rights reserved.
# :license: BSD (3 Clause), see LICENSE for more details.
from __future__ import absolute_import, print_function, unicode_literals import os import re import sys from collections import namedtuple SERIES = 'latentcall' __version__ = '4.1.0' __author__ = 'Ask Solem' __contact__ = 'ask@celeryproject.org' __homepage__ = 'http://celeryproject.org' __docformat__ = 'restructuredtext' __keywords__ = 'task job queue distributed messaging actor' # -eof meta- __all__ = [ 'Celery', 'bugreport', 'shared_task', 'task', 'current_app', 'current_task', 'maybe_signature', 'chain', 'chord', 'chunks', 'group', 'signature', 'xmap', 'xstarmap', 'uuid', ] VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES) version_info_t = namedtuple('version_info_t', ( 'major', 'minor', 'micro', 'releaselevel', 'serial', )) # bumpversion can only search for {current_version} # so we have to parse the version here. _temp = re.match( r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups() VERSION = version_info = version_info_t( int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '') del _temp del re if os.environ.get('C_IMPDEBUG'): # pragma: no cover from .five import builtins def debug_import(name, locals=None, globals=None, fromlist=None, level=-1, real_import=builtins.__import__): glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals importer_name = glob and glob.get('__name__') or 'unknown' print('-- {0} imports {1}'.format(importer_name, name)) return real_import(name, locals, globals, fromlist, level) builtins.__import__ = debug_import # This is never executed, but tricks static analyzers (PyDev, PyCharm, # pylint, etc.) into knowing the types of these symbols, and what # they contain. STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False if STATICA_HACK: # pragma: no cover from celery.app import shared_task # noqa from celery.app.base import Celery # noqa from celery.app.utils import bugreport # noqa from celery.app.task import Task # noqa from celery._state import current_app, current_task # noqa from celery.canvas import ( # noqa chain, chord, chunks, group, signature, maybe_signature, xmap, xstarmap, subtask, ) from celery.utils import uuid # noqa # Eventlet/gevent patching must happen before importing # anything else, so these tools must be at top-level. def _find_option_with_arg(argv, short_opts=None, long_opts=None): """Search argv for options specifying short and longopt alternatives. Returns: str: value for option found Raises: KeyError: if option not found. """ for i, arg in enumerate(argv): if arg.startswith('-'): if long_opts and arg.startswith('--'): name, sep, val = arg.partition('=') if name in long_opts: return val if sep else argv[i + 1] if short_opts and arg in short_opts: return argv[i + 1] raise KeyError('|'.join(short_opts or [] + long_opts or [])) def _patch_eventlet(): import eventlet import eventlet.debug eventlet.monkey_patch() blockdetect = float(os.environ.get('EVENTLET_NOBLOCK', 0)) if blockdetect: eventlet.debug.hub_blocking_detection(blockdetect, blockdetect) def _patch_gevent(): import gevent from gevent import monkey, signal as gevent_signal monkey.patch_all() if gevent.version_info[0] == 0: # pragma: no cover # Signals aren't working in gevent versions <1.0, # and aren't monkey patched by patch_all() _signal = __import__('signal') _signal.signal = gevent_signal def maybe_patch_concurrency(argv=sys.argv, short_opts=['-P'], long_opts=['--pool'], patches={'eventlet': _patch_eventlet, 'gevent': _patch_gevent}): """Apply eventlet/gevent monkeypatches. 
With short and long opt alternatives that specify the command line option to set the pool, this makes sure that anything that needs to be patched is completed as early as possible. (e.g., eventlet/gevent monkey patches). """ try: pool = _find_option_with_arg(argv, short_opts, long_opts) except KeyError: pass else: try: patcher = patches[pool] except KeyError: pass else: patcher() # set up eventlet/gevent environments ASAP from celery import concurrency concurrency.get_implementation(pool) # Lazy loading from . import local # noqa # this just creates a new module, that imports stuff on first attribute # access. This makes the library faster to use. old_module, new_module = local.recreate_module( # pragma: no cover __name__, by_module={ 'celery.app': ['Celery', 'bugreport', 'shared_task'], 'celery.app.task': ['Task'], 'celery._state': ['current_app', 'current_task'], 'celery.canvas': [ 'Signature', 'chain', 'chord', 'chunks', 'group', 'signature', 'maybe_signature', 'subtask', 'xmap', 'xstarmap', ], 'celery.utils': ['uuid'], }, direct={'task': 'celery.task'}, __package__='celery', __file__=__file__, __path__=__path__, __doc__=__doc__, __version__=__version__, __author__=__author__, __contact__=__contact__, __homepage__=__homepage__, __docformat__=__docformat__, local=local, VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER, version_info_t=version_info_t, version_info=version_info, maybe_patch_concurrency=maybe_patch_concurrency, _find_option_with_arg=_find_option_with_arg, absolute_import=absolute_import, unicode_literals=unicode_literals, print_function=print_function, ) celery-4.1.0/celery/signals.py0000644000175000017500000000763113135426300016226 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Celery Signals. This module defines the signals (Observer pattern) sent by both workers and clients. Functions can be connected to these signals, and connected functions are called whenever a signal is called. .. seealso:: :ref:`signals` for more information. 
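Example:
    Connecting a handler to the :data:`task_failure` signal (a minimal
    sketch; ``log_failure`` is an illustrative name, not part of this
    module):

    .. code-block:: python

        from celery.signals import task_failure

        @task_failure.connect
        def log_failure(sender=None, task_id=None, exception=None,
                        **kwargs):
            print('task %s raised: %r' % (task_id, exception))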
""" from __future__ import absolute_import, unicode_literals from .utils.dispatch import Signal __all__ = [ 'before_task_publish', 'after_task_publish', 'task_prerun', 'task_postrun', 'task_success', 'task_retry', 'task_failure', 'task_revoked', 'celeryd_init', 'celeryd_after_setup', 'worker_init', 'worker_process_init', 'worker_ready', 'worker_shutdown', 'worker_shutting_down', 'setup_logging', 'after_setup_logger', 'after_setup_task_logger', 'beat_init', 'beat_embedded_init', 'heartbeat_sent', 'eventlet_pool_started', 'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown', 'eventlet_pool_apply', ] # - Task before_task_publish = Signal( name='before_task_publish', providing_args={ 'body', 'exchange', 'routing_key', 'headers', 'properties', 'declare', 'retry_policy', }, ) after_task_publish = Signal( name='after_task_publish', providing_args={'body', 'exchange', 'routing_key'}, ) task_prerun = Signal( name='task_prerun', providing_args={'task_id', 'task', 'args', 'kwargs'}, ) task_postrun = Signal( name='task_postrun', providing_args={'task_id', 'task', 'args', 'kwargs', 'retval'}, ) task_success = Signal( name='task_success', providing_args={'result'}, ) task_retry = Signal( name='task_retry', providing_args={'request', 'reason', 'einfo'}, ) task_failure = Signal( name='task_failure', providing_args={ 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo', }, ) task_revoked = Signal( name='task_revoked', providing_args={ 'request', 'terminated', 'signum', 'expired', }, ) task_rejected = Signal( name='task_rejected', providing_args={'message', 'exc'}, ) task_unknown = Signal( name='task_unknown', providing_args={'message', 'exc', 'name', 'id'}, ) #: Deprecated, use after_task_publish instead. task_sent = Signal( name='task_sent', providing_args={ 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset', }, ) # - Prorgam: `celery worker` celeryd_init = Signal( name='celeryd_init', providing_args={'instance', 'conf', 'options'}, ) celeryd_after_setup = Signal( name='celeryd_after_setup', providing_args={'instance', 'conf'}, ) # - Worker import_modules = Signal(name='import_modules') worker_init = Signal(name='worker_init') worker_process_init = Signal(name='worker_process_init') worker_process_shutdown = Signal(name='worker_process_shutdown') worker_ready = Signal(name='worker_ready') worker_shutdown = Signal(name='worker_shutdown') worker_shutting_down = Signal(name='worker_shutting_down') heartbeat_sent = Signal(name='heartbeat_sent') # - Logging setup_logging = Signal( name='setup_logging', providing_args={ 'loglevel', 'logfile', 'format', 'colorize', }, ) after_setup_logger = Signal( name='after_setup_logger', providing_args={ 'logger', 'loglevel', 'logfile', 'format', 'colorize', }, ) after_setup_task_logger = Signal( name='after_setup_task_logger', providing_args={ 'logger', 'loglevel', 'logfile', 'format', 'colorize', }, ) # - Beat beat_init = Signal(name='beat_init') beat_embedded_init = Signal(name='beat_embedded_init') # - Eventlet eventlet_pool_started = Signal(name='eventlet_pool_started') eventlet_pool_preshutdown = Signal(name='eventlet_pool_preshutdown') eventlet_pool_postshutdown = Signal(name='eventlet_pool_postshutdown') eventlet_pool_apply = Signal( name='eventlet_pool_apply', providing_args={'target', 'args', 'kwargs'}, ) # - Programs user_preload_options = Signal( name='user_preload_options', providing_args={'app', 'options'}, ) celery-4.1.0/celery/concurrency/0000755000175000017500000000000013135426347016552 5ustar 
omeromer00000000000000celery-4.1.0/celery/concurrency/base.py0000644000175000017500000001041413130607475020034 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Base Execution Pool.""" from __future__ import absolute_import, unicode_literals import logging import os import sys from billiard.einfo import ExceptionInfo from billiard.exceptions import WorkerLostError from kombu.utils.encoding import safe_repr from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.five import monotonic, reraise from celery.utils import timer2 from celery.utils.text import truncate from celery.utils.log import get_logger __all__ = ['BasePool', 'apply_target'] logger = get_logger('celery.pool') def apply_target(target, args=(), kwargs={}, callback=None, accept_callback=None, pid=None, getpid=os.getpid, propagate=(), monotonic=monotonic, **_): """Apply function within pool context.""" if accept_callback: accept_callback(pid or getpid(), monotonic()) try: ret = target(*args, **kwargs) except propagate: raise except Exception: raise except (WorkerShutdown, WorkerTerminate): raise except BaseException as exc: try: reraise(WorkerLostError, WorkerLostError(repr(exc)), sys.exc_info()[2]) except WorkerLostError: callback(ExceptionInfo()) else: callback(ret) class BasePool(object): """Task pool.""" RUN = 0x1 CLOSE = 0x2 TERMINATE = 0x3 Timer = timer2.Timer #: set to true if the pool can be shutdown from within #: a signal handler. signal_safe = True #: set to true if pool uses greenlets. is_green = False _state = None _pool = None _does_debug = True #: only used by multiprocessing pool uses_semaphore = False task_join_will_block = True body_can_be_buffer = False def __init__(self, limit=None, putlocks=True, forking_enable=True, callbacks_propagate=(), app=None, **options): self.limit = limit self.putlocks = putlocks self.options = options self.forking_enable = forking_enable self.callbacks_propagate = callbacks_propagate self.app = app def on_start(self): pass def did_start_ok(self): return True def flush(self): pass def on_stop(self): pass def register_with_event_loop(self, loop): pass def on_apply(self, *args, **kwargs): pass def on_terminate(self): pass def on_soft_timeout(self, job): pass def on_hard_timeout(self, job): pass def maintain_pool(self, *args, **kwargs): pass def terminate_job(self, pid, signal=None): raise NotImplementedError( '{0} does not implement kill_job'.format(type(self))) def restart(self): raise NotImplementedError( '{0} does not implement restart'.format(type(self))) def stop(self): self.on_stop() self._state = self.TERMINATE def terminate(self): self._state = self.TERMINATE self.on_terminate() def start(self): self._does_debug = logger.isEnabledFor(logging.DEBUG) self.on_start() self._state = self.RUN def close(self): self._state = self.CLOSE self.on_close() def on_close(self): pass def apply_async(self, target, args=[], kwargs={}, **options): """Equivalent of the :func:`apply` built-in function. Callbacks should optimally return as soon as possible since otherwise the thread which handles the result will get blocked. 
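
        Example (a sketch, assuming ``pool`` is a started pool instance
        whose :meth:`on_apply` accepts these arguments, as the solo
        pool's :func:`apply_target` does):

        .. code-block:: python

            def on_result(ret):
                print('target returned: %r' % (ret,))

            pool.apply_async(sum, args=([1, 2, 3],), callback=on_result)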
""" if self._does_debug: logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)', target, truncate(safe_repr(args), 1024), truncate(safe_repr(kwargs), 1024)) return self.on_apply(target, args, kwargs, waitforslot=self.putlocks, callbacks_propagate=self.callbacks_propagate, **options) def _get_info(self): return { 'max-concurrency': self.limit, } @property def info(self): return self._get_info() @property def active(self): return self._state == self.RUN @property def num_processes(self): return self.limit celery-4.1.0/celery/concurrency/__init__.py0000644000175000017500000000142113130607475020657 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Pool implementation abstract factory, and alias definitions.""" from __future__ import absolute_import, unicode_literals # Import from kombu directly as it's used # early in the import stage, where celery.utils loads # too much (e.g., for eventlet patching) from kombu.utils.imports import symbol_by_name __all__ = ['get_implementation'] ALIASES = { 'prefork': 'celery.concurrency.prefork:TaskPool', 'eventlet': 'celery.concurrency.eventlet:TaskPool', 'gevent': 'celery.concurrency.gevent:TaskPool', 'solo': 'celery.concurrency.solo:TaskPool', 'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias } def get_implementation(cls): """Return pool implementation by name.""" return symbol_by_name(cls, ALIASES) celery-4.1.0/celery/concurrency/solo.py0000644000175000017500000000130013130607475020070 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Single-threaded execution pool.""" from __future__ import absolute_import, unicode_literals import os from .base import BasePool, apply_target __all__ = ['TaskPool'] class TaskPool(BasePool): """Solo task pool (blocking, inline, fast).""" body_can_be_buffer = True def __init__(self, *args, **kwargs): super(TaskPool, self).__init__(*args, **kwargs) self.on_apply = apply_target self.limit = 1 def _get_info(self): return { 'max-concurrency': 1, 'processes': [os.getpid()], 'max-tasks-per-child': None, 'put-guarded-by-semaphore': True, 'timeouts': (), } celery-4.1.0/celery/concurrency/gevent.py0000644000175000017500000000663113130607475020420 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Gevent execution pool.""" from __future__ import absolute_import, unicode_literals from kombu.async import timer as _timer from kombu.five import monotonic from . import base try: from gevent import Timeout except ImportError: # pragma: no cover Timeout = None # noqa __all__ = ['TaskPool'] # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. 
def apply_timeout(target, args=(), kwargs={}, callback=None, accept_callback=None, pid=None, timeout=None, timeout_callback=None, Timeout=Timeout, apply_target=base.apply_target, **rest): try: with Timeout(timeout): return apply_target(target, args, kwargs, callback, accept_callback, pid, propagate=(Timeout,), **rest) except Timeout: return timeout_callback(False, timeout) class Timer(_timer.Timer): def __init__(self, *args, **kwargs): from gevent.greenlet import Greenlet, GreenletExit class _Greenlet(Greenlet): cancel = Greenlet.kill self._Greenlet = _Greenlet self._GreenletExit = GreenletExit super(Timer, self).__init__(*args, **kwargs) self._queue = set() def _enter(self, eta, priority, entry, **kwargs): secs = max(eta - monotonic(), 0) g = self._Greenlet.spawn_later(secs, entry) self._queue.add(g) g.link(self._entry_exit) g.entry = entry g.eta = eta g.priority = priority g.canceled = False return g def _entry_exit(self, g): try: g.kill() finally: self._queue.discard(g) def clear(self): queue = self._queue while queue: try: queue.pop().kill() except KeyError: pass @property def queue(self): return self._queue class TaskPool(base.BasePool): """GEvent Pool.""" Timer = Timer signal_safe = False is_green = True task_join_will_block = False _pool = None _quick_put = None def __init__(self, *args, **kwargs): from gevent import spawn_raw from gevent.pool import Pool self.Pool = Pool self.spawn_n = spawn_raw self.timeout = kwargs.get('timeout') super(TaskPool, self).__init__(*args, **kwargs) def on_start(self): self._pool = self.Pool(self.limit) self._quick_put = self._pool.spawn def on_stop(self): if self._pool is not None: self._pool.join() def on_apply(self, target, args=None, kwargs=None, callback=None, accept_callback=None, timeout=None, timeout_callback=None, apply_target=base.apply_target, **_): timeout = self.timeout if timeout is None else timeout return self._quick_put(apply_timeout if timeout else apply_target, target, args, kwargs, callback, accept_callback, timeout=timeout, timeout_callback=timeout_callback) def grow(self, n=1): self._pool._semaphore.counter += n self._pool.size += n def shrink(self, n=1): self._pool._semaphore.counter -= n self._pool.size -= n @property def num_processes(self): return len(self._pool) celery-4.1.0/celery/concurrency/prefork.py0000644000175000017500000001272413130607475020600 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Prefork execution pool. Pool implementation using :mod:`multiprocessing`. """ from __future__ import absolute_import, unicode_literals import os from billiard.common import REMAP_SIGTERM, TERM_SIGNAME from billiard import forking_enable from billiard.pool import RUN, CLOSE, Pool as BlockingPool from celery import platforms from celery import signals from celery._state import set_default_app, _set_task_join_will_block from celery.app import trace from celery.concurrency.base import BasePool from celery.five import items from celery.utils.functional import noop from celery.utils.log import get_logger from .asynpool import AsynPool __all__ = ['TaskPool', 'process_initializer', 'process_destructor'] #: List of signals to reset when a child process starts. WORKER_SIGRESET = { 'SIGTERM', 'SIGHUP', 'SIGTTIN', 'SIGTTOU', 'SIGUSR1', } #: List of signals to ignore when a child process starts. 
if REMAP_SIGTERM: WORKER_SIGIGNORE = {'SIGINT', TERM_SIGNAME} else: WORKER_SIGIGNORE = {'SIGINT'} logger = get_logger(__name__) warning, debug = logger.warning, logger.debug def process_initializer(app, hostname): """Pool child process initializer. Initialize the child pool process to ensure the correct app instance is used and things like logging works. """ _set_task_join_will_block(True) platforms.signals.reset(*WORKER_SIGRESET) platforms.signals.ignore(*WORKER_SIGIGNORE) platforms.set_mp_process_title('celeryd', hostname=hostname) # This is for Windows and other platforms not supporting # fork(). Note that init_worker makes sure it's only # run once per process. app.loader.init_worker() app.loader.init_worker_process() logfile = os.environ.get('CELERY_LOG_FILE') or None if logfile and '%i' in logfile.lower(): # logfile path will differ so need to set up logging again. app.log.already_setup = False app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0), logfile, bool(os.environ.get('CELERY_LOG_REDIRECT', False)), str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')), hostname=hostname) if os.environ.get('FORKED_BY_MULTIPROCESSING'): # pool did execv after fork trace.setup_worker_optimizations(app, hostname) else: app.set_current() set_default_app(app) app.finalize() trace._tasks = app._tasks # enables fast_trace_task optimization. # rebuild execution handler for all tasks. from celery.app.trace import build_tracer for name, task in items(app.tasks): task.__trace__ = build_tracer(name, task, app.loader, hostname, app=app) from celery.worker import state as worker_state worker_state.reset_state() signals.worker_process_init.send(sender=None) def process_destructor(pid, exitcode): """Pool child process destructor. Dispatch the :signal:`worker_process_shutdown` signal. 
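
    Example (a sketch; ``log_child_exit`` is an illustrative name):

    .. code-block:: python

        from celery import signals

        @signals.worker_process_shutdown.connect
        def log_child_exit(pid=None, exitcode=None, **kwargs):
            print('child %s exited with status %s' % (pid, exitcode))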
""" signals.worker_process_shutdown.send( sender=None, pid=pid, exitcode=exitcode, ) class TaskPool(BasePool): """Multiprocessing Pool implementation.""" Pool = AsynPool BlockingPool = BlockingPool uses_semaphore = True write_stats = None def on_start(self): forking_enable(self.forking_enable) Pool = (self.BlockingPool if self.options.get('threads', True) else self.Pool) P = self._pool = Pool(processes=self.limit, initializer=process_initializer, on_process_exit=process_destructor, enable_timeouts=True, synack=False, **self.options) # Create proxy methods self.on_apply = P.apply_async self.maintain_pool = P.maintain_pool self.terminate_job = P.terminate_job self.grow = P.grow self.shrink = P.shrink self.flush = getattr(P, 'flush', None) # FIXME add to billiard def restart(self): self._pool.restart() self._pool.apply_async(noop) def did_start_ok(self): return self._pool.did_start_ok() def register_with_event_loop(self, loop): try: reg = self._pool.register_with_event_loop except AttributeError: return return reg(loop) def on_stop(self): """Gracefully stop the pool.""" if self._pool is not None and self._pool._state in (RUN, CLOSE): self._pool.close() self._pool.join() self._pool = None def on_terminate(self): """Force terminate the pool.""" if self._pool is not None: self._pool.terminate() self._pool = None def on_close(self): if self._pool is not None and self._pool._state == RUN: self._pool.close() def _get_info(self): write_stats = getattr(self._pool, 'human_write_stats', None) return { 'max-concurrency': self.limit, 'processes': [p.pid for p in self._pool._pool], 'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A', 'put-guarded-by-semaphore': self.putlocks, 'timeouts': (self._pool.soft_timeout or 0, self._pool.timeout or 0), 'writes': write_stats() if write_stats is not None else 'N/A', } @property def num_processes(self): return self._pool._processes celery-4.1.0/celery/concurrency/eventlet.py0000644000175000017500000001034313130607475020751 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Eventlet execution pool.""" from __future__ import absolute_import, unicode_literals import sys from kombu.five import monotonic __all__ = ['TaskPool'] W_RACE = """\ Celery module with %s imported before eventlet patched\ """ RACE_MODS = ('billiard.', 'celery.', 'kombu.') #: Warn if we couldn't patch early enough, #: and thread/socket depending celery modules have already been loaded. for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): for side in ('thread', 'threading', 'socket'): # pragma: no cover if getattr(mod, side, None): import warnings warnings.warn(RuntimeWarning(W_RACE % side)) # idiotic pep8.py does not allow expressions before imports # so have to silence errors here from kombu.async import timer as _timer # noqa from celery import signals # noqa from . 
import base # noqa def apply_target(target, args=(), kwargs={}, callback=None, accept_callback=None, getpid=None): return base.apply_target(target, args, kwargs, callback, accept_callback, pid=getpid()) class Timer(_timer.Timer): """Eventlet Timer.""" def __init__(self, *args, **kwargs): from eventlet.greenthread import spawn_after from greenlet import GreenletExit super(Timer, self).__init__(*args, **kwargs) self.GreenletExit = GreenletExit self._spawn_after = spawn_after self._queue = set() def _enter(self, eta, priority, entry, **kwargs): secs = max(eta - monotonic(), 0) g = self._spawn_after(secs, entry) self._queue.add(g) g.link(self._entry_exit, entry) g.entry = entry g.eta = eta g.priority = priority g.canceled = False return g def _entry_exit(self, g, entry): try: try: g.wait() except self.GreenletExit: entry.cancel() g.canceled = True finally: self._queue.discard(g) def clear(self): queue = self._queue while queue: try: queue.pop().cancel() except (KeyError, self.GreenletExit): pass def cancel(self, tref): try: tref.cancel() except self.GreenletExit: pass @property def queue(self): return self._queue class TaskPool(base.BasePool): """Eventlet Task Pool.""" Timer = Timer signal_safe = False is_green = True task_join_will_block = False _pool = None _quick_put = None def __init__(self, *args, **kwargs): from eventlet import greenthread from eventlet.greenpool import GreenPool self.Pool = GreenPool self.getcurrent = greenthread.getcurrent self.getpid = lambda: id(greenthread.getcurrent()) self.spawn_n = greenthread.spawn_n super(TaskPool, self).__init__(*args, **kwargs) def on_start(self): self._pool = self.Pool(self.limit) signals.eventlet_pool_started.send(sender=self) self._quick_put = self._pool.spawn_n self._quick_apply_sig = signals.eventlet_pool_apply.send def on_stop(self): signals.eventlet_pool_preshutdown.send(sender=self) if self._pool is not None: self._pool.waitall() signals.eventlet_pool_postshutdown.send(sender=self) def on_apply(self, target, args=None, kwargs=None, callback=None, accept_callback=None, **_): self._quick_apply_sig( sender=self, target=target, args=args, kwargs=kwargs, ) self._quick_put(apply_target, target, args, kwargs, callback, accept_callback, self.getpid) def grow(self, n=1): limit = self.limit + n self._pool.resize(limit) self.limit = limit def shrink(self, n=1): limit = self.limit - n self._pool.resize(limit) self.limit = limit def _get_info(self): info = super(TaskPool, self)._get_info() info.update({ 'max-concurrency': self.limit, 'free-threads': self._pool.free(), 'running-threads': self._pool.running(), }) return info celery-4.1.0/celery/concurrency/asynpool.py0000644000175000017500000013666513130607475021007 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Version of multiprocessing.Pool using Async I/O. .. note:: This module will be moved soon, so don't use it directly. This is a non-blocking version of :class:`multiprocessing.Pool`. This code deals with three major challenges: #. Starting up child processes and keeping them running. #. Sending jobs to the processes and receiving results back. #. Safely shutting down this system. 
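
This pool isn't used directly; the prefork pool
(:class:`celery.concurrency.prefork.TaskPool`) builds on it, and is
normally obtained through its alias (a minimal sketch):

.. code-block:: python

    from celery.concurrency import get_implementation

    TaskPool = get_implementation('prefork')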
""" from __future__ import absolute_import, unicode_literals import errno import gc import os import select import socket import struct import sys import time from collections import deque, namedtuple from io import BytesIO from numbers import Integral from pickle import HIGHEST_PROTOCOL from time import sleep from weakref import WeakValueDictionary, ref from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined from billiard import pool as _pool from billiard.compat import buf_t, setblocking, isblocking from billiard.queues import _SimpleQueue from kombu.async import WRITE, ERR from kombu.serialization import pickle as _pickle from kombu.utils.eventio import SELECT_BAD_FD from kombu.utils.functional import fxrange from vine import promise from celery.five import Counter, items, values from celery.utils.functional import noop from celery.utils.log import get_logger from celery.worker import state as worker_state # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. try: from _billiard import read as __read__ from struct import unpack_from as _unpack_from memoryview = memoryview readcanbuf = True if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6): def unpack_from(fmt, view, _unpack_from=_unpack_from): # noqa return _unpack_from(fmt, view.tobytes()) # <- memoryview else: # unpack_from supports memoryview in 2.7.6 and 3.3+ unpack_from = _unpack_from # noqa except (ImportError, NameError): # pragma: no cover def __read__(fd, buf, size, read=os.read): # noqa chunk = read(fd, size) n = len(chunk) if n != 0: buf.write(chunk) return n readcanbuf = False # noqa def unpack_from(fmt, iobuf, unpack=struct.unpack): # noqa return unpack(fmt, iobuf.getvalue()) # <-- BytesIO __all__ = ['AsynPool'] logger = get_logger(__name__) error, debug = logger.error, logger.debug UNAVAIL = frozenset({errno.EAGAIN, errno.EINTR}) #: Constant sent by child process when started (ready to accept work) WORKER_UP = 15 #: A process must've started before this timeout (in secs.) expires. PROC_ALIVE_TIMEOUT = 4.0 SCHED_STRATEGY_FCFS = 1 SCHED_STRATEGY_FAIR = 4 SCHED_STRATEGIES = { None: SCHED_STRATEGY_FAIR, 'fast': SCHED_STRATEGY_FCFS, 'fcfs': SCHED_STRATEGY_FCFS, 'fair': SCHED_STRATEGY_FAIR, } SCHED_STRATEGY_TO_NAME = {v: k for k, v in SCHED_STRATEGIES.items()} Ack = namedtuple('Ack', ('id', 'fd', 'payload')) def gen_not_started(gen): """Return true if generator is not started.""" # gi_frame is None when generator stopped. 
return gen.gi_frame and gen.gi_frame.f_lasti == -1 def _get_job_writer(job): try: writer = job._writer except AttributeError: pass else: return writer() # is a weakref if hasattr(select, 'poll'): def _select_imp(readers=None, writers=None, err=None, timeout=0, poll=select.poll, POLLIN=select.POLLIN, POLLOUT=select.POLLOUT, POLLERR=select.POLLERR): poller = poll() register = poller.register if readers: [register(fd, POLLIN) for fd in readers] if writers: [register(fd, POLLOUT) for fd in writers] if err: [register(fd, POLLERR) for fd in err] R, W = set(), set() timeout = 0 if timeout and timeout < 0 else round(timeout * 1e3) events = poller.poll(timeout) for fd, event in events: if not isinstance(fd, Integral): fd = fd.fileno() if event & POLLIN: R.add(fd) if event & POLLOUT: W.add(fd) if event & POLLERR: R.add(fd) return R, W, 0 else: def _select_imp(readers=None, writers=None, err=None, timeout=0): r, w, e = select.select(readers, writers, err, timeout) if e: r = list(set(r) | set(e)) return r, w, 0 def _select(readers=None, writers=None, err=None, timeout=0, poll=_select_imp): """Simple wrapper to :class:`~select.select`, using :`~select.poll`. Arguments: readers (Set[Fd]): Set of reader fds to test if readable. writers (Set[Fd]): Set of writer fds to test if writable. err (Set[Fd]): Set of fds to test for error condition. All fd sets passed must be mutable as this function will remove non-working fds from them, this also means the caller must make sure there are still fds in the sets before calling us again. Returns: Tuple[Set, Set, Set]: of ``(readable, writable, again)``, where ``readable`` is a set of fds that have data available for read, ``writable`` is a set of fds that's ready to be written to and ``again`` is a flag that if set means the caller must throw away the result and call us again. """ readers = set() if readers is None else readers writers = set() if writers is None else writers err = set() if err is None else err try: return poll(readers, writers, err, timeout) except (select.error, socket.error) as exc: if exc.errno == errno.EINTR: return set(), set(), 1 elif exc.errno in SELECT_BAD_FD: for fd in readers | writers | err: try: select.select([fd], [], [], 0) except (select.error, socket.error) as exc: if getattr(exc, 'errno', None) not in SELECT_BAD_FD: raise readers.discard(fd) writers.discard(fd) err.discard(fd) return set(), set(), 1 else: raise class Worker(_pool.Worker): """Pool worker process.""" def on_loop_start(self, pid): # our version sends a WORKER_UP message when the process is ready # to accept work, this will tell the parent that the inqueue fd # is writable. 
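        # The payload is a plain (message_type, args) tuple; WORKER_UP
        # is handled by ResultHandler.on_process_alive in the parent.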
self.outq.put((WORKER_UP, (pid,))) class ResultHandler(_pool.ResultHandler): """Handles messages from the pool processes.""" def __init__(self, *args, **kwargs): self.fileno_to_outq = kwargs.pop('fileno_to_outq') self.on_process_alive = kwargs.pop('on_process_alive') super(ResultHandler, self).__init__(*args, **kwargs) # add our custom message handler self.state_handlers[WORKER_UP] = self.on_process_alive def _recv_message(self, add_reader, fd, callback, __read__=__read__, readcanbuf=readcanbuf, BytesIO=BytesIO, unpack_from=unpack_from, load=_pickle.load): Hr = Br = 0 if readcanbuf: buf = bytearray(4) bufv = memoryview(buf) else: buf = bufv = BytesIO() # header while Hr < 4: try: n = __read__( fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr, ) except OSError as exc: if exc.errno not in UNAVAIL: raise yield else: if n == 0: raise (OSError('End of file during message') if Hr else EOFError()) Hr += n body_size, = unpack_from(b'>i', bufv) if readcanbuf: buf = bytearray(body_size) bufv = memoryview(buf) else: buf = bufv = BytesIO() while Br < body_size: try: n = __read__( fd, bufv[Br:] if readcanbuf else bufv, body_size - Br, ) except OSError as exc: if exc.errno not in UNAVAIL: raise yield else: if n == 0: raise (OSError('End of file during message') if Br else EOFError()) Br += n add_reader(fd, self.handle_event, fd) if readcanbuf: message = load(BytesIO(bufv)) else: bufv.seek(0) message = load(bufv) if message: callback(message) def _make_process_result(self, hub): """Coroutine reading messages from the pool processes.""" fileno_to_outq = self.fileno_to_outq on_state_change = self.on_state_change add_reader = hub.add_reader remove_reader = hub.remove_reader recv_message = self._recv_message def on_result_readable(fileno): try: fileno_to_outq[fileno] except KeyError: # process gone return remove_reader(fileno) it = recv_message(add_reader, fileno, on_state_change) try: next(it) except StopIteration: pass except (IOError, OSError, EOFError): remove_reader(fileno) else: add_reader(fileno, it) return on_result_readable def register_with_event_loop(self, hub): self.handle_event = self._make_process_result(hub) def handle_event(self, *args): # pylint: disable=method-hidden # register_with_event_loop overrides this raise RuntimeError('Not registered with event loop') def on_stop_not_started(self): # This is always used, since we do not start any threads. cache = self.cache check_timeouts = self.check_timeouts fileno_to_outq = self.fileno_to_outq on_state_change = self.on_state_change join_exited_workers = self.join_exited_workers # flush the processes outqueues until they've all terminated. outqueues = set(fileno_to_outq) while cache and outqueues and self._state != TERMINATE: if check_timeouts is not None: # make sure tasks with a time limit will time out. check_timeouts() # cannot iterate and remove at the same time pending_remove_fd = set() for fd in outqueues: self._flush_outqueue( fd, pending_remove_fd.add, fileno_to_outq, on_state_change, ) try: join_exited_workers(shutdown=True) except WorkersJoined: return debug('result handler: all workers terminated') outqueues.difference_update(pending_remove_fd) def _flush_outqueue(self, fd, remove, process_index, on_state_change): try: proc = process_index[fd] except KeyError: # process already found terminated # this means its outqueue has already been processed # by the worker lost handler. 
return remove(fd) reader = proc.outq._reader try: setblocking(reader, 1) except (OSError, IOError): return remove(fd) try: if reader.poll(0): task = reader.recv() else: task = None sleep(0.5) except (IOError, EOFError): return remove(fd) else: if task: on_state_change(task) finally: try: setblocking(reader, 0) except (OSError, IOError): return remove(fd) class AsynPool(_pool.Pool): """AsyncIO Pool (no threads).""" ResultHandler = ResultHandler Worker = Worker def WorkerProcess(self, worker): worker = super(AsynPool, self).WorkerProcess(worker) worker.dead = False return worker def __init__(self, processes=None, synack=False, sched_strategy=None, *args, **kwargs): self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, sched_strategy) processes = self.cpu_count() if processes is None else processes self.synack = synack # create queue-pairs for all our processes in advance. self._queues = { self.create_process_queues(): None for _ in range(processes) } # inqueue fileno -> process mapping self._fileno_to_inq = {} # outqueue fileno -> process mapping self._fileno_to_outq = {} # synqueue fileno -> process mapping self._fileno_to_synq = {} # We keep track of processes that haven't yet # sent a WORKER_UP message. If a process fails to send # this message within proc_up_timeout we terminate it # and hope the next process will recover. self._proc_alive_timeout = PROC_ALIVE_TIMEOUT self._waiting_to_start = set() # denormalized set of all inqueues. self._all_inqueues = set() # Set of fds being written to (busy) self._active_writes = set() # Set of active co-routines currently writing jobs. self._active_writers = set() # Set of fds that are busy (executing task) self._busy_workers = set() self._mark_worker_as_available = self._busy_workers.discard # Holds jobs waiting to be written to child processes. self.outbound_buffer = deque() self.write_stats = Counter() super(AsynPool, self).__init__(processes, *args, **kwargs) for proc in self._pool: # create initial mappings, these will be updated # as processes are recycled, or found lost elsewhere. self._fileno_to_outq[proc.outqR_fd] = proc self._fileno_to_synq[proc.synqW_fd] = proc self.on_soft_timeout = getattr( self._timeout_handler, 'on_soft_timeout', noop, ) self.on_hard_timeout = getattr( self._timeout_handler, 'on_hard_timeout', noop, ) def _create_worker_process(self, i): gc.collect() # Issue #2927 return super(AsynPool, self)._create_worker_process(i) def _event_process_exit(self, hub, proc): # This method is called whenever the process sentinel is readable. self._untrack_child_process(proc, hub) self.maintain_pool() def _track_child_process(self, proc, hub): try: fd = proc._sentinel_poll except AttributeError: # we need to duplicate the fd here to carefully # control when the fd is removed from the process table, # as once the original fd is closed we cannot unregister # the fd from epoll(7) anymore, causing a 100% CPU poll loop. 
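            # os.dup() gives us a private copy of the sentinel fd, whose
            # lifetime we fully control (it's closed again in
            # _untrack_child_process below).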
fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) hub.add_reader(fd, self._event_process_exit, hub, proc) def _untrack_child_process(self, proc, hub): if proc._sentinel_poll is not None: fd, proc._sentinel_poll = proc._sentinel_poll, None hub.remove(fd) os.close(fd) def register_with_event_loop(self, hub): """Register the async pool with the current event loop.""" self._result_handler.register_with_event_loop(hub) self.handle_result_event = self._result_handler.handle_event self._create_timelimit_handlers(hub) self._create_process_handlers(hub) self._create_write_handlers(hub) # Add handler for when a process exits (calls maintain_pool) [self._track_child_process(w, hub) for w in self._pool] # Handle_result_event is called whenever one of the # result queues are readable. [hub.add_reader(fd, self.handle_result_event, fd) for fd in self._fileno_to_outq] # Timers include calling maintain_pool at a regular interval # to be certain processes are restarted. for handler, interval in items(self.timers): hub.call_repeatedly(interval, handler) hub.on_tick.add(self.on_poll_start) def _create_timelimit_handlers(self, hub): """Create handlers used to implement time limits.""" call_later = hub.call_later trefs = self._tref_for_id = WeakValueDictionary() def on_timeout_set(R, soft, hard): if soft: trefs[R._job] = call_later( soft, self._on_soft_timeout, R._job, soft, hard, hub, ) elif hard: trefs[R._job] = call_later( hard, self._on_hard_timeout, R._job, ) self.on_timeout_set = on_timeout_set def _discard_tref(job): try: tref = trefs.pop(job) tref.cancel() del tref except (KeyError, AttributeError): pass # out of scope self._discard_tref = _discard_tref def on_timeout_cancel(R): _discard_tref(R._job) self.on_timeout_cancel = on_timeout_cancel def _on_soft_timeout(self, job, soft, hard, hub): # only used by async pool. if hard: self._tref_for_id[job] = hub.call_later( hard - soft, self._on_hard_timeout, job, ) try: result = self._cache[job] except KeyError: pass # job ready else: self.on_soft_timeout(result) finally: if not hard: # remove tref self._discard_tref(job) def _on_hard_timeout(self, job): # only used by async pool. try: result = self._cache[job] except KeyError: pass # job ready else: self.on_hard_timeout(result) finally: # remove tref self._discard_tref(job) def on_job_ready(self, job, i, obj, inqW_fd): self._mark_worker_as_available(inqW_fd) def _create_process_handlers(self, hub): """Create handlers called on process up/down, etc.""" add_reader, remove_reader, remove_writer = ( hub.add_reader, hub.remove_reader, hub.remove_writer, ) cache = self._cache all_inqueues = self._all_inqueues fileno_to_inq = self._fileno_to_inq fileno_to_outq = self._fileno_to_outq fileno_to_synq = self._fileno_to_synq busy_workers = self._busy_workers handle_result_event = self.handle_result_event process_flush_queues = self.process_flush_queues waiting_to_start = self._waiting_to_start def verify_process_alive(proc): proc = proc() # is a weakref if (proc is not None and proc._is_alive() and proc in waiting_to_start): assert proc.outqR_fd in fileno_to_outq assert fileno_to_outq[proc.outqR_fd] is proc assert proc.outqR_fd in hub.readers error('Timed out waiting for UP message from %r', proc) os.kill(proc.pid, 9) def on_process_up(proc): """Called when a process has started.""" # If we got the same fd as a previous process then we'll also # receive jobs in the old buffer, so we need to reset the # job._write_to and job._scheduled_for attributes used to recover # message boundaries when processes exit. 
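            # any job still bound to an old process with the same inqueue
            # fd is re-pointed at this new process below.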
infd = proc.inqW_fd for job in values(cache): if job._write_to and job._write_to.inqW_fd == infd: job._write_to = proc if job._scheduled_for and job._scheduled_for.inqW_fd == infd: job._scheduled_for = proc fileno_to_outq[proc.outqR_fd] = proc # maintain_pool is called whenever a process exits. self._track_child_process(proc, hub) assert not isblocking(proc.outq._reader) # handle_result_event is called when the processes outqueue is # readable. add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) waiting_to_start.add(proc) hub.call_later( self._proc_alive_timeout, verify_process_alive, ref(proc), ) self.on_process_up = on_process_up def _remove_from_index(obj, proc, index, remove_fun, callback=None): # this remove the file descriptors for a process from # the indices. we have to make sure we don't overwrite # another processes fds, as the fds may be reused. try: fd = obj.fileno() except (IOError, OSError): return try: if index[fd] is proc: # fd hasn't been reused so we can remove it from index. index.pop(fd, None) except KeyError: pass else: remove_fun(fd) if callback is not None: callback(fd) return fd def on_process_down(proc): """Called when a worker process exits.""" if getattr(proc, 'dead', None): return process_flush_queues(proc) _remove_from_index( proc.outq._reader, proc, fileno_to_outq, remove_reader, ) if proc.synq: _remove_from_index( proc.synq._writer, proc, fileno_to_synq, remove_writer, ) inq = _remove_from_index( proc.inq._writer, proc, fileno_to_inq, remove_writer, callback=all_inqueues.discard, ) if inq: busy_workers.discard(inq) self._untrack_child_process(proc, hub) waiting_to_start.discard(proc) self._active_writes.discard(proc.inqW_fd) remove_writer(proc.inq._writer) remove_reader(proc.outq._reader) if proc.synqR_fd: remove_reader(proc.synq._reader) if proc.synqW_fd: self._active_writes.discard(proc.synqW_fd) remove_reader(proc.synq._writer) self.on_process_down = on_process_down def _create_write_handlers(self, hub, pack=struct.pack, dumps=_pickle.dumps, protocol=HIGHEST_PROTOCOL): """Create handlers used to write data to child processes.""" fileno_to_inq = self._fileno_to_inq fileno_to_synq = self._fileno_to_synq outbound = self.outbound_buffer pop_message = outbound.popleft put_message = outbound.append all_inqueues = self._all_inqueues active_writes = self._active_writes active_writers = self._active_writers busy_workers = self._busy_workers diff = all_inqueues.difference add_writer = hub.add_writer hub_add, hub_remove = hub.add, hub.remove mark_write_fd_as_active = active_writes.add mark_write_gen_as_active = active_writers.add mark_worker_as_busy = busy_workers.add write_generator_done = active_writers.discard get_job = self._cache.__getitem__ write_stats = self.write_stats is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR revoked_tasks = worker_state.revoked getpid = os.getpid precalc = {ACK: self._create_payload(ACK, (0,)), NACK: self._create_payload(NACK, (0,))} def _put_back(job, _time=time.time): # puts back at the end of the queue if job._terminated is not None or \ job.correlation_id in revoked_tasks: if not job._accepted: job._ack(None, _time(), getpid(), None) job._set_terminated(job._terminated) else: # XXX linear lookup, should find a better way, # but this happens rarely and is here to protect against races. 
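            # note: appendleft() + popleft() means the job goes back to
            # the head of the queue, so it's the next message written.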
if job not in outbound: outbound.appendleft(job) self._put_back = _put_back # called for every event loop iteration, and if there # are messages pending this will schedule writing one message # by registering the 'schedule_writes' function for all currently # inactive inqueues (not already being written to) # consolidate means the event loop will merge them # and call the callback once with the list writable fds as # argument. Using this means we minimize the risk of having # the same fd receive every task if the pipe read buffer is not # full. if is_fair_strategy: def on_poll_start(): if outbound and len(busy_workers) < len(all_inqueues): # print('ALL: %r ACTIVE: %r' % (len(all_inqueues), # len(active_writes))) inactive = diff(active_writes) [hub_add(fd, None, WRITE | ERR, consolidate=True) for fd in inactive] else: [hub_remove(fd) for fd in diff(active_writes)] else: def on_poll_start(): # noqa if outbound: [hub_add(fd, None, WRITE | ERR, consolidate=True) for fd in diff(active_writes)] else: [hub_remove(fd) for fd in diff(active_writes)] self.on_poll_start = on_poll_start def on_inqueue_close(fd, proc): # Makes sure the fd is removed from tracking when # the connection is closed, this is essential as fds may be reused. busy_workers.discard(fd) try: if fileno_to_inq[fd] is proc: fileno_to_inq.pop(fd, None) active_writes.discard(fd) all_inqueues.discard(fd) hub_remove(fd) except KeyError: pass self.on_inqueue_close = on_inqueue_close def schedule_writes(ready_fds, total_write_count=[0]): # Schedule write operation to ready file descriptor. # The file descriptor is writable, but that does not # mean the process is currently reading from the socket. # The socket is buffered so writable simply means that # the buffer can accept at least 1 byte of data. # This means we have to cycle between the ready fds. # the first version used shuffle, but this version # using `total_writes % ready_fds` is about 30% faster # with many processes, and also leans more towards fairness # in write stats when used with many processes # [XXX On macOS, this may vary depending # on event loop implementation (i.e, select/poll vs epoll), so # have to test further] num_ready = len(ready_fds) for _ in range(num_ready): ready_fd = ready_fds[total_write_count[0] % num_ready] total_write_count[0] += 1 if ready_fd in active_writes: # already writing to this fd continue if is_fair_strategy and ready_fd in busy_workers: # worker is already busy with another task continue if ready_fd not in all_inqueues: hub_remove(ready_fd) continue try: job = pop_message() except IndexError: # no more messages, remove all inactive fds from the hub. # this is important since the fds are always writable # as long as there's 1 byte left in the buffer, and so # this may create a spinloop where the event loop # always wakes up. for inqfd in diff(active_writes): hub_remove(inqfd) break else: if not job._accepted: # job not accepted by another worker try: # keep track of what process the write operation # was scheduled for. proc = job._scheduled_for = fileno_to_inq[ready_fd] except KeyError: # write was scheduled for this fd but the process # has since exited and the message must be sent to # another process. put_message(job) continue cor = _write_job(proc, ready_fd, job) job._writer = ref(cor) mark_write_gen_as_active(cor) mark_write_fd_as_active(ready_fd) mark_worker_as_busy(ready_fd) # Try to write immediately, in case there's an error. 
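                    # Priming the generator here surfaces a dead pipe
                    # (EBADF) right away; on success the continuation is
                    # registered with the event loop below.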
try: next(cor) except StopIteration: pass except OSError as exc: if exc.errno != errno.EBADF: raise else: add_writer(ready_fd, cor) hub.consolidate_callback = schedule_writes def send_job(tup): # Schedule writing job request for when one of the process # inqueues are writable. body = dumps(tup, protocol=protocol) body_size = len(body) header = pack(b'>I', body_size) # index 1,0 is the job ID. job = get_job(tup[1][0]) job._payload = buf_t(header), buf_t(body), body_size put_message(job) self._quick_put = send_job def on_not_recovering(proc, fd, job, exc): logger.exception( 'Process inqueue damaged: %r %r: %r', proc, proc.exitcode, exc) if proc._is_alive(): proc.terminate() hub.remove(fd) self._put_back(job) def _write_job(proc, fd, job): # writes job to the worker process. # Operation must complete if more than one byte of data # was written. If the broker connection is lost # and no data was written the operation shall be canceled. header, body, body_size = job._payload errors = 0 try: # job result keeps track of what process the job is sent to. job._write_to = proc send = proc.send_job_offset Hw = Bw = 0 # write header while Hw < 4: try: Hw += send(header, Hw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data errors += 1 if errors > 100: on_not_recovering(proc, fd, job, exc) raise StopIteration() yield else: errors = 0 # write body while Bw < body_size: try: Bw += send(body, Bw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data errors += 1 if errors > 100: on_not_recovering(proc, fd, job, exc) raise StopIteration() yield else: errors = 0 finally: hub_remove(fd) write_stats[proc.index] += 1 # message written, so this fd is now available active_writes.discard(fd) write_generator_done(job._writer()) # is a weakref def send_ack(response, pid, job, fd): # Only used when synack is enabled. # Schedule writing ack response for when the fd is writable. msg = Ack(job, fd, precalc[response]) callback = promise(write_generator_done) cor = _write_ack(fd, msg, callback=callback) mark_write_gen_as_active(cor) mark_write_fd_as_active(fd) callback.args = (cor,) add_writer(fd, cor) self.send_ack = send_ack def _write_ack(fd, ack, callback=None): # writes ack back to the worker if synack enabled. # this operation *MUST* complete, otherwise # the worker process will hang waiting for the ack. header, body, body_size = ack[2] try: try: proc = fileno_to_synq[fd] except KeyError: # process died, we can safely discard the ack at this # point. raise StopIteration() send = proc.send_syn_offset Hw = Bw = 0 # write header while Hw < 4: try: Hw += send(header, Hw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise yield # write body while Bw < body_size: try: Bw += send(body, Bw) except Exception as exc: # pylint: disable=broad-except if getattr(exc, 'errno', None) not in UNAVAIL: raise # suspend until more data yield finally: if callback: callback() # message written, so this fd is now available active_writes.discard(fd) def flush(self): if self._state == TERMINATE: return # cancel all tasks that haven't been accepted so that NACK is sent. for job in values(self._cache): if not job._accepted: job._cancel() # clear the outgoing buffer as the tasks will be redelivered by # the broker anyway. 
if self.outbound_buffer: self.outbound_buffer.clear() self.maintain_pool() try: # ...but we must continue writing the payloads we already started # to keep message boundaries. # The messages may be NACK'ed later if synack is enabled. if self._state == RUN: # flush outgoing buffers intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True) owned_by = {} for job in values(self._cache): writer = _get_job_writer(job) if writer is not None: owned_by[writer] = job while self._active_writers: writers = list(self._active_writers) for gen in writers: if (gen.__name__ == '_write_job' and gen_not_started(gen)): # hasn't started writing the job so can # discard the task, but we must also remove # it from the Pool._cache. try: job = owned_by[gen] except KeyError: pass else: # removes from Pool._cache job.discard() self._active_writers.discard(gen) else: try: job = owned_by[gen] except KeyError: pass else: job_proc = job._write_to if job_proc._is_alive(): self._flush_writer(job_proc, gen) # workers may have exited in the meantime. self.maintain_pool() sleep(next(intervals)) # don't busyloop finally: self.outbound_buffer.clear() self._active_writers.clear() self._active_writes.clear() self._busy_workers.clear() def _flush_writer(self, proc, writer): fds = {proc.inq._writer} try: while fds: if not proc._is_alive(): break # process exited readable, writable, again = _select( writers=fds, err=fds, timeout=0.5, ) if not again and (writable or readable): try: next(writer) except (StopIteration, OSError, IOError, EOFError): break finally: self._active_writers.discard(writer) def get_process_queues(self): """Get queues for a new process. Here we'll find an unused slot, as there should always be one available when we start a new process. """ return next(q for q, owner in items(self._queues) if owner is None) def on_grow(self, n): """Grow the pool by ``n`` proceses.""" diff = max(self._processes - len(self._queues), 0) if diff: self._queues.update({ self.create_process_queues(): None for _ in range(diff) }) def on_shrink(self, n): """Shrink the pool by ``n`` processes.""" pass def create_process_queues(self): """Create new in, out, etc. queues, returned as a tuple.""" # NOTE: Pipes must be set O_NONBLOCK at creation time (the original # fd), otherwise it won't be possible to change the flags until # there's an actual reader/writer on the other side. inq = _SimpleQueue(wnonblock=True) outq = _SimpleQueue(rnonblock=True) synq = None assert isblocking(inq._reader) assert not isblocking(inq._writer) assert not isblocking(outq._reader) assert isblocking(outq._writer) if self.synack: synq = _SimpleQueue(wnonblock=True) assert isblocking(synq._reader) assert not isblocking(synq._writer) return inq, outq, synq def on_process_alive(self, pid): """Called when reciving the :const:`WORKER_UP` message. Marks the process as ready to receive work. 
""" try: proc = next(w for w in self._pool if w.pid == pid) except StopIteration: return logger.warning('process with pid=%s already exited', pid) assert proc.inqW_fd not in self._fileno_to_inq assert proc.inqW_fd not in self._all_inqueues self._waiting_to_start.discard(proc) self._fileno_to_inq[proc.inqW_fd] = proc self._fileno_to_synq[proc.synqW_fd] = proc self._all_inqueues.add(proc.inqW_fd) def on_job_process_down(self, job, pid_gone): """Called for each job when the process assigned to it exits.""" if job._write_to and not job._write_to._is_alive(): # job was partially written self.on_partial_read(job, job._write_to) elif job._scheduled_for and not job._scheduled_for._is_alive(): # job was only scheduled to be written to this process, # but no data was sent so put it back on the outbound_buffer. self._put_back(job) def on_job_process_lost(self, job, pid, exitcode): """Called when the process executing job' exits. This happens when the process job' was assigned to exited by mysterious means (error exitcodes and signals). """ self.mark_as_worker_lost(job, exitcode) def human_write_stats(self): if self.write_stats is None: return 'N/A' vals = list(values(self.write_stats)) total = sum(vals) def per(v, total): return '{0:.2%}'.format((float(v) / total) if v else 0) return { 'total': total, 'avg': per(total / len(self.write_stats) if total else 0, total), 'all': ', '.join(per(v, total) for v in vals), 'raw': ', '.join(map(str, vals)), 'strategy': SCHED_STRATEGY_TO_NAME.get( self.sched_strategy, self.sched_strategy, ), 'inqueues': { 'total': len(self._all_inqueues), 'active': len(self._active_writes), } } def _process_cleanup_queues(self, proc): """Called to clean up queues after process exit.""" if not proc.dead: try: self._queues[self._find_worker_queues(proc)] = None except (KeyError, ValueError): pass @staticmethod def _stop_task_handler(task_handler): """Called at shutdown to tell processes that we're shutting down.""" for proc in task_handler.pool: try: setblocking(proc.inq._writer, 1) except (OSError, IOError): pass else: try: proc.inq.put(None) except OSError as exc: if exc.errno != errno.EBADF: raise def create_result_handler(self): return super(AsynPool, self).create_result_handler( fileno_to_outq=self._fileno_to_outq, on_process_alive=self.on_process_alive, ) def _process_register_queues(self, proc, queues): """Mark new ownership for ``queues`` to update fileno indices.""" assert queues in self._queues b = len(self._queues) self._queues[queues] = proc assert b == len(self._queues) def _find_worker_queues(self, proc): """Find the queues owned by ``proc``.""" try: return next(q for q, owner in items(self._queues) if owner == proc) except StopIteration: raise ValueError(proc) def _setup_queues(self): # this is only used by the original pool that used a shared # queue for all processes. self._quick_put = None # these attributes are unused by this class, but we'll still # have to initialize them for compatibility. self._inqueue = self._outqueue = \ self._quick_get = self._poll_result = None def process_flush_queues(self, proc): """Flush all queues. Including the outbound buffer, so that all tasks that haven't been started will be discarded. In Celery this is called whenever the transport connection is lost (consumer restart), and when a process is terminated. 
""" resq = proc.outq._reader on_state_change = self._result_handler.on_state_change fds = {resq} while fds and not resq.closed and self._state != TERMINATE: readable, _, _ = _select(fds, None, fds, timeout=0.01) if readable: try: task = resq.recv() except (OSError, IOError, EOFError) as exc: _errno = getattr(exc, 'errno', None) if _errno == errno.EINTR: continue elif _errno == errno.EAGAIN: break elif _errno not in UNAVAIL: debug('got %r while flushing process %r', exc, proc, exc_info=1) break else: if task is None: debug('got sentinel while flushing process %r', proc) break else: on_state_change(task) else: break def on_partial_read(self, job, proc): """Called when a job was partially written to exited child.""" # worker terminated by signal: # we cannot reuse the sockets again, because we don't know if # the process wrote/read anything frmo them, and if so we cannot # restore the message boundaries. if not job._accepted: # job was not acked, so find another worker to send it to. self._put_back(job) writer = _get_job_writer(job) if writer: self._active_writers.discard(writer) del writer if not proc.dead: proc.dead = True # Replace queues to avoid reuse before = len(self._queues) try: queues = self._find_worker_queues(proc) if self.destroy_queues(queues, proc): self._queues[self.create_process_queues()] = None except ValueError: pass assert len(self._queues) == before def destroy_queues(self, queues, proc): """Destroy queues that can no longer be used. This way they can be replaced by new usable sockets. """ assert not proc._is_alive() self._waiting_to_start.discard(proc) removed = 1 try: self._queues.pop(queues) except KeyError: removed = 0 try: self.on_inqueue_close(queues[0]._writer.fileno(), proc) except IOError: pass for queue in queues: if queue: for sock in (queue._reader, queue._writer): if not sock.closed: try: sock.close() except (IOError, OSError): pass return removed def _create_payload(self, type_, args, dumps=_pickle.dumps, pack=struct.pack, protocol=HIGHEST_PROTOCOL): body = dumps((type_, args), protocol=protocol) size = len(body) header = pack(b'>I', size) return header, body, size @classmethod def _set_result_sentinel(cls, _outqueue, _pool): # unused pass def _help_stuff_finish_args(self): # Pool._help_stuff_finished is a classmethod so we have to use this # trick to modify the arguments passed to it. 
return (self._pool,) @classmethod def _help_stuff_finish(cls, pool): # pylint: disable=arguments-differ debug( 'removing tasks from inqueue until task handler finished', ) fileno_to_proc = {} inqR = set() for w in pool: try: fd = w.inq._reader.fileno() inqR.add(fd) fileno_to_proc[fd] = w except IOError: pass while inqR: readable, _, again = _select(inqR, timeout=0.5) if again: continue if not readable: break for fd in readable: fileno_to_proc[fd].inq._reader.recv() sleep(0) @property def timers(self): return {self.maintain_pool: 5.0} celery-4.1.0/celery/app/0000755000175000017500000000000013135426347015000 5ustar omeromer00000000000000celery-4.1.0/celery/app/base.py0000644000175000017500000013044213130607475016266 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Actual App instance implementation.""" from __future__ import absolute_import, unicode_literals import os import threading import warnings from collections import defaultdict, deque from operator import attrgetter from kombu import pools from kombu.clocks import LamportClock from kombu.common import oid_from from kombu.utils.compat import register_after_fork from kombu.utils.objects import cached_property from kombu.utils.uuid import uuid from vine import starpromise from vine.utils import wraps from celery import platforms from celery import signals from celery._state import ( _task_stack, get_current_app, _set_current_app, set_default_app, _register_app, _deregister_app, get_current_worker_task, connect_on_app_finalize, _announce_app_finalized, ) from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured from celery.five import ( UserDict, bytes_if_py2, python_2_unicode_compatible, values, ) from celery.loaders import get_loader_cls from celery.local import PromiseProxy, maybe_evaluate from celery.utils import abstract from celery.utils.collections import AttributeDictMixin from celery.utils.dispatch import Signal from celery.utils.functional import first, maybe_list, head_from_fun from celery.utils.time import timezone from celery.utils.imports import gen_task_name, instantiate, symbol_by_name from celery.utils.log import get_logger from celery.utils.objects import FallbackContext, mro_lookup from .annotations import prepare as prepare_annotations from . import backends from .defaults import find_deprecated_settings from .registry import TaskRegistry from .utils import ( AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, _old_key_to_new, _new_key_to_old, appstr, detect_settings, ) # Load all builtin tasks from . import builtins # noqa __all__ = ['Celery'] logger = get_logger(__name__) BUILTIN_FIXUPS = { 'celery.fixups.django:fixup', } USING_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING') ERR_ENVVAR_NOT_SET = """ The environment variable {0!r} is not set, and as such the configuration could not be loaded. Please set this variable and make sure it points to a valid configuration module. Example: {0}="proj.celeryconfig" """ def app_has_custom(app, attr): """Return true if app has customized method `attr`. Note: This is used for optimizations in cases where we know how the default behavior works, but need to account for someone using inheritance to override a method/property. """ return mro_lookup(app.__class__, attr, stop={Celery, object}, monkey_patched=[__name__]) def _unpickle_appattr(reverse_name, args): """Unpickle app.""" # Given an attribute name and a list of args, gets # the attribute from the current app and calls it. 
return get_current_app()._rgetattr(reverse_name)(*args) def _after_fork_cleanup_app(app): # This is used with multiprocessing.register_after_fork, # so it needs to be at module level. try: app._after_fork() except Exception as exc: # pylint: disable=broad-except logger.info('after forker raised exception: %r', exc, exc_info=1) class PendingConfiguration(UserDict, AttributeDictMixin): # `app.conf` will be of this type before being explicitly configured, # meaning the app can keep any configuration set directly # on `app.conf` before the `app.config_from_object` call. # # accessing any key will finalize the configuration, # replacing `app.conf` with a concrete settings object. callback = None _data = None def __init__(self, conf, callback): object.__setattr__(self, '_data', conf) object.__setattr__(self, 'callback', callback) def __setitem__(self, key, value): self._data[key] = value def clear(self): self._data.clear() def update(self, *args, **kwargs): self._data.update(*args, **kwargs) def setdefault(self, *args, **kwargs): return self._data.setdefault(*args, **kwargs) def __contains__(self, key): # XXX will not show finalized configuration # setdefault will cause `key in d` to happen, # so for setdefault to be lazy, __contains__ must be lazy too. return key in self._data def __len__(self): return len(self.data) def __repr__(self): return repr(self.data) @cached_property def data(self): return self.callback() @python_2_unicode_compatible class Celery(object): """Celery application. Arguments: main (str): Name of the main module if running as `__main__`. This is used as the prefix for auto-generated task names. Keyword Arguments: broker (str): URL of the default broker used. backend (Union[str, type]): The result store backend class, or the name of the backend class to use. Default is the value of the :setting:`result_backend` setting. autofinalize (bool): If set to False a :exc:`RuntimeError` will be raised if the task registry or tasks are used before the app is finalized. set_as_current (bool): Make this the global current app. include (List[str]): List of modules every worker should import. amqp (Union[str, type]): AMQP object or class name. events (Union[str, type]): Events object or class name. log (Union[str, type]): Log object or class name. control (Union[str, type]): Control object or class name. tasks (Union[str, type]): A task registry, or the name of a registry class. fixups (List[str]): List of fix-up plug-ins (e.g., see :mod:`celery.fixups.django`). config_source (Union[str, type]): Take configuration from a class, or object. Attributes may include any settings described in the documentation. """ #: This is deprecated, use :meth:`reduce_keys` instead Pickler = AppPickler SYSTEM = platforms.SYSTEM IS_macOS, IS_WINDOWS = platforms.IS_macOS, platforms.IS_WINDOWS #: Name of the `__main__` module. Required for standalone scripts. #: #: If set this will be used instead of `__main__` when automatically #: generating task names. main = None #: Custom options for command-line programs. #: See :ref:`extending-commandoptions` user_options = None #: Custom bootsteps to extend and modify the worker. #: See :ref:`extending-bootsteps`. 
steps = None builtin_fixups = BUILTIN_FIXUPS amqp_cls = 'celery.app.amqp:AMQP' backend_cls = None events_cls = 'celery.app.events:Events' loader_cls = None log_cls = 'celery.app.log:Logging' control_cls = 'celery.app.control:Control' task_cls = 'celery.app.task:Task' registry_cls = TaskRegistry _fixups = None _pool = None _conf = None _after_fork_registered = False #: Signal sent when app is loading configuration. on_configure = None #: Signal sent after app has prepared the configuration. on_after_configure = None #: Signal sent after app has been finalized. on_after_finalize = None #: Signal sent by every new process after fork. on_after_fork = None def __init__(self, main=None, loader=None, backend=None, amqp=None, events=None, log=None, control=None, set_as_current=True, tasks=None, broker=None, include=None, changes=None, config_source=None, fixups=None, task_cls=None, autofinalize=True, namespace=None, strict_typing=True, **kwargs): self.clock = LamportClock() self.main = main self.amqp_cls = amqp or self.amqp_cls self.events_cls = events or self.events_cls self.loader_cls = loader or self._get_default_loader() self.log_cls = log or self.log_cls self.control_cls = control or self.control_cls self.task_cls = task_cls or self.task_cls self.set_as_current = set_as_current self.registry_cls = symbol_by_name(self.registry_cls) self.user_options = defaultdict(set) self.steps = defaultdict(set) self.autofinalize = autofinalize self.namespace = namespace self.strict_typing = strict_typing self.configured = False self._config_source = config_source self._pending_defaults = deque() self._pending_periodic_tasks = deque() self.finalized = False self._finalize_mutex = threading.Lock() self._pending = deque() self._tasks = tasks if not isinstance(self._tasks, TaskRegistry): self._tasks = self.registry_cls(self._tasks or {}) # If the class defines a custom __reduce_args__ we need to use # the old way of pickling apps: pickling a list of # args instead of the new way that pickles a dict of keywords. self._using_v1_reduce = app_has_custom(self, '__reduce_args__') # these options are moved to the config to # simplify pickling of the app object. self._preconf = changes or {} self._preconf_set_by_auto = set() self.__autoset('broker_url', broker) self.__autoset('result_backend', backend) self.__autoset('include', include) self._conf = Settings( PendingConfiguration( self._preconf, self._finalize_pending_conf), prefix=self.namespace, keys=(_old_key_to_new, _new_key_to_old), ) # - Apply fix-ups. self.fixups = set(self.builtin_fixups) if fixups is None else fixups # ...store fixup instances in _fixups to keep weakrefs alive. self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups] if self.set_as_current: self.set_current() # Signals if self.on_configure is None: # used to be a method pre 4.0 self.on_configure = Signal(name='app.on_configure') self.on_after_configure = Signal( name='app.on_after_configure', providing_args={'source'}, ) self.on_after_finalize = Signal(name='app.on_after_finalize') self.on_after_fork = Signal(name='app.on_after_fork') self.on_init() _register_app(self) def _get_default_loader(self): # the --loader command-line argument sets the environment variable. 
return ( os.environ.get('CELERY_LOADER') or self.loader_cls or 'celery.loaders.app:AppLoader' ) def on_init(self): """Optional callback called at init.""" pass def __autoset(self, key, value): if value: self._preconf[key] = value self._preconf_set_by_auto.add(key) def set_current(self): """Make this the current app for this thread.""" _set_current_app(self) def set_default(self): """Make this the default app for all threads.""" set_default_app(self) def _ensure_after_fork(self): if not self._after_fork_registered: self._after_fork_registered = True if register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_app) def close(self): """Clean up after the application. Only necessary for dynamically created apps, and you should probably use the :keyword:`with` statement instead. Example: >>> with Celery(set_as_current=False) as app: ... with app.connection_for_write() as conn: ... pass """ self._pool = None _deregister_app(self) def start(self, argv=None): """Run :program:`celery` using `argv`. Uses :data:`sys.argv` if `argv` is not specified. """ return instantiate( 'celery.bin.celery:CeleryCommand', app=self ).execute_from_commandline(argv) def worker_main(self, argv=None): """Run :program:`celery worker` using `argv`. Uses :data:`sys.argv` if `argv` is not specified. """ return instantiate( 'celery.bin.worker:worker', app=self ).execute_from_commandline(argv) def task(self, *args, **opts): """Decorator to create a task class out of any callable. Examples: .. code-block:: python @app.task def refresh_feed(url): store_feed(feedparser.parse(url)) with setting extra options: .. code-block:: python @app.task(exchange='feeds') def refresh_feed(url): return store_feed(feedparser.parse(url)) Note: App Binding: For custom apps the task decorator will return a proxy object, so that the act of creating the task is not performed until the task is used or the task registry is accessed. If you're depending on binding to be deferred, then you must not access any attributes on the returned object until the application is fully set up (finalized). """ if USING_EXECV and opts.get('lazy', True): # When using execv the task in the original module will point to a # different app, so doing things like 'add.request' will point to # a different task instance. This makes sure it will always use # the task instance from the current app. # Really need a better solution for this :( from . 
import shared_task return shared_task(*args, lazy=False, **opts) def inner_create_task_cls(shared=True, filter=None, lazy=True, **opts): _filt = filter # stupid 2to3 def _create_task_cls(fun): if shared: def cons(app): return app._task_from_fun(fun, **opts) cons.__name__ = fun.__name__ connect_on_app_finalize(cons) if not lazy or self.finalized: ret = self._task_from_fun(fun, **opts) else: # return a proxy object that evaluates on first use ret = PromiseProxy(self._task_from_fun, (fun,), opts, __doc__=fun.__doc__) self._pending.append(ret) if _filt: return _filt(ret) return ret return _create_task_cls if len(args) == 1: if callable(args[0]): return inner_create_task_cls(**opts)(*args) raise TypeError('argument 1 to @task() must be a callable') if args: raise TypeError( '@task() takes exactly 1 argument ({0} given)'.format( sum([len(args), len(opts)]))) return inner_create_task_cls(**opts) def _task_from_fun(self, fun, name=None, base=None, bind=False, **options): if not self.finalized and not self.autofinalize: raise RuntimeError('Contract breach: app not finalized') name = name or self.gen_task_name(fun.__name__, fun.__module__) base = base or self.Task if name not in self._tasks: run = fun if bind else staticmethod(fun) task = type(fun.__name__, (base,), dict({ 'app': self, 'name': name, 'run': run, '_decorated': True, '__doc__': fun.__doc__, '__module__': fun.__module__, '__header__': staticmethod(head_from_fun(fun, bound=bind)), '__wrapped__': run}, **options))() # for some reason __qualname__ cannot be set in type() # so we have to set it here. try: task.__qualname__ = fun.__qualname__ except AttributeError: pass self._tasks[task.name] = task task.bind(self) # connects task to this app autoretry_for = tuple(options.get('autoretry_for', ())) retry_kwargs = options.get('retry_kwargs', {}) if autoretry_for and not hasattr(task, '_orig_run'): @wraps(task.run) def run(*args, **kwargs): try: return task._orig_run(*args, **kwargs) except autoretry_for as exc: raise task.retry(exc=exc, **retry_kwargs) task._orig_run, task.run = task.run, run else: task = self._tasks[name] return task def register_task(self, task): """Utility for registering a task-based class. Note: This is here for compatibility with old Celery 1.0 style task classes, you should not need to use this for new projects. """ if not task.name: task_cls = type(task) task.name = self.gen_task_name( task_cls.__name__, task_cls.__module__) self.tasks[task.name] = task task._app = self task.bind(self) return task def gen_task_name(self, name, module): return gen_task_name(self, name, module) def finalize(self, auto=False): """Finalize the app. This loads built-in tasks, evaluates pending task decorators, reads configuration, etc. """ with self._finalize_mutex: if not self.finalized: if auto and not self.autofinalize: raise RuntimeError('Contract breach: app not finalized') self.finalized = True _announce_app_finalized(self) pending = self._pending while pending: maybe_evaluate(pending.popleft()) for task in values(self._tasks): task.bind(self) self.on_after_finalize.send(sender=self) def add_defaults(self, fun): """Add default configuration from dict ``d``. If the argument is a callable function then it will be regarded as a promise, and it won't be loaded until the configuration is actually needed. This method can be compared to: .. 
code-block:: pycon >>> celery.conf.update(d) with a difference that 1) no copy will be made and 2) the dict will not be transferred when the worker spawns child processes, so it's important that the same configuration happens at import time when pickle restores the object on the other side. """ if not callable(fun): d, fun = fun, lambda: d if self.configured: return self._conf.add_defaults(fun()) self._pending_defaults.append(fun) def config_from_object(self, obj, silent=False, force=False, namespace=None): """Read configuration from object. Object is either an actual object or the name of a module to import. Example: >>> celery.config_from_object('myapp.celeryconfig') >>> from myapp import celeryconfig >>> celery.config_from_object(celeryconfig) Arguments: silent (bool): If true then import errors will be ignored. force (bool): Force reading configuration immediately. By default the configuration will be read only when required. """ self._config_source = obj self.namespace = namespace or self.namespace if force or self.configured: self._conf = None if self.loader.config_from_object(obj, silent=silent): return self.conf def config_from_envvar(self, variable_name, silent=False, force=False): """Read configuration from environment variable. The value of the environment variable must be the name of a module to import. Example: >>> os.environ['CELERY_CONFIG_MODULE'] = 'myapp.celeryconfig' >>> celery.config_from_envvar('CELERY_CONFIG_MODULE') """ module_name = os.environ.get(variable_name) if not module_name: if silent: return False raise ImproperlyConfigured( ERR_ENVVAR_NOT_SET.strip().format(variable_name)) return self.config_from_object(module_name, silent=silent, force=force) def config_from_cmdline(self, argv, namespace='celery'): self._conf.update( self.loader.cmdline_config_parser(argv, namespace) ) def setup_security(self, allowed_serializers=None, key=None, cert=None, store=None, digest='sha1', serializer='json'): """Setup the message-signing serializer. This will affect all application instances (a global operation). Disables untrusted serializers and if configured to use the ``auth`` serializer will register the ``auth`` serializer with the provided settings into the Kombu serializer registry. Arguments: allowed_serializers (Set[str]): List of serializer names, or content_types that should be exempt from being disabled. key (str): Name of private key file to use. Defaults to the :setting:`security_key` setting. cert (str): Name of certificate file to use. Defaults to the :setting:`security_certificate` setting. store (str): Directory containing certificates. Defaults to the :setting:`security_cert_store` setting. digest (str): Digest algorithm used when signing messages. Default is ``sha1``. serializer (str): Serializer used to encode messages after they've been signed. See :setting:`task_serializer` for the serializers supported. Default is ``json``. """ from celery.security import setup_security return setup_security(allowed_serializers, key, cert, store, digest, serializer, app=self) def autodiscover_tasks(self, packages=None, related_name='tasks', force=False): """Auto-discover task modules. Searches a list of packages for a "tasks.py" module (or use related_name argument). If the name is empty, this will be delegated to fix-ups (e.g., Django). For example if you have a directory layout like this: .. 
code-block:: text foo/__init__.py tasks.py models.py bar/__init__.py tasks.py models.py baz/__init__.py models.py Then calling ``app.autodiscover_tasks(['foo', 'bar', 'baz'])`` will result in the modules ``foo.tasks`` and ``bar.tasks`` being imported. Arguments: packages (List[str]): List of packages to search. This argument may also be a callable, in which case the value returned is used (for lazy evaluation). related_name (str): The name of the module to find. Defaults to "tasks": meaning "look for 'module.tasks' for every module in ``packages``." force (bool): By default this call is lazy so that the actual auto-discovery won't happen until an application imports the default modules. Forcing will cause the auto-discovery to happen immediately. """ if force: return self._autodiscover_tasks(packages, related_name) signals.import_modules.connect(starpromise( self._autodiscover_tasks, packages, related_name, ), weak=False, sender=self) def _autodiscover_tasks(self, packages, related_name, **kwargs): if packages: return self._autodiscover_tasks_from_names(packages, related_name) return self._autodiscover_tasks_from_fixups(related_name) def _autodiscover_tasks_from_names(self, packages, related_name): # packages argument can be lazy return self.loader.autodiscover_tasks( packages() if callable(packages) else packages, related_name, ) def _autodiscover_tasks_from_fixups(self, related_name): return self._autodiscover_tasks_from_names([ pkg for fixup in self._fixups for pkg in fixup.autodiscover_tasks() if hasattr(fixup, 'autodiscover_tasks') ], related_name=related_name) def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, group_id=None, retries=0, chord=None, reply_to=None, time_limit=None, soft_time_limit=None, root_id=None, parent_id=None, route_name=None, shadow=None, chain=None, task_type=None, **options): """Send task by name. Supports the same arguments as :meth:`@-Task.apply_async`. Arguments: name (str): Name of task to call (e.g., `"tasks.add"`). result_cls (~@AsyncResult): Specify custom result class. 
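Example:
    A minimal sketch; ``tasks.add`` is an assumed task name that must
    already be registered by a worker, it is not shipped with Celery:

    .. code-block:: pycon

        >>> result = app.send_task('tasks.add', args=(2, 2), countdown=10)
        >>> result.get(timeout=30)
        4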
""" parent = have_parent = None amqp = self.amqp task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or amqp.router conf = self.conf if conf.task_always_eager: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'task_always_eager has no effect on send_task', ), stacklevel=2) options = router.route( options, route_name or name, args, kwargs, task_type) if not root_id or not parent_id: parent = self.current_worker_task if parent: if not root_id: root_id = parent.request.root_id or parent.request.id if not parent_id: parent_id = parent.request.id message = amqp.create_task_message( task_id, name, args, kwargs, countdown, eta, group_id, expires, retries, chord, maybe_list(link), maybe_list(link_error), reply_to or self.oid, time_limit, soft_time_limit, self.conf.task_send_sent_event, root_id, parent_id, shadow, chain, ) if connection: producer = amqp.Producer(connection, auto_declare=False) with self.producer_or_acquire(producer) as P: with P.connection._reraise_as_library_errors(): self.backend.on_task_call(P, task_id) amqp.send_task_message(P, name, message, **options) result = (result_cls or self.AsyncResult)(task_id) if add_to_parent: if not have_parent: parent, have_parent = self.current_worker_task, True if parent: parent.add_trail(result) return result def connection_for_read(self, url=None, **kwargs): """Establish connection used for consuming. See Also: :meth:`connection` for supported arguments. """ return self._connection(url or self.conf.broker_read_url, **kwargs) def connection_for_write(self, url=None, **kwargs): """Establish connection used for producing. See Also: :meth:`connection` for supported arguments. """ return self._connection(url or self.conf.broker_write_url, **kwargs) def connection(self, hostname=None, userid=None, password=None, virtual_host=None, port=None, ssl=None, connect_timeout=None, transport=None, transport_options=None, heartbeat=None, login_method=None, failover_strategy=None, **kwargs): """Establish a connection to the message broker. Please use :meth:`connection_for_read` and :meth:`connection_for_write` instead, to convey the intent of use for this connection. Arguments: url: Either the URL or the hostname of the broker to use. hostname (str): URL, Hostname/IP-address of the broker. If a URL is used, then the other argument below will be taken from the URL instead. userid (str): Username to authenticate as. password (str): Password to authenticate with virtual_host (str): Virtual host to use (domain). port (int): Port to connect to. ssl (bool, Dict): Defaults to the :setting:`broker_use_ssl` setting. transport (str): defaults to the :setting:`broker_transport` setting. transport_options (Dict): Dictionary of transport specific options. heartbeat (int): AMQP Heartbeat in seconds (``pyamqp`` only). login_method (str): Custom login method to use (AMQP only). failover_strategy (str, Callable): Custom failover strategy. **kwargs: Additional arguments to :class:`kombu.Connection`. Returns: kombu.Connection: the lazy connection instance. 
""" return self.connection_for_write( hostname or self.conf.broker_write_url, userid=userid, password=password, virtual_host=virtual_host, port=port, ssl=ssl, connect_timeout=connect_timeout, transport=transport, transport_options=transport_options, heartbeat=heartbeat, login_method=login_method, failover_strategy=failover_strategy, **kwargs ) def _connection(self, url, userid=None, password=None, virtual_host=None, port=None, ssl=None, connect_timeout=None, transport=None, transport_options=None, heartbeat=None, login_method=None, failover_strategy=None, **kwargs): conf = self.conf return self.amqp.Connection( url, userid or conf.broker_user, password or conf.broker_password, virtual_host or conf.broker_vhost, port or conf.broker_port, transport=transport or conf.broker_transport, ssl=self.either('broker_use_ssl', ssl), heartbeat=heartbeat, login_method=login_method or conf.broker_login_method, failover_strategy=( failover_strategy or conf.broker_failover_strategy ), transport_options=dict( conf.broker_transport_options, **transport_options or {} ), connect_timeout=self.either( 'broker_connection_timeout', connect_timeout ), ) broker_connection = connection def _acquire_connection(self, pool=True): """Helper for :meth:`connection_or_acquire`.""" if pool: return self.pool.acquire(block=True) return self.connection_for_write() def connection_or_acquire(self, connection=None, pool=True, *_, **__): """Context used to acquire a connection from the pool. For use within a :keyword:`with` statement to get a connection from the pool if one is not already provided. Arguments: connection (kombu.Connection): If not provided, a connection will be acquired from the connection pool. """ return FallbackContext(connection, self._acquire_connection, pool=pool) default_connection = connection_or_acquire # XXX compat def producer_or_acquire(self, producer=None): """Context used to acquire a producer from the pool. For use within a :keyword:`with` statement to get a producer from the pool if one is not already provided Arguments: producer (kombu.Producer): If not provided, a producer will be acquired from the producer pool. """ return FallbackContext( producer, self.producer_pool.acquire, block=True, ) default_producer = producer_or_acquire # XXX compat def prepare_config(self, c): """Prepare configuration before it is merged with the defaults.""" return find_deprecated_settings(c) def now(self): """Return the current time and date as a datetime.""" from datetime import datetime return datetime.utcnow().replace(tzinfo=self.timezone) def select_queues(self, queues=None): """Select subset of queues. Arguments: queues (Sequence[str]): a list of queue names to keep. """ return self.amqp.queues.select(queues) def either(self, default_key, *defaults): """Get key from configuration or use default values. Fallback to the value of a configuration key if none of the `*values` are true. """ return first(None, [ first(None, defaults), starpromise(self.conf.get, default_key), ]) def bugreport(self): """Return information useful in bug reports.""" return bugreport(self) def _get_backend(self): backend, url = backends.by_url( self.backend_cls or self.conf.result_backend, self.loader) return backend(app=self, url=url) def _finalize_pending_conf(self): """Get config value by key and finalize loading the configuration. Note: This is used by PendingConfiguration: as soon as you access a key the configuration is read. 
""" conf = self._conf = self._load_config() return conf def _load_config(self): if isinstance(self.on_configure, Signal): self.on_configure.send(sender=self) else: # used to be a method pre 4.0 self.on_configure() if self._config_source: self.loader.config_from_object(self._config_source) self.configured = True settings = detect_settings( self.prepare_config(self.loader.conf), self._preconf, ignore_keys=self._preconf_set_by_auto, prefix=self.namespace, ) if self._conf is not None: # replace in place, as someone may have referenced app.conf, # done some changes, accessed a key, and then try to make more # changes to the reference and not the finalized value. self._conf.swap_with(settings) else: self._conf = settings # load lazy config dict initializers. pending_def = self._pending_defaults while pending_def: self._conf.add_defaults(maybe_evaluate(pending_def.popleft()())) # load lazy periodic tasks pending_beat = self._pending_periodic_tasks while pending_beat: self._add_periodic_task(*pending_beat.popleft()) self.on_after_configure.send(sender=self, source=self._conf) return self._conf def _after_fork(self): self._pool = None try: self.__dict__['amqp']._producer_pool = None except (AttributeError, KeyError): pass self.on_after_fork.send(sender=self) def signature(self, *args, **kwargs): """Return a new :class:`~celery.Signature` bound to this app.""" kwargs['app'] = self return self._canvas.signature(*args, **kwargs) def add_periodic_task(self, schedule, sig, args=(), kwargs=(), name=None, **opts): key, entry = self._sig_to_periodic_task_entry( schedule, sig, args, kwargs, name, **opts) if self.configured: self._add_periodic_task(key, entry) else: self._pending_periodic_tasks.append((key, entry)) return key def _sig_to_periodic_task_entry(self, schedule, sig, args=(), kwargs={}, name=None, **opts): sig = (sig.clone(args, kwargs) if isinstance(sig, abstract.CallableSignature) else self.signature(sig.name, args, kwargs)) return name or repr(sig), { 'schedule': schedule, 'task': sig.name, 'args': sig.args, 'kwargs': sig.kwargs, 'options': dict(sig.options, **opts), } def _add_periodic_task(self, key, entry): self._conf.beat_schedule[key] = entry def create_task_cls(self): """Create a base task class bound to this app.""" return self.subclass_with_self( self.task_cls, name='Task', attribute='_app', keep_reduce=True, abstract=True, ) def subclass_with_self(self, Class, name=None, attribute='app', reverse=None, keep_reduce=False, **kw): """Subclass an app-compatible class. App-compatible means that the class has a class attribute that provides the default app it should use, for example: ``class Foo: app = None``. Arguments: Class (type): The app-compatible class to subclass. name (str): Custom name for the target class. attribute (str): Name of the attribute holding the app, Default is 'app'. reverse (str): Reverse path to this object used for pickling purposes. For example, to get ``app.AsyncResult``, use ``"AsyncResult"``. keep_reduce (bool): If enabled a custom ``__reduce__`` implementation won't be provided. 
""" Class = symbol_by_name(Class) reverse = reverse if reverse else Class.__name__ def __reduce__(self): return _unpickle_appattr, (reverse, self.__reduce_args__()) attrs = dict( {attribute: self}, __module__=Class.__module__, __doc__=Class.__doc__, **kw) if not keep_reduce: attrs['__reduce__'] = __reduce__ return type(bytes_if_py2(name or Class.__name__), (Class,), attrs) def _rgetattr(self, path): return attrgetter(path)(self) def __enter__(self): return self def __exit__(self, *exc_info): self.close() def __repr__(self): return '<{0} {1}>'.format(type(self).__name__, appstr(self)) def __reduce__(self): if self._using_v1_reduce: return self.__reduce_v1__() return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__())) def __reduce_v1__(self): # Reduce only pickles the configuration changes, # so the default configuration doesn't have to be passed # between processes. return ( _unpickle_app, (self.__class__, self.Pickler) + self.__reduce_args__(), ) def __reduce_keys__(self): """Keyword arguments used to reconstruct the object when unpickling.""" return { 'main': self.main, 'changes': self._conf.changes if self.configured else self._preconf, 'loader': self.loader_cls, 'backend': self.backend_cls, 'amqp': self.amqp_cls, 'events': self.events_cls, 'log': self.log_cls, 'control': self.control_cls, 'fixups': self.fixups, 'config_source': self._config_source, 'task_cls': self.task_cls, 'namespace': self.namespace, } def __reduce_args__(self): """Deprecated method, please use :meth:`__reduce_keys__` instead.""" return (self.main, self._conf.changes if self.configured else {}, self.loader_cls, self.backend_cls, self.amqp_cls, self.events_cls, self.log_cls, self.control_cls, False, self._config_source) @cached_property def Worker(self): """Worker application. See Also: :class:`~@Worker`. """ return self.subclass_with_self('celery.apps.worker:Worker') @cached_property def WorkController(self, **kwargs): """Embeddable worker. See Also: :class:`~@WorkController`. """ return self.subclass_with_self('celery.worker:WorkController') @cached_property def Beat(self, **kwargs): """:program:`celery beat` scheduler application. See Also: :class:`~@Beat`. """ return self.subclass_with_self('celery.apps.beat:Beat') @cached_property def Task(self): """Base task class for this app.""" return self.create_task_cls() @cached_property def annotations(self): return prepare_annotations(self.conf.task_annotations) @cached_property def AsyncResult(self): """Create new result instance. See Also: :class:`celery.result.AsyncResult`. """ return self.subclass_with_self('celery.result:AsyncResult') @cached_property def ResultSet(self): return self.subclass_with_self('celery.result:ResultSet') @cached_property def GroupResult(self): """Create new group result instance. See Also: :class:`celery.result.GroupResult`. """ return self.subclass_with_self('celery.result:GroupResult') @property def pool(self): """Broker connection pool: :class:`~@pool`. Note: This attribute is not related to the workers concurrency pool. """ if self._pool is None: self._ensure_after_fork() limit = self.conf.broker_pool_limit pools.set_limit(limit) self._pool = pools.connections[self.connection_for_write()] return self._pool @property def current_task(self): """Instance of task being executed, or :const:`None`.""" return _task_stack.top @property def current_worker_task(self): """The task currently being executed by a worker or :const:`None`. 
Differs from :data:`current_task` in that it's not affected by tasks calling other tasks directly, or eagerly. """ return get_current_worker_task() @cached_property def oid(self): """Universally unique identifier for this app.""" # since 4.0: thread.get_ident() is not included when # generating the process id. This is due to how the RPC # backend now dedicates a single thread to receive results, # which would not work if each thread has a separate id. return oid_from(self, threads=False) @cached_property def amqp(self): """AMQP related functionality: :class:`~@amqp`.""" return instantiate(self.amqp_cls, app=self) @cached_property def backend(self): """Current backend instance.""" return self._get_backend() @property def conf(self): """Current configuration.""" if self._conf is None: self._conf = self._load_config() return self._conf @conf.setter def conf(self, d): # noqa self._conf = d @cached_property def control(self): """Remote control: :class:`~@control`.""" return instantiate(self.control_cls, app=self) @cached_property def events(self): """Consuming and sending events: :class:`~@events`.""" return instantiate(self.events_cls, app=self) @cached_property def loader(self): """Current loader instance.""" return get_loader_cls(self.loader_cls)(app=self) @cached_property def log(self): """Logging: :class:`~@log`.""" return instantiate(self.log_cls, app=self) @cached_property def _canvas(self): from celery import canvas return canvas @cached_property def tasks(self): """Task registry. Warning: Accessing this attribute will also auto-finalize the app. """ self.finalize(auto=True) return self._tasks @property def producer_pool(self): return self.amqp.producer_pool def uses_utc_timezone(self): """Check if the application uses the UTC timezone.""" return self.conf.timezone == 'UTC' or self.conf.timezone is None @cached_property def timezone(self): """Current timezone for this app. This is a cached property taking the time zone from the :setting:`timezone` setting. """ conf = self.conf tz = conf.timezone or 'UTC' if not tz: if conf.enable_utc: return timezone.get_timezone('UTC') else: if not conf.timezone: return timezone.local return timezone.get_timezone(tz) App = Celery # noqa: E305 XXX compat celery-4.1.0/celery/app/trace.py0000644000175000017500000005556313135426300016453 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Trace task execution. This module defines how the task execution is traced: errors are recorded, handlers are applied and so on. """ from __future__ import absolute_import, unicode_literals # ## --- # This is the heart of the worker, the inner loop so to speak. # It used to be split up into nice little classes and methods, # but in the end it only resulted in bad performance and horrible tracebacks, # so instead we now use one closure per task class. # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. # pylint: disable=broad-except # We know what we're doing... 
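#
# A rough sketch of how the pieces below fit together (illustrative only,
# not a public API): the worker calls build_tracer() once per task class,
# then invokes the returned closure for every incoming message:
#
#     tracer = build_tracer(task.name, task, app=app)
#     retval, info, runtime, retstr = tracer(uuid, args, kwargs, request)
#
# On success ``info`` is None; on failure it's a TraceInfo instance
# describing the error state (see trace_ok_t below).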
import logging import os import sys from collections import namedtuple from warnings import warn from billiard.einfo import ExceptionInfo from kombu.exceptions import EncodeError from kombu.serialization import loads as loads_message, prepare_accept_content from kombu.utils.encoding import safe_repr, safe_str from celery import current_app, group from celery import states, signals from celery._state import _task_stack from celery.app.task import Task as BaseTask, Context from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError from celery.five import monotonic, text_t from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.objects import mro_lookup from celery.utils.saferepr import saferepr from celery.utils.serialization import ( get_pickleable_exception, get_pickled_exception, get_pickleable_etype, ) __all__ = [ 'TraceInfo', 'build_tracer', 'trace_task', 'setup_worker_optimizations', 'reset_worker_optimizations', ] logger = get_logger(__name__) #: Format string used to log task success. LOG_SUCCESS = """\ Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\ """ #: Format string used to log task failure. LOG_FAILURE = """\ Task %(name)s[%(id)s] %(description)s: %(exc)s\ """ #: Format string used to log task internal error. LOG_INTERNAL_ERROR = """\ Task %(name)s[%(id)s] %(description)s: %(exc)s\ """ #: Format string used to log task ignored. LOG_IGNORED = """\ Task %(name)s[%(id)s] %(description)s\ """ #: Format string used to log task rejected. LOG_REJECTED = """\ Task %(name)s[%(id)s] %(exc)s\ """ #: Format string used to log task retry. LOG_RETRY = """\ Task %(name)s[%(id)s] retry: %(exc)s\ """ log_policy_t = namedtuple( 'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'), ) log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1) log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0) log_policy_internal = log_policy_t( LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1, ) log_policy_expected = log_policy_t( LOG_FAILURE, 'raised expected', logging.INFO, 0, 0, ) log_policy_unexpected = log_policy_t( LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1, ) send_prerun = signals.task_prerun.send send_postrun = signals.task_postrun.send send_success = signals.task_success.send STARTED = states.STARTED SUCCESS = states.SUCCESS IGNORED = states.IGNORED REJECTED = states.REJECTED RETRY = states.RETRY FAILURE = states.FAILURE EXCEPTION_STATES = states.EXCEPTION_STATES IGNORE_STATES = frozenset({IGNORED, RETRY, REJECTED}) #: set by :func:`setup_worker_optimizations` _localized = [] _patched = {} trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr')) def info(fmt, context): """Log 'fmt % context' with severity 'INFO'. 'context' is also passed in extra with key 'data' for custom handlers. 
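Example:
    An illustrative handler sketch (``ContextHandler`` is hypothetical):

    .. code-block:: python

        class ContextHandler(logging.Handler):
            def emit(self, record):
                # the original context mapping is attached as `.data`
                context = getattr(record, 'data', None)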
""" logger.info(fmt, context, extra={'data': context}) def task_has_custom(task, attr): """Return true if the task overrides ``attr``.""" return mro_lookup(task.__class__, attr, stop={BaseTask, object}, monkey_patched=['celery.app.task']) def get_log_policy(task, einfo, exc): if isinstance(exc, Reject): return log_policy_reject elif isinstance(exc, Ignore): return log_policy_ignore elif einfo.internal: return log_policy_internal else: if task.throws and isinstance(exc, task.throws): return log_policy_expected return log_policy_unexpected class TraceInfo(object): """Information about task execution.""" __slots__ = ('state', 'retval') def __init__(self, state, retval=None): self.state = state self.retval = retval def handle_error_state(self, task, req, eager=False, call_errbacks=True): store_errors = not eager if task.ignore_result: store_errors = task.store_errors_even_if_ignored return { RETRY: self.handle_retry, FAILURE: self.handle_failure, }[self.state](task, req, store_errors=store_errors, call_errbacks=call_errbacks) def handle_reject(self, task, req, **kwargs): self._log_error(task, req, ExceptionInfo()) def handle_ignore(self, task, req, **kwargs): self._log_error(task, req, ExceptionInfo()) def handle_retry(self, task, req, store_errors=True, **kwargs): """Handle retry exception.""" # the exception raised is the Retry semi-predicate, # and it's exc' attribute is the original exception raised (if any). type_, _, tb = sys.exc_info() try: reason = self.retval einfo = ExceptionInfo((type_, reason, tb)) if store_errors: task.backend.mark_as_retry( req.id, reason.exc, einfo.traceback, request=req, ) task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) signals.task_retry.send(sender=task, request=req, reason=reason, einfo=einfo) info(LOG_RETRY, { 'id': req.id, 'name': task.name, 'exc': text_t(reason), }) return einfo finally: del tb def handle_failure(self, task, req, store_errors=True, call_errbacks=True): """Handle exception.""" _, _, tb = sys.exc_info() try: exc = self.retval # make sure we only send pickleable exceptions back to parent. einfo = ExceptionInfo() einfo.exception = get_pickleable_exception(einfo.exception) einfo.type = get_pickleable_etype(einfo.type) task.backend.mark_as_failure( req.id, exc, einfo.traceback, request=req, store_result=store_errors, call_errbacks=call_errbacks, ) task.on_failure(exc, req.id, req.args, req.kwargs, einfo) signals.task_failure.send(sender=task, task_id=req.id, exception=exc, args=req.args, kwargs=req.kwargs, traceback=tb, einfo=einfo) self._log_error(task, req, einfo) return einfo finally: del tb def _log_error(self, task, req, einfo): eobj = einfo.exception = get_pickled_exception(einfo.exception) exception, traceback, exc_info, sargs, skwargs = ( safe_repr(eobj), safe_str(einfo.traceback), einfo.exc_info, safe_repr(req.args), safe_repr(req.kwargs), ) policy = get_log_policy(task, einfo, eobj) context = { 'hostname': req.hostname, 'id': req.id, 'name': task.name, 'exc': exception, 'traceback': traceback, 'args': sargs, 'kwargs': skwargs, 'description': policy.description, 'internal': einfo.internal, } logger.log(policy.severity, policy.format.strip(), context, exc_info=exc_info if policy.traceback else None, extra={'data': context}) def build_tracer(name, task, loader=None, hostname=None, store_errors=True, Info=TraceInfo, eager=False, propagate=False, app=None, monotonic=monotonic, trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES): """Return a function that traces task execution. 
Catches all exceptions and updates result backend with the state and result. If the call was successful, it saves the result to the task result backend, and sets the task status to `"SUCCESS"`. If the call raises :exc:`~@Retry`, it extracts the original exception, uses that as the result and sets the task state to `"RETRY"`. If the call results in an exception, it saves the exception as the task result, and sets the task state to `"FAILURE"`. Return a function that takes the following arguments: :param uuid: The id of the task. :param args: List of positional args to pass on to the function. :param kwargs: Keyword arguments mapping to pass on to the function. :keyword request: Request dict. """ # noqa: C901 # pylint: disable=too-many-statements # If the task doesn't define a custom __call__ method # we optimize it away by simply calling the run method directly, # saving the extra method call and a line less in the stack trace. fun = task if task_has_custom(task, '__call__') else task.run loader = loader or app.loader backend = task.backend ignore_result = task.ignore_result track_started = task.track_started track_started = not eager and (task.track_started and not ignore_result) publish_result = not eager and not ignore_result hostname = hostname or gethostname() loader_task_init = loader.on_task_init loader_cleanup = loader.on_process_cleanup task_on_success = None task_after_return = None if task_has_custom(task, 'on_success'): task_on_success = task.on_success if task_has_custom(task, 'after_return'): task_after_return = task.after_return store_result = backend.store_result mark_as_done = backend.mark_as_done backend_cleanup = backend.process_cleanup pid = os.getpid() request_stack = task.request_stack push_request = request_stack.push pop_request = request_stack.pop push_task = _task_stack.push pop_task = _task_stack.pop _does_info = logger.isEnabledFor(logging.INFO) resultrepr_maxsize = task.resultrepr_maxsize prerun_receivers = signals.task_prerun.receivers postrun_receivers = signals.task_postrun.receivers success_receivers = signals.task_success.receivers from celery import canvas signature = canvas.maybe_signature # maybe_ does not clone if already def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True): if propagate: raise I = Info(state, exc) R = I.handle_error_state( task, request, eager=eager, call_errbacks=call_errbacks, ) return I, R, I.state, I.retval def trace_task(uuid, args, kwargs, request=None): # R - is the possibly prepared return value. # I - is the Info object. # T - runtime # Rstr - textual representation of return value # retval - is the always unmodified return value. # state - is the resulting task state. 
# This function is very long because we've unrolled all the calls # for performance reasons, and because the function is so long # we want the main variables (I, and R) to stand out visually from the # the rest of the variables, so breaking PEP8 is worth it ;) R = I = T = Rstr = retval = state = None task_request = None time_start = monotonic() try: try: kwargs.items except AttributeError: raise InvalidTaskError( 'Task keyword arguments is not a mapping') push_task(task) task_request = Context(request or {}, args=args, called_directly=False, kwargs=kwargs) root_id = task_request.root_id or uuid push_request(task_request) try: # -*- PRE -*- if prerun_receivers: send_prerun(sender=task, task_id=uuid, task=task, args=args, kwargs=kwargs) loader_task_init(uuid, task) if track_started: store_result( uuid, {'pid': pid, 'hostname': hostname}, STARTED, request=task_request, ) # -*- TRACE -*- try: R = retval = fun(*args, **kwargs) state = SUCCESS except Reject as exc: I, R = Info(REJECTED, exc), ExceptionInfo(internal=True) state, retval = I.state, I.retval I.handle_reject(task, task_request) except Ignore as exc: I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) state, retval = I.state, I.retval I.handle_ignore(task, task_request) except Retry as exc: I, R, state, retval = on_error( task_request, exc, uuid, RETRY, call_errbacks=False) except Exception as exc: I, R, state, retval = on_error(task_request, exc, uuid) except BaseException as exc: raise else: try: # callback tasks must be applied before the result is # stored, so that result.children is populated. # groups are called inline and will store trail # separately, so need to call them separately # so that the trail's not added multiple times :( # (Issue #1936) callbacks = task.request.callbacks if callbacks: if len(task.request.callbacks) > 1: sigs, groups = [], [] for sig in callbacks: sig = signature(sig, app=app) if isinstance(sig, group): groups.append(sig) else: sigs.append(sig) for group_ in groups: group_.apply_async( (retval,), parent_id=uuid, root_id=root_id, ) if sigs: group(sigs, app=app).apply_async( (retval,), parent_id=uuid, root_id=root_id, ) else: signature(callbacks[0], app=app).apply_async( (retval,), parent_id=uuid, root_id=root_id, ) # execute first task in chain chain = task_request.chain if chain: _chsig = signature(chain.pop(), app=app) _chsig.apply_async( (retval,), chain=chain, parent_id=uuid, root_id=root_id, ) mark_as_done( uuid, retval, task_request, publish_result, ) except EncodeError as exc: I, R, state, retval = on_error(task_request, exc, uuid) else: Rstr = saferepr(R, resultrepr_maxsize) T = monotonic() - time_start if task_on_success: task_on_success(retval, uuid, args, kwargs) if success_receivers: send_success(sender=task, result=retval) if _does_info: info(LOG_SUCCESS, { 'id': uuid, 'name': name, 'return_value': Rstr, 'runtime': T, }) # -* POST *- if state not in IGNORE_STATES: if task_after_return: task_after_return( state, retval, uuid, args, kwargs, None, ) finally: try: if postrun_receivers: send_postrun(sender=task, task_id=uuid, task=task, args=args, kwargs=kwargs, retval=retval, state=state) finally: pop_task() pop_request() if not eager: try: backend_cleanup() loader_cleanup() except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception as exc: logger.error('Process cleanup failed: %r', exc, exc_info=True) except MemoryError: raise except Exception as exc: if eager: raise R = report_internal_error(task, exc) if task_request is not None: I, _, _, _ = on_error(task_request, 
exc, uuid) return trace_ok_t(R, I, T, Rstr) return trace_task def trace_task(task, uuid, args, kwargs, request={}, **opts): """Trace task execution.""" try: if task.__trace__ is None: task.__trace__ = build_tracer(task.name, task, **opts) return task.__trace__(uuid, args, kwargs, request) except Exception as exc: return trace_ok_t(report_internal_error(task, exc), None, 0.0, None) def _trace_task_ret(name, uuid, request, body, content_type, content_encoding, loads=loads_message, app=None, **extra_request): app = app or current_app._get_current_object() embed = None if content_type: accept = prepare_accept_content(app.conf.accept_content) args, kwargs, embed = loads( body, content_type, content_encoding, accept=accept, ) else: args, kwargs, embed = body hostname = gethostname() request.update({ 'args': args, 'kwargs': kwargs, 'hostname': hostname, 'is_eager': False, }, **embed or {}) R, I, T, Rstr = trace_task(app.tasks[name], uuid, args, kwargs, request, app=app) return (1, R, T) if I else (0, Rstr, T) trace_task_ret = _trace_task_ret # noqa: E305 def _fast_trace_task(task, uuid, request, body, content_type, content_encoding, loads=loads_message, _loc=_localized, hostname=None, **_): embed = None tasks, accept, hostname = _loc if content_type: args, kwargs, embed = loads( body, content_type, content_encoding, accept=accept, ) else: args, kwargs, embed = body request.update({ 'args': args, 'kwargs': kwargs, 'hostname': hostname, 'is_eager': False, }, **embed or {}) R, I, T, Rstr = tasks[task].__trace__( uuid, args, kwargs, request, ) return (1, R, T) if I else (0, Rstr, T) def report_internal_error(task, exc): _type, _value, _tb = sys.exc_info() try: _value = task.backend.prepare_exception(exc, 'pickle') exc_info = ExceptionInfo((_type, _value, _tb), internal=True) warn(RuntimeWarning( 'Exception raised outside body: {0!r}:\n{1}'.format( exc, exc_info.traceback))) return exc_info finally: del _tb def setup_worker_optimizations(app, hostname=None): """Setup worker related optimizations.""" global trace_task_ret hostname = hostname or gethostname() # make sure custom Task.__call__ methods that calls super # won't mess up the request/task stack. _install_stack_protection() # all new threads start without a current app, so if an app is not # passed on to the thread it will fall back to the "default app", # which then could be the wrong app. So for the worker # we set this to always return our app. This is a hack, # and means that only a single app can be used for workers # running in the same process. app.set_current() app.set_default() # evaluate all task classes by finalizing the app. app.finalize() # set fast shortcut to task registry _localized[:] = [ app._tasks, prepare_accept_content(app.conf.accept_content), hostname, ] trace_task_ret = _fast_trace_task from celery.worker import request as request_module request_module.trace_task_ret = _fast_trace_task request_module.__optimize__() def reset_worker_optimizations(): """Reset previously configured optimizations.""" global trace_task_ret trace_task_ret = _trace_task_ret try: delattr(BaseTask, '_stackprotected') except AttributeError: pass try: BaseTask.__call__ = _patched.pop('BaseTask.__call__') except KeyError: pass from celery.worker import request as request_module request_module.trace_task_ret = _trace_task_ret def _install_stack_protection(): # Patches BaseTask.__call__ in the worker to handle the edge case # where people override it and also call super. 
# # - The worker optimizes away BaseTask.__call__ and instead # calls task.run directly. # - so with the addition of current_task and the request stack # BaseTask.__call__ now pushes to those stacks so that # they work when tasks are called directly. # # The worker only optimizes away __call__ in the case # where it hasn't been overridden, so the request/task stack # will blow if a custom task class defines __call__ and also # calls super(). if not getattr(BaseTask, '_stackprotected', False): _patched['BaseTask.__call__'] = orig = BaseTask.__call__ def __protected_call__(self, *args, **kwargs): stack = self.request_stack req = stack.top if req and not req._protected and \ len(stack) == 1 and not req.called_directly: req._protected = 1 return self.run(*args, **kwargs) return orig(self, *args, **kwargs) BaseTask.__call__ = __protected_call__ BaseTask._stackprotected = True celery-4.1.0/celery/app/__init__.py0000644000175000017500000000475313130607475017120 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Celery Application.""" from __future__ import absolute_import, print_function, unicode_literals from celery.local import Proxy from celery import _state from celery._state import ( app_or_default, enable_trace, disable_trace, push_current_task, pop_current_task, ) from .base import Celery from .utils import AppPickler __all__ = [ 'Celery', 'AppPickler', 'app_or_default', 'default_app', 'bugreport', 'enable_trace', 'disable_trace', 'shared_task', 'push_current_task', 'pop_current_task', ] #: Proxy always returning the app set as default. default_app = Proxy(lambda: _state.default_app) def bugreport(app=None): """Return information useful in bug reports.""" return (app or _state.get_current_app()).bugreport() def shared_task(*args, **kwargs): """Create shared task (decorator). This can be used by library authors to create tasks that'll work for any app environment. Returns: ~celery.local.Proxy: A proxy that always takes the task from the current apps task registry. Example: >>> from celery import Celery, shared_task >>> @shared_task ... def add(x, y): ... return x + y ... >>> app1 = Celery(broker='amqp://') >>> add.app is app1 True >>> app2 = Celery(broker='redis://') >>> add.app is app2 True """ def create_shared_task(**options): def __inner(fun): name = options.get('name') # Set as shared task so that unfinalized apps, # and future apps will register a copy of this task. _state.connect_on_app_finalize( lambda app: app._task_from_fun(fun, **options) ) # Force all finalized apps to take this task as well. for app in _state._get_active_apps(): if app.finalized: with app._finalize_mutex: app._task_from_fun(fun, **options) # Return a proxy that always gets the task from the current # apps task registry. def task_by_cons(): app = _state.get_current_app() return app.tasks[ name or app.gen_task_name(fun.__name__, fun.__module__) ] return Proxy(task_by_cons) return __inner if len(args) == 1 and callable(args[0]): return create_shared_task(**kwargs)(args[0]) return create_shared_task(*args, **kwargs) celery-4.1.0/celery/app/log.py0000644000175000017500000002166113130607475016137 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Logging configuration. The Celery instances logging section: ``Celery.log``. Sets up logging for the worker and other programs, redirects standard outs, colors log output, patches logging related compatibility fixes, and so on. 
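A minimal usage sketch (assumes an ``app = Celery()`` instance; the
argument values are illustrative only):

    >>> app.log.setup(loglevel='INFO', redirect_stdouts=True)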
""" from __future__ import absolute_import, unicode_literals import logging import os import sys from logging.handlers import WatchedFileHandler from kombu.utils.encoding import set_default_encoding_file from celery import signals from celery._state import get_current_task from celery.five import string_t from celery.local import class_property from celery.platforms import isatty from celery.utils.log import ( get_logger, mlevel, ColorFormatter, LoggingProxy, get_multiprocessing_logger, reset_multiprocessing_logger, ) from celery.utils.nodenames import node_format from celery.utils.term import colored __all__ = ['TaskFormatter', 'Logging'] MP_LOG = os.environ.get('MP_LOG', False) class TaskFormatter(ColorFormatter): """Formatter for tasks, adding the task name and id.""" def format(self, record): task = get_current_task() if task and task.request: record.__dict__.update(task_id=task.request.id, task_name=task.name) else: record.__dict__.setdefault('task_name', '???') record.__dict__.setdefault('task_id', '???') return ColorFormatter.format(self, record) class Logging(object): """Application logging setup (app.log).""" #: The logging subsystem is only configured once per process. #: setup_logging_subsystem sets this flag, and subsequent calls #: will do nothing. _setup = False def __init__(self, app): self.app = app self.loglevel = mlevel(logging.WARN) self.format = self.app.conf.worker_log_format self.task_format = self.app.conf.worker_task_log_format self.colorize = self.app.conf.worker_log_color def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, redirect_level='WARNING', colorize=None, hostname=None): loglevel = mlevel(loglevel) handled = self.setup_logging_subsystem( loglevel, logfile, colorize=colorize, hostname=hostname, ) if not handled: if redirect_stdouts: self.redirect_stdouts(redirect_level) os.environ.update( CELERY_LOG_LEVEL=str(loglevel) if loglevel else '', CELERY_LOG_FILE=str(logfile) if logfile else '', ) return handled def redirect_stdouts(self, loglevel=None, name='celery.redirected'): self.redirect_stdouts_to_logger( get_logger(name), loglevel=loglevel ) os.environ.update( CELERY_LOG_REDIRECT='1', CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), ) def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, hostname=None, **kwargs): if self.already_setup: return if logfile and hostname: logfile = node_format(logfile, hostname) Logging._setup = True loglevel = mlevel(loglevel or self.loglevel) format = format or self.format colorize = self.supports_color(colorize, logfile) reset_multiprocessing_logger() receivers = signals.setup_logging.send( sender=None, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize, ) if not receivers: root = logging.getLogger() if self.app.conf.worker_hijack_root_logger: root.handlers = [] get_logger('celery').handlers = [] get_logger('celery.task').handlers = [] get_logger('celery.redirected').handlers = [] # Configure root logger self._configure_logger( root, logfile, loglevel, format, colorize, **kwargs ) # Configure the multiprocessing logger self._configure_logger( get_multiprocessing_logger(), logfile, loglevel if MP_LOG else logging.ERROR, format, colorize, **kwargs ) signals.after_setup_logger.send( sender=None, logger=root, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize, ) # then setup the root task logger. 
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers

    def _configure_logger(self, logger, logfile, loglevel,
                          format, colorize, **kwargs):
        if logger is not None:
            self.setup_handlers(logger, logfile, format,
                                colorize, **kwargs)
            if loglevel:
                logger.setLevel(loglevel)

    def setup_task_loggers(self, loglevel=None, logfile=None, format=None,
                           colorize=None, propagate=False, **kwargs):
        """Set up the task logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Will return the base task logger object.
        """
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.task_format
        colorize = self.supports_color(colorize, logfile)

        logger = self.setup_handlers(
            get_logger('celery.task'),
            logfile, format, colorize,
            formatter=TaskFormatter, **kwargs
        )
        logger.setLevel(loglevel)
        # this is an int for some reason, better to not question why.
        logger.propagate = int(propagate)
        signals.after_setup_task_logger.send(
            sender=None, logger=logger,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )
        return logger

    def redirect_stdouts_to_logger(self, logger, loglevel=None,
                                   stdout=True, stderr=True):
        """Redirect :class:`sys.stdout` and :class:`sys.stderr` to logger.

        Arguments:
            logger (logging.Logger): Logger instance to redirect to.
            loglevel (int, str): The loglevel redirected messages
                will be logged as.
        """
        proxy = LoggingProxy(logger, loglevel)
        if stdout:
            sys.stdout = proxy
        if stderr:
            sys.stderr = proxy
        return proxy

    def supports_color(self, colorize=None, logfile=None):
        colorize = self.colorize if colorize is None else colorize
        if self.app.IS_WINDOWS:
            # Windows does not support ANSI color codes.
            return False
        if colorize or colorize is None:
            # Only use color if there's no active log file
            # and stderr is an actual terminal.
return logfile is None and isatty(sys.stderr) return colorize def colored(self, logfile=None, enabled=None): return colored(enabled=self.supports_color(enabled, logfile)) def setup_handlers(self, logger, logfile, format, colorize, formatter=ColorFormatter, **kwargs): if self._is_configured(logger): return logger handler = self._detect_handler(logfile) handler.setFormatter(formatter(format, use_color=colorize)) logger.addHandler(handler) return logger def _detect_handler(self, logfile=None): """Create handler from filename, an open stream or `None` (stderr).""" logfile = sys.__stderr__ if logfile is None else logfile if hasattr(logfile, 'write'): return logging.StreamHandler(logfile) return WatchedFileHandler(logfile) def _has_handler(self, logger): return any( not isinstance(h, logging.NullHandler) for h in logger.handlers or [] ) def _is_configured(self, logger): return self._has_handler(logger) and not getattr( logger, '_rudimentary_setup', False) def setup_logger(self, name='celery', *args, **kwargs): """Deprecated: No longer used.""" self.setup_logging_subsystem(*args, **kwargs) return logging.root def get_default_logger(self, name='celery', **kwargs): return get_logger(name) @class_property def already_setup(self): return self._setup @already_setup.setter # noqa def already_setup(self, was_setup): self._setup = was_setup celery-4.1.0/celery/app/annotations.py0000644000175000017500000000303413130607475017705 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Task Annotations. Annotations is a nice term for monkey-patching task classes in the configuration. This prepares and performs the annotations in the :setting:`task_annotations` setting. """ from __future__ import absolute_import, unicode_literals from celery.five import string_t from celery.utils.functional import firstmethod, mlazy from celery.utils.imports import instantiate _first_match = firstmethod('annotate') _first_match_any = firstmethod('annotate_any') __all__ = ['MapAnnotation', 'prepare', 'resolve_all'] class MapAnnotation(dict): """Annotation map: task_name => attributes.""" def annotate_any(self): try: return dict(self['*']) except KeyError: pass def annotate(self, task): try: return dict(self[task.name]) except KeyError: pass def prepare(annotations): """Expand the :setting:`task_annotations` setting.""" def expand_annotation(annotation): if isinstance(annotation, dict): return MapAnnotation(annotation) elif isinstance(annotation, string_t): return mlazy(instantiate, annotation) return annotation if annotations is None: return () elif not isinstance(annotations, (list, tuple)): annotations = (annotations,) return [expand_annotation(anno) for anno in annotations] def resolve_all(anno, task): """Resolve all pending annotations.""" return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) celery-4.1.0/celery/app/backends.py0000644000175000017500000000471613130607475017132 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Backend selection.""" from __future__ import absolute_import, unicode_literals import sys import types from celery.exceptions import ImproperlyConfigured from celery._state import current_app from celery.five import reraise from celery.utils.imports import load_extension_class_names, symbol_by_name __all__ = ['by_name', 'by_url'] UNKNOWN_BACKEND = """ Unknown result backend: {0!r}. Did you spell that correctly? 
({1!r})
"""

BACKEND_ALIASES = {
    'amqp': 'celery.backends.amqp:AMQPBackend',
    'rpc': 'celery.backends.rpc:RPCBackend',
    'cache': 'celery.backends.cache:CacheBackend',
    'redis': 'celery.backends.redis:RedisBackend',
    'mongodb': 'celery.backends.mongodb:MongoBackend',
    'db': 'celery.backends.database:DatabaseBackend',
    'database': 'celery.backends.database:DatabaseBackend',
    'elasticsearch': 'celery.backends.elasticsearch:ElasticsearchBackend',
    'cassandra': 'celery.backends.cassandra:CassandraBackend',
    'couchbase': 'celery.backends.couchbase:CouchbaseBackend',
    'couchdb': 'celery.backends.couchdb:CouchBackend',
    'riak': 'celery.backends.riak:RiakBackend',
    'file': 'celery.backends.filesystem:FilesystemBackend',
    'disabled': 'celery.backends.base:DisabledBackend',
    'consul': 'celery.backends.consul:ConsulBackend',
    'dynamodb': 'celery.backends.dynamodb:DynamoDBBackend',
}


def by_name(backend=None, loader=None,
            extension_namespace='celery.result_backends'):
    """Get backend class by name/alias."""
    backend = backend or 'disabled'
    loader = loader or current_app.loader
    aliases = dict(BACKEND_ALIASES, **loader.override_backends)
    aliases.update(
        load_extension_class_names(extension_namespace) or {})
    try:
        cls = symbol_by_name(backend, aliases)
    except ValueError as exc:
        reraise(ImproperlyConfigured, ImproperlyConfigured(
            UNKNOWN_BACKEND.strip().format(backend, exc)),
            sys.exc_info()[2])
    if isinstance(cls, types.ModuleType):
        raise ImproperlyConfigured(UNKNOWN_BACKEND.strip().format(
            backend, 'is a Python module, not a backend class.'))
    return cls


def by_url(backend=None, loader=None):
    """Get backend class by URL."""
    url = None
    if backend and '://' in backend:
        url = backend
        scheme, _, _ = url.partition('://')
        if '+' in scheme:
            backend, url = url.split('+', 1)
        else:
            backend = scheme
    return by_name(backend, loader), url
celery-4.1.0/celery/app/task.py0000644000175000017500000011026413130607475016316 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Task implementation: request context and the task base class."""
from __future__ import absolute_import, unicode_literals

import sys

from billiard.einfo import ExceptionInfo
from kombu.exceptions import OperationalError
from kombu.utils.uuid import uuid

from celery import current_app, group
from celery import states
from celery._state import _task_stack
from celery.canvas import signature
from celery.exceptions import Ignore, MaxRetriesExceededError, Reject, Retry
from celery.five import items, python_2_unicode_compatible
from celery.local import class_property
from celery.result import EagerResult
from celery.utils import abstract
from celery.utils.functional import mattrgetter, maybe_list
from celery.utils.imports import instantiate
from celery.utils.nodenames import gethostname
from celery.utils.serialization import raise_with_context

from .annotations import resolve_all as resolve_all_annotations
from .registry import _unpickle_task_v2
from .utils import appstr

__all__ = ['Context', 'Task']

#: extracts attributes related to publishing a message from an object.
extract_exec_options = mattrgetter(
    'queue', 'routing_key', 'exchange', 'priority', 'expires',
    'serializer', 'delivery_mode', 'compression', 'time_limit',
    'soft_time_limit', 'immediate', 'mandatory',  # imm+man is deprecated
)

# We take __repr__ very seriously around here ;)
R_BOUND_TASK = '<class {0.__name__} of {app}{flags}>'
R_UNBOUND_TASK = '<unbound {0.__name__}{flags}>'
R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>'
R_INSTANCE = '<@task: {0.name} of {app}{flags}>'

#: Here for backwards compatibility as tasks no longer use a custom meta-class.
TaskType = type


def _strflags(flags, default=''):
    if flags:
        return ' ({0})'.format(', '.join(flags))
    return default


def _reprtask(task, fmt=None, flags=None):
    flags = list(flags) if flags is not None else []
    flags.append('v2 compatible') if task.__v2_compat__ else None
    if not fmt:
        fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK
    return fmt.format(
        task, flags=_strflags(flags),
        app=appstr(task._app) if task._app else None,
    )


@python_2_unicode_compatible
class Context(object):
    """Task request variables (Task.request)."""

    logfile = None
    loglevel = None
    hostname = None
    id = None
    args = None
    kwargs = None
    retries = 0
    eta = None
    expires = None
    is_eager = False
    headers = None
    delivery_info = None
    reply_to = None
    root_id = None
    parent_id = None
    correlation_id = None
    taskset = None  # compat alias to group
    group = None
    chord = None
    chain = None
    utc = None
    called_directly = True
    callbacks = None
    errbacks = None
    timelimit = None
    origin = None
    _children = None  # see property
    _protected = 0

    def __init__(self, *args, **kwargs):
        self.update(*args, **kwargs)

    def update(self, *args, **kwargs):
        return self.__dict__.update(*args, **kwargs)

    def clear(self):
        return self.__dict__.clear()

    def get(self, key, default=None):
        return getattr(self, key, default)

    def __repr__(self):
        return '<Context: {0!r}>'.format(vars(self))

    def as_execution_options(self):
        limit_hard, limit_soft = self.timelimit or (None, None)
        return {
            'task_id': self.id,
            'root_id': self.root_id,
            'parent_id': self.parent_id,
            'group_id': self.group,
            'chord': self.chord,
            'chain': self.chain,
            'link': self.callbacks,
            'link_error': self.errbacks,
            'expires': self.expires,
            'soft_time_limit': limit_soft,
            'time_limit': limit_hard,
            'headers': self.headers,
            'retries': self.retries,
            'reply_to': self.reply_to,
            'origin': self.origin,
        }

    @property
    def children(self):
        # children must be an empty list for every thread
        if self._children is None:
            self._children = []
        return self._children


@abstract.CallableTask.register
@python_2_unicode_compatible
class Task(object):
    """Task base class.

    Note:
        When called tasks apply the :meth:`run` method. This method must
        be defined by all tasks (that is unless the :meth:`__call__` method
        is overridden).
    """

    __trace__ = None
    __v2_compat__ = False  # set by old base in celery.task.base

    MaxRetriesExceededError = MaxRetriesExceededError
    OperationalError = OperationalError

    #: Execution strategy used, or the qualified name of one.
    Strategy = 'celery.worker.strategy:default'

    #: This is the instance bound to if the task is a method of a class.
    __self__ = None

    #: The application instance associated with this task class.
    _app = None

    #: Name of the task.
    name = None

    #: Enable argument checking.
    #: You can set this to false if you don't want the signature to be
    #: checked when calling the task.
    #: Defaults to :attr:`app.strict_typing <@Celery.strict_typing>`.
    typing = None

    #: Maximum number of retries before giving up. If set to :const:`None`,
    #: it will **never** stop retrying.
    max_retries = 3

    #: Default time in seconds before a retry of the task should be
    #: executed. 3 minutes by default.
    default_retry_delay = 3 * 60

    #: Rate limit for this task type. Examples: :const:`None` (no rate
    #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks
    #: a minute), `'100/h'` (hundred tasks an hour)
    rate_limit = None

    #: If enabled the worker won't store task state and return values
    #: for this task. Defaults to the :setting:`task_ignore_result`
    #: setting.
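    #: A sketch of enabling it for a single task (``app`` assumed):
    #:
    #:     @app.task(ignore_result=True)
    #:     def refresh_cache():
    #:         ...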
    ignore_result = None

    #: If enabled the request will keep track of subtasks started by
    #: this task, and this information will be sent with the result
    #: (``result.children``).
    trail = True

    #: If enabled the worker will send monitoring events related to
    #: this task (but only if the worker is configured to send
    #: task related events).
    #: Note that this has no effect on the task-failure event case
    #: where a task is not registered (as it will have no task class
    #: to check this flag).
    send_events = True

    #: When enabled errors will be stored even if the task is otherwise
    #: configured to ignore results.
    store_errors_even_if_ignored = None

    #: The name of a serializer that is registered with
    #: :mod:`kombu.serialization.registry`. Default is `'pickle'`.
    serializer = None

    #: Hard time limit.
    #: Defaults to the :setting:`task_time_limit` setting.
    time_limit = None

    #: Soft time limit.
    #: Defaults to the :setting:`task_soft_time_limit` setting.
    soft_time_limit = None

    #: The result store backend used for this task.
    backend = None

    #: If disabled this task won't be registered automatically.
    autoregister = True

    #: If enabled the task will report its status as 'started' when the task
    #: is executed by a worker. Disabled by default as the normal behavior
    #: is to not report that level of granularity. Tasks are either pending,
    #: finished, or waiting to be retried.
    #:
    #: Having a 'started' status can be useful for when there are long
    #: running tasks and there's a need to report what task is currently
    #: running.
    #:
    #: The application default can be overridden using the
    #: :setting:`task_track_started` setting.
    track_started = None

    #: When enabled messages for this task will be acknowledged **after**
    #: the task has been executed, and not *just before* (the
    #: default behavior).
    #:
    #: Please note that this means the task may be executed twice if the
    #: worker crashes mid execution.
    #:
    #: The application default can be overridden with the
    #: :setting:`task_acks_late` setting.
    acks_late = None

    #: Even if :attr:`acks_late` is enabled, the worker will
    #: acknowledge tasks when the worker process executing them abruptly
    #: exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc.).
    #:
    #: Setting this to true allows the message to be re-queued instead,
    #: so that the task will be executed again by the same worker, or
    #: another worker.
    #:
    #: Warning: Enabling this can cause message loops; make sure you know
    #: what you're doing.
    reject_on_worker_lost = None

    #: Tuple of expected exceptions.
    #:
    #: These are errors that are expected in normal operation
    #: and that shouldn't be regarded as a real error by the worker.
    #: Currently this means that the state will be updated to an error
    #: state, but the worker won't log the event as an error.
    throws = ()

    #: Default task expiry time.
    expires = None

    #: Max length of result representation used in logs and events.
    resultrepr_maxsize = 1024

    #: Task request stack, the current request will be the topmost.
    request_stack = None

    #: Some may expect a request to exist even if the task hasn't been
    #: called. This should probably be deprecated.
    _default_request = None

    #: Deprecated attribute ``abstract`` here for compatibility.
    abstract = True

    _exec_options = None

    __bound__ = False

    from_config = (
        ('serializer', 'task_serializer'),
        ('rate_limit', 'task_default_rate_limit'),
        ('track_started', 'task_track_started'),
        ('acks_late', 'task_acks_late'),
        ('reject_on_worker_lost', 'task_reject_on_worker_lost'),
        ('ignore_result', 'task_ignore_result'),
        ('store_errors_even_if_ignored', 'task_store_errors_even_if_ignored'),
    )

    _backend = None  # set by backend property.

    # - Tasks are lazily bound, so that configuration is not set
    # - until the task is actually used

    @classmethod
    def bind(cls, app):
        was_bound, cls.__bound__ = cls.__bound__, True
        cls._app = app
        conf = app.conf
        cls._exec_options = None  # clear option cache

        if cls.typing is None:
            cls.typing = app.strict_typing

        for attr_name, config_name in cls.from_config:
            if getattr(cls, attr_name, None) is None:
                setattr(cls, attr_name, conf[config_name])

        # decorate with annotations from config.
        if not was_bound:
            cls.annotate()

            from celery.utils.threads import LocalStack
            cls.request_stack = LocalStack()

        # PeriodicTask uses this to add itself to the PeriodicTask schedule.
        cls.on_bound(app)

        return app

    @classmethod
    def on_bound(cls, app):
        """Called when the task is bound to an app.

        Note:
            This class method can be defined to do additional actions when
            the task class is bound to an app.
        """
        pass

    @classmethod
    def _get_app(cls):
        if cls._app is None:
            cls._app = current_app
        if not cls.__bound__:
            # The app property's __set__ method is not called
            # if Task.app is set (on the class), so must bind on use.
            cls.bind(cls._app)
        return cls._app
    app = class_property(_get_app, bind)

    @classmethod
    def annotate(cls):
        for d in resolve_all_annotations(cls.app.annotations, cls):
            for key, value in items(d):
                if key.startswith('@'):
                    cls.add_around(key[1:], value)
                else:
                    setattr(cls, key, value)

    @classmethod
    def add_around(cls, attr, around):
        orig = getattr(cls, attr)
        if getattr(orig, '__wrapped__', None):
            orig = orig.__wrapped__
        meth = around(orig)
        meth.__wrapped__ = orig
        setattr(cls, attr, meth)

    def __call__(self, *args, **kwargs):
        _task_stack.push(self)
        self.push_request(args=args, kwargs=kwargs)
        try:
            # add self if this is a bound task
            if self.__self__ is not None:
                return self.run(self.__self__, *args, **kwargs)
            return self.run(*args, **kwargs)
        finally:
            self.pop_request()
            _task_stack.pop()

    def __reduce__(self):
        # - tasks are pickled into the name of the task only, and the receiver
        # - simply grabs it from the local registry.
        # - in later versions the module of the task is also included,
        # - and the receiving side tries to import that module so that
        # - it will work even if the task hasn't been registered.
        mod = type(self).__module__
        mod = mod if mod and mod in sys.modules else None
        return (_unpickle_task_v2, (self.name, mod), None)

    def run(self, *args, **kwargs):
        """The body of the task executed by workers."""
        raise NotImplementedError('Tasks must define the run method.')

    def start_strategy(self, app, consumer, **kwargs):
        return instantiate(self.Strategy, self, app, consumer, **kwargs)

    def delay(self, *args, **kwargs):
        """Star argument version of :meth:`apply_async`.

        Does not support the extra options enabled by :meth:`apply_async`.

        Arguments:
            *args (Any): Positional arguments passed on to the task.
            **kwargs (Any): Keyword arguments passed on to the task.

        Returns:
            celery.result.AsyncResult: Future promise.
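        Example:
            A short sketch, assuming a hypothetical ``add`` task and a
            configured result backend:

                >>> result = add.delay(2, 2)
                >>> result.get(timeout=5)
                4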
""" return self.apply_async(args, kwargs) def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, link=None, link_error=None, shadow=None, **options): """Apply tasks asynchronously by sending a message. Arguments: args (Tuple): The positional arguments to pass on to the task. kwargs (Dict): The keyword arguments to pass on to the task. countdown (float): Number of seconds into the future that the task should execute. Defaults to immediate execution. eta (~datetime.datetime): Absolute time and date of when the task should be executed. May not be specified if `countdown` is also supplied. expires (float, ~datetime.datetime): Datetime or seconds in the future for the task should expire. The task won't be executed after the expiration time. shadow (str): Override task name used in logs/monitoring. Default is retrieved from :meth:`shadow_name`. connection (kombu.Connection): Re-use existing broker connection instead of acquiring one from the connection pool. retry (bool): If enabled sending of the task message will be retried in the event of connection loss or failure. Default is taken from the :setting:`task_publish_retry` setting. Note that you need to handle the producer/connection manually for this to work. retry_policy (Mapping): Override the retry policy used. See the :setting:`task_publish_retry_policy` setting. queue (str, kombu.Queue): The queue to route the task to. This must be a key present in :setting:`task_queues`, or :setting:`task_create_missing_queues` must be enabled. See :ref:`guide-routing` for more information. exchange (str, kombu.Exchange): Named custom exchange to send the task to. Usually not used in combination with the ``queue`` argument. routing_key (str): Custom routing key used to route the task to a worker server. If in combination with a ``queue`` argument only used to specify custom routing keys to topic exchanges. priority (int): The task priority, a number between 0 and 9. Defaults to the :attr:`priority` attribute. serializer (str): Serialization method to use. Can be `pickle`, `json`, `yaml`, `msgpack` or any custom serialization method that's been registered with :mod:`kombu.serialization.registry`. Defaults to the :attr:`serializer` attribute. compression (str): Optional compression method to use. Can be one of ``zlib``, ``bzip2``, or any custom compression methods registered with :func:`kombu.compression.register`. Defaults to the :setting:`task_compression` setting. link (~@Signature): A single, or a list of tasks signatures to apply if the task returns successfully. link_error (~@Signature): A single, or a list of task signatures to apply if an error occurs while executing the task. producer (kombu.Producer): custom producer to use when publishing the task. add_to_parent (bool): If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` attribute. Trailing can also be disabled by default using the :attr:`trail` attribute publisher (kombu.Producer): Deprecated alias to ``producer``. headers (Dict): Message headers to be included in the message. Returns: ~@AsyncResult: Promise of future evaluation. Raises: TypeError: If not enough arguments are passed, or too many arguments are passed. Note that signature checks may be disabled by specifying ``@task(typing=False)``. kombu.exceptions.OperationalError: If a connection to the transport cannot be made, or if the connection is lost. 
        Note:
            Also supports all keyword arguments supported by
            :meth:`kombu.Producer.publish`.
        """
        if self.typing:
            try:
                check_arguments = self.__header__
            except AttributeError:  # pragma: no cover
                pass
            else:
                check_arguments(*(args or ()), **(kwargs or {}))

        app = self._get_app()
        if app.conf.task_always_eager:
            return self.apply(args, kwargs, task_id=task_id or uuid(),
                              link=link, link_error=link_error, **options)

        # add 'self' if this is a "task_method".
        if self.__self__ is not None:
            args = args if isinstance(args, tuple) else tuple(args or ())
            args = (self.__self__,) + args
        shadow = shadow or self.shadow_name(args, kwargs, options)

        preopts = self._get_exec_options()
        options = dict(preopts, **options) if options else preopts

        return app.send_task(
            self.name, args, kwargs, task_id=task_id, producer=producer,
            link=link, link_error=link_error, result_cls=self.AsyncResult,
            shadow=shadow, task_type=self,
            **options
        )

    def shadow_name(self, args, kwargs, options):
        """Override for custom task name in worker logs/monitoring.

        Example:
            .. code-block:: python

                from celery.utils.imports import qualname

                def shadow_name(task, args, kwargs, options):
                    return qualname(args[0])

                @app.task(shadow_name=shadow_name, serializer='pickle')
                def apply_function_async(fun, *args, **kwargs):
                    return fun(*args, **kwargs)

        Arguments:
            args (Tuple): Task positional arguments.
            kwargs (Dict): Task keyword arguments.
            options (Dict): Task execution options.
        """
        pass

    def signature_from_request(self, request=None, args=None, kwargs=None,
                               queue=None, **extra_options):
        request = self.request if request is None else request
        args = request.args if args is None else args
        kwargs = request.kwargs if kwargs is None else kwargs
        options = request.as_execution_options()
        if queue:
            options['queue'] = queue
        else:
            delivery_info = request.delivery_info or {}
            exchange = delivery_info.get('exchange')
            routing_key = delivery_info.get('routing_key')
            if exchange == '' and routing_key:
                # sent to anon-exchange
                options['queue'] = routing_key
            else:
                options.update(delivery_info)
        return self.signature(
            args, kwargs, options, type=self, **extra_options
        )
    subtask_from_request = signature_from_request  # XXX compat

    def retry(self, args=None, kwargs=None, exc=None, throw=True,
              eta=None, countdown=None, max_retries=None, **options):
        """Retry the task.

        Example:
            >>> from imaginary_twitter_lib import Twitter
            >>> from proj.celery import app

            >>> @app.task(bind=True)
            ... def tweet(self, auth, message):
            ...     twitter = Twitter(oauth=auth)
            ...     try:
            ...         twitter.post_status_update(message)
            ...     except twitter.FailWhale as exc:
            ...         # Retry in 5 minutes.
            ...         raise self.retry(countdown=60 * 5, exc=exc)

        Note:
            Although the task will never return above as `retry` raises an
            exception to notify the worker, we use `raise` in front of the
            retry to convey that the rest of the block won't be executed.

        Arguments:
            args (Tuple): Positional arguments to retry with.
            kwargs (Dict): Keyword arguments to retry with.
            exc (Exception): Custom exception to report when the max retry
                limit has been exceeded (default:
                :exc:`~@MaxRetriesExceededError`).

                If this argument is set and retry is called while
                an exception was raised (``sys.exc_info()`` is set)
                it will attempt to re-raise the current exception.

                If no exception was raised it will raise the ``exc``
                argument provided.
            countdown (float): Time in seconds to delay the retry for.
            eta (~datetime.datetime): Explicit time and date to run the
                retry at.
            max_retries (int): If set, overrides the default retry limit for
                this execution.
Changes to this parameter don't propagate to subsequent task retry attempts. A value of :const:`None`, means "use the default", so if you want infinite retries you'd have to set the :attr:`max_retries` attribute of the task to :const:`None` first. time_limit (int): If set, overrides the default time limit. soft_time_limit (int): If set, overrides the default soft time limit. throw (bool): If this is :const:`False`, don't raise the :exc:`~@Retry` exception, that tells the worker to mark the task as being retried. Note that this means the task will be marked as failed if the task raises an exception, or successful if it returns after the retry call. **options (Any): Extra options to pass on to :meth:`apply_async`. Raises: celery.exceptions.Retry: To tell the worker that the task has been re-sent for retry. This always happens, unless the `throw` keyword argument has been explicitly set to :const:`False`, and is considered normal operation. """ request = self.request retries = request.retries + 1 max_retries = self.max_retries if max_retries is None else max_retries # Not in worker or emulated by (apply/always_eager), # so just raise the original exception. if request.called_directly: # raises orig stack if PyErr_Occurred, # and augments with exc' if that argument is defined. raise_with_context(exc or Retry('Task can be retried', None)) if not eta and countdown is None: countdown = self.default_retry_delay is_eager = request.is_eager S = self.signature_from_request( request, args, kwargs, countdown=countdown, eta=eta, retries=retries, **options ) if max_retries is not None and retries > max_retries: if exc: # On Py3: will augment any current exception with # the exc' argument provided (raise exc from orig) raise_with_context(exc) raise self.MaxRetriesExceededError( "Can't retry {0}[{1}] args:{2} kwargs:{3}".format( self.name, request.id, S.args, S.kwargs)) ret = Retry(exc=exc, when=eta or countdown) if is_eager: # if task was executed eagerly using apply(), # then the retry must also be executed eagerly. S.apply().get() if throw: raise ret return ret try: S.apply_async() except Exception as exc: raise Reject(exc, requeue=False) if throw: raise ret return ret def apply(self, args=None, kwargs=None, link=None, link_error=None, task_id=None, retries=None, throw=None, logfile=None, loglevel=None, headers=None, **options): """Execute this task locally, by blocking until the task returns. Arguments: args (Tuple): positional arguments passed on to the task. kwargs (Dict): keyword arguments passed on to the task. throw (bool): Re-raise task exceptions. Defaults to the :setting:`task_eager_propagates` setting. Returns: celery.result.EagerResult: pre-evaluated result. """ # trace imports Task, so need to import inline. from celery.app.trace import build_tracer app = self._get_app() args = args or () # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__,) + tuple(args) kwargs = kwargs or {} task_id = task_id or uuid() retries = retries or 0 if throw is None: throw = app.conf.task_eager_propagates # Make sure we get the task instance, not class. 
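        # (the registry lookup returns the instance registered for this
        # name, which may differ from ``self`` in compat setups).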
task = app._tasks[self.name] request = { 'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': logfile, 'loglevel': loglevel or 0, 'hostname': gethostname(), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'headers': headers, 'delivery_info': {'is_eager': True}, } tb = None tracer = build_tracer( task.name, task, eager=True, propagate=throw, app=self._get_app(), ) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb) def AsyncResult(self, task_id, **kwargs): """Get AsyncResult instance for this kind of task. Arguments: task_id (str): Task id to get result for. """ return self._get_app().AsyncResult(task_id, backend=self.backend, task_name=self.name, **kwargs) def signature(self, args=None, *starargs, **starkwargs): """Create signature. Returns: :class:`~celery.signature`: object for this task, wrapping arguments and execution options for a single task invocation. """ starkwargs.setdefault('app', self.app) return signature(self, args, *starargs, **starkwargs) subtask = signature def s(self, *args, **kwargs): """Create signature. Shortcut for ``.s(*a, **k) -> .signature(a, k)``. """ return self.signature(args, kwargs) def si(self, *args, **kwargs): """Create immutable signature. Shortcut for ``.si(*a, **k) -> .signature(a, k, immutable=True)``. """ return self.signature(args, kwargs, immutable=True) def chunks(self, it, n): """Create a :class:`~celery.canvas.chunks` task for this task.""" from celery import chunks return chunks(self.s(), it, n, app=self.app) def map(self, it): """Create a :class:`~celery.canvas.xmap` task from ``it``.""" from celery import xmap return xmap(self.s(), it, app=self.app) def starmap(self, it): """Create a :class:`~celery.canvas.xstarmap` task from ``it``.""" from celery import xstarmap return xstarmap(self.s(), it, app=self.app) def send_event(self, type_, retry=True, retry_policy=None, **fields): """Send monitoring event message. This can be used to add custom event types in :pypi:`Flower` and other monitors. Arguments: type_ (str): Type of event, e.g. ``"task-failed"``. Keyword Arguments: retry (bool): Retry sending the message if the connection is lost. Default is taken from the :setting:`task_publish_retry` setting. retry_policy (Mapping): Retry settings. Default is taken from the :setting:`task_publish_retry_policy` setting. **fields (Any): Map containing information about the event. Must be JSON serializable. """ req = self.request if retry_policy is None: retry_policy = self.app.conf.task_publish_retry_policy with self.app.events.default_dispatcher(hostname=req.hostname) as d: return d.send( type_, uuid=req.id, retry=retry, retry_policy=retry_policy, **fields) def replace(self, sig): """Replace this task, with a new task inheriting the task id. .. versionadded:: 4.0 Arguments: sig (~@Signature): signature to replace with. Raises: ~@Ignore: This is always raised, so the best practice is to always use ``raise self.replace(...)`` to convey to the reader that the task won't continue after being replaced. 
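        Example:
            A sketch, assuming a hypothetical ``other_task`` to hand
            over to:

            .. code-block:: python

                @app.task(bind=True)
                def upgrade(self, x):
                    raise self.replace(other_task.s(x))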
""" chord = self.request.chord if 'chord' in sig.options: if chord: chord = sig.options['chord'] | chord else: chord = sig.options['chord'] if isinstance(sig, group): sig |= self.app.tasks['celery.accumulate'].s(index=0).set( chord=chord, link=self.request.callbacks, link_error=self.request.errbacks, ) chord = None if self.request.chain: for t in reversed(self.request.chain): sig |= signature(t, app=self.app) sig.freeze(self.request.id, group_id=self.request.group, chord=chord, root_id=self.request.root_id) sig.delay() raise Ignore('Replaced by new task') def add_to_chord(self, sig, lazy=False): """Add signature to the chord the current task is a member of. .. versionadded:: 4.0 Currently only supported by the Redis result backend. Arguments: sig (~@Signature): Signature to extend chord with. lazy (bool): If enabled the new task won't actually be called, and ``sig.delay()`` must be called manually. """ if not self.request.chord: raise ValueError('Current task is not member of any chord') result = sig.freeze(group_id=self.request.group, chord=self.request.chord, root_id=self.request.root_id) self.backend.add_to_chord(self.request.group, result) return sig.delay() if not lazy else sig def update_state(self, task_id=None, state=None, meta=None): """Update task state. Arguments: task_id (str): Id of the task to update. Defaults to the id of the current task. state (str): New state. meta (Dict): State meta-data. """ if task_id is None: task_id = self.request.id self.backend.store_result(task_id, meta, state) def on_success(self, retval, task_id, args, kwargs): """Success handler. Run by the worker if the task executes successfully. Arguments: retval (Any): The return value of the task. task_id (str): Unique id of the executed task. args (Tuple): Original arguments for the executed task. kwargs (Dict): Original keyword arguments for the executed task. Returns: None: The return value of this handler is ignored. """ pass def on_retry(self, exc, task_id, args, kwargs, einfo): """Retry handler. This is run by the worker when the task is to be retried. Arguments: exc (Exception): The exception sent to :meth:`retry`. task_id (str): Unique id of the retried task. args (Tuple): Original arguments for the retried task. kwargs (Dict): Original keyword arguments for the retried task. einfo (~billiard.einfo.ExceptionInfo): Exception information. Returns: None: The return value of this handler is ignored. """ pass def on_failure(self, exc, task_id, args, kwargs, einfo): """Error handler. This is run by the worker when the task fails. Arguments: exc (Exception): The exception raised by the task. task_id (str): Unique id of the failed task. args (Tuple): Original arguments for the task that failed. kwargs (Dict): Original keyword arguments for the task that failed. einfo (~billiard.einfo.ExceptionInfo): Exception information. Returns: None: The return value of this handler is ignored. """ pass def after_return(self, status, retval, task_id, args, kwargs, einfo): """Handler called after the task returns. Arguments: status (str): Current task state. retval (Any): Task return value/exception. task_id (str): Unique id of the task. args (Tuple): Original arguments for the task. kwargs (Dict): Original keyword arguments for the task. einfo (~billiard.einfo.ExceptionInfo): Exception information. Returns: None: The return value of this handler is ignored. 
""" pass def add_trail(self, result): if self.trail: self.request.children.append(result) return result def push_request(self, *args, **kwargs): self.request_stack.push(Context(*args, **kwargs)) def pop_request(self): self.request_stack.pop() def __repr__(self): """``repr(task)``.""" return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE) def _get_request(self): """Get current request object.""" req = self.request_stack.top if req is None: # task was not called, but some may still expect a request # to be there, perhaps that should be deprecated. if self._default_request is None: self._default_request = Context() return self._default_request return req request = property(_get_request) def _get_exec_options(self): if self._exec_options is None: self._exec_options = extract_exec_options(self) return self._exec_options @property def backend(self): backend = self._backend if backend is None: return self.app.backend return backend @backend.setter def backend(self, value): # noqa self._backend = value @property def __name__(self): return self.__class__.__name__ BaseTask = Task # noqa: E305 XXX compat alias celery-4.1.0/celery/app/utils.py0000644000175000017500000002730213135426300016503 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """App utilities: Compat settings, bug-report tool, pickling apps.""" from __future__ import absolute_import, unicode_literals import os import platform as _platform import re from collections import Mapping, namedtuple from copy import deepcopy from types import ModuleType from kombu.utils.url import maybe_sanitize_url from celery.exceptions import ImproperlyConfigured from celery.five import items, keys, string_t, values from celery.platforms import pyimplementation from celery.utils.collections import ConfigurationView from celery.utils.text import pretty from celery.utils.imports import import_from_cwd, symbol_by_name, qualname from .defaults import ( _TO_NEW_KEY, _TO_OLD_KEY, _OLD_DEFAULTS, _OLD_SETTING_KEYS, DEFAULTS, SETTING_KEYS, find, ) __all__ = [ 'Settings', 'appstr', 'bugreport', 'filter_hidden_settings', 'find_app', ] #: Format used to generate bug-report information. BUGREPORT_INFO = """ software -> celery:{celery_v} kombu:{kombu_v} py:{py_v} billiard:{billiard_v} {driver_v} platform -> system:{system} arch:{arch} imp:{py_i} loader -> {loader} settings -> transport:{transport} results:{results} {human_settings} """ HIDDEN_SETTINGS = re.compile( 'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE', re.IGNORECASE, ) E_MIX_OLD_INTO_NEW = """ Cannot mix new and old setting keys, please rename the following settings to the new format: {renames} """ E_MIX_NEW_INTO_OLD = """ Cannot mix new setting names with old setting names, please rename the following settings to use the old format: {renames} Or change all of the settings to use the new format :) """ FMT_REPLACE_SETTING = '{replace:<36} -> {with_}' def appstr(app): """String used in __repr__ etc, to id app instances.""" return '{0} at {1:#x}'.format(app.main or '__main__', id(app)) class Settings(ConfigurationView): """Celery settings object. .. seealso: :ref:`configuration` for a full list of configuration keys. 
""" @property def broker_read_url(self): return ( os.environ.get('CELERY_BROKER_READ_URL') or self.get('broker_read_url') or self.broker_url ) @property def broker_write_url(self): return ( os.environ.get('CELERY_BROKER_WRITE_URL') or self.get('broker_write_url') or self.broker_url ) @property def broker_url(self): return ( os.environ.get('CELERY_BROKER_URL') or self.first('broker_url', 'broker_host') ) @property def task_default_exchange(self): return self.first( 'task_default_exchange', 'task_default_queue', ) @property def task_default_routing_key(self): return self.first( 'task_default_routing_key', 'task_default_queue', ) @property def timezone(self): # this way we also support django's time zone. return self.first('timezone', 'time_zone') def without_defaults(self): """Return the current configuration, but without defaults.""" # the last stash is the default settings, so just skip that return Settings({}, self.maps[:-1]) def value_set_for(self, key): return key in self.without_defaults() def find_option(self, name, namespace=''): """Search for option by name. Example: >>> from proj.celery import app >>> app.conf.find_option('disable_rate_limits') ('worker', 'prefetch_multiplier', bool default->False>)) Arguments: name (str): Name of option, cannot be partial. namespace (str): Preferred name-space (``None`` by default). Returns: Tuple: of ``(namespace, key, type)``. """ return find(name, namespace) def find_value_for_key(self, name, namespace='celery'): """Shortcut to ``get_by_parts(*find_option(name)[:-1])``.""" return self.get_by_parts(*self.find_option(name, namespace)[:-1]) def get_by_parts(self, *parts): """Return the current value for setting specified as a path. Example: >>> from proj.celery import app >>> app.conf.get_by_parts('worker', 'disable_rate_limits') False """ return self['_'.join(part for part in parts if part)] def finalize(self): # See PendingConfiguration in celery/app/base.py # first access will read actual configuration. try: self['__bogus__'] except KeyError: pass return self def table(self, with_defaults=False, censored=True): filt = filter_hidden_settings if censored else lambda v: v dict_members = dir(dict) self.finalize() return filt({ k: v for k, v in items( self if with_defaults else self.without_defaults()) if not k.startswith('_') and k not in dict_members }) def humanize(self, with_defaults=False, censored=True): """Return a human readable text showing configuration changes.""" return '\n'.join( '{0}: {1}'.format(key, pretty(value, width=50)) for key, value in items(self.table(with_defaults, censored))) def _new_key_to_old(key, convert=_TO_OLD_KEY.get): return convert(key, key) def _old_key_to_new(key, convert=_TO_NEW_KEY.get): return convert(key, key) _settings_info_t = namedtuple('settings_info_t', ( 'defaults', 'convert', 'key_t', 'mix_error', )) _settings_info = _settings_info_t( DEFAULTS, _TO_NEW_KEY, _old_key_to_new, E_MIX_OLD_INTO_NEW, ) _old_settings_info = _settings_info_t( _OLD_DEFAULTS, _TO_OLD_KEY, _new_key_to_old, E_MIX_NEW_INTO_OLD, ) def detect_settings(conf, preconf={}, ignore_keys=set(), prefix=None, all_keys=SETTING_KEYS, old_keys=_OLD_SETTING_KEYS): source = conf if conf is None: source, conf = preconf, {} have = set(keys(source)) - ignore_keys is_in_new = have.intersection(all_keys) is_in_old = have.intersection(old_keys) info = None if is_in_new: # have new setting names info, left = _settings_info, is_in_old if is_in_old and len(is_in_old) > len(is_in_new): # Majority of the settings are old. 
info, left = _old_settings_info, is_in_new if is_in_old: # have old setting names, or a majority of the names are old. if not info: info, left = _old_settings_info, is_in_new if is_in_new and len(is_in_new) > len(is_in_old): # Majority of the settings are new info, left = _settings_info, is_in_old else: # no settings, just use new format. info, left = _settings_info, is_in_old if prefix: # always use new format if prefix is used. info, left = _settings_info, set() # only raise error for keys that the user didn't provide two keys # for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``). really_left = {key for key in left if info.convert[key] not in have} if really_left: # user is mixing old/new, or new/old settings, give renaming # suggestions. raise ImproperlyConfigured(info.mix_error.format(renames='\n'.join( FMT_REPLACE_SETTING.format(replace=key, with_=info.convert[key]) for key in sorted(really_left) ))) preconf = {info.convert.get(k, k): v for k, v in items(preconf)} defaults = dict(deepcopy(info.defaults), **preconf) return Settings( preconf, [conf, defaults], (_old_key_to_new, _new_key_to_old), prefix=prefix, ) class AppPickler(object): """Old application pickler/unpickler (< 3.1).""" def __call__(self, cls, *args): kwargs = self.build_kwargs(*args) app = self.construct(cls, **kwargs) self.prepare(app, **kwargs) return app def prepare(self, app, **kwargs): app.conf.update(kwargs['changes']) def build_kwargs(self, *args): return self.build_standard_kwargs(*args) def build_standard_kwargs(self, main, changes, loader, backend, amqp, events, log, control, accept_magic_kwargs, config_source=None): return dict(main=main, loader=loader, backend=backend, amqp=amqp, changes=changes, events=events, log=log, control=control, set_as_current=False, config_source=config_source) def construct(self, cls, **kwargs): return cls(**kwargs) def _unpickle_app(cls, pickler, *args): """Rebuild app for versions 2.5+.""" return pickler()(cls, *args) def _unpickle_app_v2(cls, kwargs): """Rebuild app for versions 3.1+.""" kwargs['set_as_current'] = False return cls(**kwargs) def filter_hidden_settings(conf): """Filter sensitive settings.""" def maybe_censor(key, value, mask='*' * 8): if isinstance(value, Mapping): return filter_hidden_settings(value) if isinstance(key, string_t): if HIDDEN_SETTINGS.search(key): return mask elif 'broker_url' in key.lower(): from kombu import Connection return Connection(value).as_uri(mask=mask) elif 'backend' in key.lower(): return maybe_sanitize_url(value, mask=mask) return value return {k: maybe_censor(k, v) for k, v in items(conf)} def bugreport(app): """Return a string containing information useful in bug-reports.""" import billiard import celery import kombu try: conn = app.connection() driver_v = '{0}:{1}'.format(conn.transport.driver_name, conn.transport.driver_version()) transport = conn.transport_cls except Exception: # pylint: disable=broad-except transport = driver_v = '' return BUGREPORT_INFO.format( system=_platform.system(), arch=', '.join(x for x in _platform.architecture() if x), py_i=pyimplementation(), celery_v=celery.VERSION_BANNER, kombu_v=kombu.__version__, billiard_v=billiard.__version__, py_v=_platform.python_version(), driver_v=driver_v, transport=transport, results=maybe_sanitize_url(app.conf.result_backend or 'disabled'), human_settings=app.conf.humanize(), loader=qualname(app.loader.__class__), ) def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd): """Find app by name.""" from .base import Celery try: sym = 
symbol_by_name(app, imp=imp) except AttributeError: # last part was not an attribute, but a module sym = imp(app) if isinstance(sym, ModuleType) and ':' not in app: try: found = sym.app if isinstance(found, ModuleType): raise AttributeError() except AttributeError: try: found = sym.celery if isinstance(found, ModuleType): raise AttributeError() except AttributeError: if getattr(sym, '__path__', None): try: return find_app( '{0}.celery'.format(app), symbol_by_name=symbol_by_name, imp=imp, ) except ImportError: pass for suspect in values(vars(sym)): if isinstance(suspect, Celery): return suspect raise else: return found else: return found return sym celery-4.1.0/celery/app/routes.py0000644000175000017500000001105313130607475016671 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Task Routing. Contains utilities for working with task routers, (:setting:`task_routes`). """ from __future__ import absolute_import, unicode_literals import re import string from collections import Mapping, OrderedDict from kombu import Queue from celery.exceptions import QueueNotFound from celery.five import items, string_t from celery.utils.collections import lpmerge from celery.utils.functional import maybe_evaluate, mlazy from celery.utils.imports import symbol_by_name __all__ = ['MapRoute', 'Router', 'prepare'] def glob_to_re(glob, quote=string.punctuation.replace('*', '')): glob = ''.join('\\' + c if c in quote else c for c in glob) return glob.replace('*', '.+?') class MapRoute(object): """Creates a router out of a :class:`dict`.""" def __init__(self, map): map = items(map) if isinstance(map, Mapping) else map self.map = {} self.patterns = OrderedDict() for k, v in map: if isinstance(k, re._pattern_type): self.patterns[k] = v elif '*' in k: self.patterns[re.compile(glob_to_re(k))] = v else: self.map[k] = v def __call__(self, name, *args, **kwargs): try: return dict(self.map[name]) except KeyError: pass except ValueError: return {'queue': self.map[name]} for regex, route in items(self.patterns): if regex.match(name): try: return dict(route) except ValueError: return {'queue': route} class Router(object): """Route tasks based on the :setting:`task_routes` setting.""" def __init__(self, routes=None, queues=None, create_missing=False, app=None): self.app = app self.queues = {} if queues is None else queues self.routes = [] if routes is None else routes self.create_missing = create_missing def route(self, options, name, args=(), kwargs={}, task_type=None): options = self.expand_destination(options) # expands 'queue' if self.routes: route = self.lookup_route(name, args, kwargs, options, task_type) if route: # expands 'queue' in route. return lpmerge(self.expand_destination(route), options) if 'queue' not in options: options = lpmerge(self.expand_destination( self.app.conf.task_default_queue), options) return options def expand_destination(self, route): # Route can be a queue name: convenient for direct exchanges. if isinstance(route, string_t): queue, route = route, {} else: # can use defaults from configured queue, but override specific # things (like the routing_key): great for topic exchanges. 
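            # e.g. a route entry from :setting:`task_routes` might look
            # like {'queue': 'media', 'routing_key': 'media.video'}
            # (queue and key names here are illustrative).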
queue = route.pop('queue', None) if queue: if isinstance(queue, Queue): route['queue'] = queue else: try: route['queue'] = self.queues[queue] except KeyError: raise QueueNotFound( 'Queue {0!r} missing from task_queues'.format(queue)) return route def lookup_route(self, name, args=None, kwargs=None, options=None, task_type=None): query = self.query_router for router in self.routes: route = query(router, name, args, kwargs, options, task_type) if route is not None: return route def query_router(self, router, task, args, kwargs, options, task_type): router = maybe_evaluate(router) if hasattr(router, 'route_for_task'): # pre 4.0 router class return router.route_for_task(task, args, kwargs) return router(task, args, kwargs, options, task=task_type) def expand_router_string(router): router = symbol_by_name(router) if hasattr(router, 'route_for_task'): # need to instantiate pre 4.0 router classes router = router() return router def prepare(routes): """Expand the :setting:`task_routes` setting.""" def expand_route(route): if isinstance(route, (Mapping, list, tuple)): return MapRoute(route) if isinstance(route, string_t): return mlazy(expand_router_string, route) return route if routes is None: return () if not isinstance(routes, (list, tuple)): routes = (routes,) return [expand_route(route) for route in routes] celery-4.1.0/celery/app/amqp.py0000644000175000017500000005670413135426300016311 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Sending/Receiving Messages (Kombu integration).""" from __future__ import absolute_import, unicode_literals import numbers import sys from collections import Mapping, namedtuple from datetime import timedelta from weakref import WeakValueDictionary from kombu import pools from kombu import Connection, Consumer, Exchange, Producer, Queue from kombu.common import Broadcast from kombu.utils.functional import maybe_list from kombu.utils.objects import cached_property from celery import signals from celery.five import items, string_t from celery.local import try_import from celery.utils.nodenames import anon_nodename from celery.utils.saferepr import saferepr from celery.utils.text import indent as textindent from celery.utils.time import maybe_make_aware from . import routes as _routes __all__ = ['AMQP', 'Queues', 'task_message'] PY3 = sys.version_info[0] == 3 #: earliest date supported by time.mktime. INT_MIN = -2147483648 # json in Python 2.7 borks if dict contains byte keys. JSON_NEEDS_UNICODE_KEYS = not PY3 and not try_import('simplejson') #: Human readable queue declaration. QUEUE_FORMAT = """ .> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \ key={0.routing_key} """ task_message = namedtuple('task_message', ('headers', 'properties', 'body', 'sent_event')) def utf8dict(d, encoding='utf-8'): return {k.decode(encoding) if isinstance(k, bytes) else k: v for k, v in items(d)} class Queues(dict): """Queue name⇒ declaration mapping. Arguments: queues (Iterable): Initial list/tuple or dict of queues. create_missing (bool): By default any unknown queues will be added automatically, but if this flag is disabled the occurrence of unknown queues in `wanted` will raise :exc:`KeyError`. ha_policy (Sequence, str): Default HA policy for queues with none set. max_priority (int): Default x-max-priority for queues with none set. """ #: If set, this is a subset of queues to consume from. #: The rest of the queues are then used for routing only. 
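    #: (Normally populated through :meth:`select`, e.g. when the worker
    #: is started with the :option:`celery worker -Q` option.)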
_consume_from = None def __init__(self, queues=None, default_exchange=None, create_missing=True, ha_policy=None, autoexchange=None, max_priority=None, default_routing_key=None): dict.__init__(self) self.aliases = WeakValueDictionary() self.default_exchange = default_exchange self.default_routing_key = default_routing_key self.create_missing = create_missing self.ha_policy = ha_policy self.autoexchange = Exchange if autoexchange is None else autoexchange self.max_priority = max_priority if queues is not None and not isinstance(queues, Mapping): queues = {q.name: q for q in queues} for name, q in items(queues or {}): self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q) def __getitem__(self, name): try: return self.aliases[name] except KeyError: return dict.__getitem__(self, name) def __setitem__(self, name, queue): if self.default_exchange and not queue.exchange: queue.exchange = self.default_exchange dict.__setitem__(self, name, queue) if queue.alias: self.aliases[queue.alias] = queue def __missing__(self, name): if self.create_missing: return self.add(self.new_missing(name)) raise KeyError(name) def add(self, queue, **kwargs): """Add new queue. The first argument can either be a :class:`kombu.Queue` instance, or the name of a queue. If the former the rest of the keyword arguments are ignored, and options are simply taken from the queue instance. Arguments: queue (kombu.Queue, str): Queue to add. exchange (kombu.Exchange, str): if queue is str, specifies exchange name. routing_key (str): if queue is str, specifies binding key. exchange_type (str): if queue is str, specifies type of exchange. **options (Any): Additional declaration options used when queue is a str. """ if not isinstance(queue, Queue): return self.add_compat(queue, **kwargs) return self._add(queue) def add_compat(self, name, **options): # docs used to use binding_key as routing key options.setdefault('routing_key', options.get('binding_key')) if options['routing_key'] is None: options['routing_key'] = name return self._add(Queue.from_dict(name, **options)) def _add(self, queue): if not queue.routing_key: if queue.exchange is None or queue.exchange.name == '': queue.exchange = self.default_exchange queue.routing_key = self.default_routing_key if self.ha_policy: if queue.queue_arguments is None: queue.queue_arguments = {} self._set_ha_policy(queue.queue_arguments) if self.max_priority is not None: if queue.queue_arguments is None: queue.queue_arguments = {} self._set_max_priority(queue.queue_arguments) self[queue.name] = queue return queue def _set_ha_policy(self, args): policy = self.ha_policy if isinstance(policy, (list, tuple)): return args.update({'x-ha-policy': 'nodes', 'x-ha-policy-params': list(policy)}) args['x-ha-policy'] = policy def _set_max_priority(self, args): if 'x-max-priority' not in args and self.max_priority is not None: return args.update({'x-max-priority': self.max_priority}) def format(self, indent=0, indent_first=True): """Format routing table into string for log dumps.""" active = self.consume_from if not active: return '' info = [QUEUE_FORMAT.strip().format(q) for _, q in sorted(items(active))] if indent_first: return textindent('\n'.join(info), indent) return info[0] + '\n' + textindent('\n'.join(info[1:]), indent) def select_add(self, queue, **kwargs): """Add new task queue that'll be consumed from. The queue will be active even when a subset has been selected using the :option:`celery worker -Q` option. 
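        Example:
            >>> # illustrative; 'images' is an assumed queue name
            >>> app.amqp.queues.select_add('images')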
""" q = self.add(queue, **kwargs) if self._consume_from is not None: self._consume_from[q.name] = q return q def select(self, include): """Select a subset of currently defined queues to consume from. Arguments: include (Sequence[str], str): Names of queues to consume from. """ if include: self._consume_from = { name: self[name] for name in maybe_list(include) } def deselect(self, exclude): """Deselect queues so that they won't be consumed from. Arguments: exclude (Sequence[str], str): Names of queues to avoid consuming from. """ if exclude: exclude = maybe_list(exclude) if self._consume_from is None: # using selection return self.select(k for k in self if k not in exclude) # using all queues for queue in exclude: self._consume_from.pop(queue, None) def new_missing(self, name): return Queue(name, self.autoexchange(name), name) @property def consume_from(self): if self._consume_from is not None: return self._consume_from return self class AMQP(object): """App AMQP API: app.amqp.""" Connection = Connection Consumer = Consumer Producer = Producer #: compat alias to Connection BrokerConnection = Connection queues_cls = Queues #: Cached and prepared routing table. _rtable = None #: Underlying producer pool instance automatically #: set by the :attr:`producer_pool`. _producer_pool = None # Exchange class/function used when defining automatic queues. # For example, you can use ``autoexchange = lambda n: None`` to use the # AMQP default exchange: a shortcut to bypass routing # and instead send directly to the queue named in the routing key. autoexchange = None #: Max size of positional argument representation used for #: logging purposes. argsrepr_maxsize = 1024 #: Max size of keyword argument representation used for logging purposes. kwargsrepr_maxsize = 1024 def __init__(self, app): self.app = app self.task_protocols = { 1: self.as_task_v1, 2: self.as_task_v2, } @cached_property def create_task_message(self): return self.task_protocols[self.app.conf.task_protocol] @cached_property def send_task_message(self): return self._create_task_sender() def Queues(self, queues, create_missing=None, ha_policy=None, autoexchange=None, max_priority=None): # Create new :class:`Queues` instance, using queue defaults # from the current configuration. 
conf = self.app.conf default_routing_key = conf.task_default_routing_key if create_missing is None: create_missing = conf.task_create_missing_queues if ha_policy is None: ha_policy = conf.task_queue_ha_policy if max_priority is None: max_priority = conf.task_queue_max_priority if not queues and conf.task_default_queue: queues = (Queue(conf.task_default_queue, exchange=self.default_exchange, routing_key=default_routing_key),) autoexchange = (self.autoexchange if autoexchange is None else autoexchange) return self.queues_cls( queues, self.default_exchange, create_missing, ha_policy, autoexchange, max_priority, default_routing_key, ) def Router(self, queues=None, create_missing=None): """Return the current task router.""" return _routes.Router(self.routes, queues or self.queues, self.app.either('task_create_missing_queues', create_missing), app=self.app) def flush_routes(self): self._rtable = _routes.prepare(self.app.conf.task_routes) def TaskConsumer(self, channel, queues=None, accept=None, **kw): if accept is None: accept = self.app.conf.accept_content return self.Consumer( channel, accept=accept, queues=queues or list(self.queues.consume_from.values()), **kw ) def as_task_v2(self, task_id, name, args=None, kwargs=None, countdown=None, eta=None, group_id=None, expires=None, retries=0, chord=None, callbacks=None, errbacks=None, reply_to=None, time_limit=None, soft_time_limit=None, create_sent_event=False, root_id=None, parent_id=None, shadow=None, chain=None, now=None, timezone=None, origin=None, argsrepr=None, kwargsrepr=None): args = args or () kwargs = kwargs or {} if not isinstance(args, (list, tuple)): raise TypeError('task args must be a list or tuple') if not isinstance(kwargs, Mapping): raise TypeError('task keyword arguments must be a mapping') if countdown: # convert countdown to ETA self._verify_seconds(countdown, 'countdown') now = now or self.app.now() timezone = timezone or self.app.timezone eta = maybe_make_aware( now + timedelta(seconds=countdown), tz=timezone, ) if isinstance(expires, numbers.Real): self._verify_seconds(expires, 'expires') now = now or self.app.now() timezone = timezone or self.app.timezone expires = maybe_make_aware( now + timedelta(seconds=expires), tz=timezone, ) eta = eta and eta.isoformat() # If we retry a task `expires` will already be ISO8601-formatted. 
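        # Sketch of the eta/expires handling above (the task name, uuid()
        # from celery.utils, and the timestamp shown are illustrative only):
        # >>> msg = app.amqp.create_task_message(
        # ...     uuid(), 'proj.add', (2, 2), {}, countdown=10)
        # >>> msg.headers['eta']  # ISO 8601, roughly now + 10 seconds
        # '2017-07-24T12:00:10.000000+00:00'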
if not isinstance(expires, string_t): expires = expires and expires.isoformat() if argsrepr is None: argsrepr = saferepr(args, self.argsrepr_maxsize) if kwargsrepr is None: kwargsrepr = saferepr(kwargs, self.kwargsrepr_maxsize) if JSON_NEEDS_UNICODE_KEYS: # pragma: no cover if callbacks: callbacks = [utf8dict(callback) for callback in callbacks] if errbacks: errbacks = [utf8dict(errback) for errback in errbacks] if chord: chord = utf8dict(chord) if not root_id: # empty root_id defaults to task_id root_id = task_id return task_message( headers={ 'lang': 'py', 'task': name, 'id': task_id, 'eta': eta, 'expires': expires, 'group': group_id, 'retries': retries, 'timelimit': [time_limit, soft_time_limit], 'root_id': root_id, 'parent_id': parent_id, 'argsrepr': argsrepr, 'kwargsrepr': kwargsrepr, 'origin': origin or anon_nodename() }, properties={ 'correlation_id': task_id, 'reply_to': reply_to or '', }, body=( args, kwargs, { 'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain, 'chord': chord, }, ), sent_event={ 'uuid': task_id, 'root_id': root_id, 'parent_id': parent_id, 'name': name, 'args': argsrepr, 'kwargs': kwargsrepr, 'retries': retries, 'eta': eta, 'expires': expires, } if create_sent_event else None, ) def as_task_v1(self, task_id, name, args=None, kwargs=None, countdown=None, eta=None, group_id=None, expires=None, retries=0, chord=None, callbacks=None, errbacks=None, reply_to=None, time_limit=None, soft_time_limit=None, create_sent_event=False, root_id=None, parent_id=None, shadow=None, now=None, timezone=None): args = args or () kwargs = kwargs or {} utc = self.utc if not isinstance(args, (list, tuple)): raise TypeError('task args must be a list or tuple') if not isinstance(kwargs, Mapping): raise TypeError('task keyword arguments must be a mapping') if countdown: # convert countdown to ETA self._verify_seconds(countdown, 'countdown') now = now or self.app.now() eta = now + timedelta(seconds=countdown) if isinstance(expires, numbers.Real): self._verify_seconds(expires, 'expires') now = now or self.app.now() expires = now + timedelta(seconds=expires) eta = eta and eta.isoformat() expires = expires and expires.isoformat() if JSON_NEEDS_UNICODE_KEYS: # pragma: no cover if callbacks: callbacks = [utf8dict(callback) for callback in callbacks] if errbacks: errbacks = [utf8dict(errback) for errback in errbacks] if chord: chord = utf8dict(chord) return task_message( headers={}, properties={ 'correlation_id': task_id, 'reply_to': reply_to or '', }, body={ 'task': name, 'id': task_id, 'args': args, 'kwargs': kwargs, 'group': group_id, 'retries': retries, 'eta': eta, 'expires': expires, 'utc': utc, 'callbacks': callbacks, 'errbacks': errbacks, 'timelimit': (time_limit, soft_time_limit), 'taskset': group_id, 'chord': chord, }, sent_event={ 'uuid': task_id, 'name': name, 'args': saferepr(args), 'kwargs': saferepr(kwargs), 'retries': retries, 'eta': eta, 'expires': expires, } if create_sent_event else None, ) def _verify_seconds(self, s, what): if s < INT_MIN: raise ValueError('%s is out of range: %r' % (what, s)) return s def _create_task_sender(self): default_retry = self.app.conf.task_publish_retry default_policy = self.app.conf.task_publish_retry_policy default_delivery_mode = self.app.conf.task_default_delivery_mode default_queue = self.default_queue queues = self.queues send_before_publish = signals.before_task_publish.send before_receivers = signals.before_task_publish.receivers send_after_publish = signals.after_task_publish.send after_receivers = 
signals.after_task_publish.receivers send_task_sent = signals.task_sent.send # XXX compat sent_receivers = signals.task_sent.receivers default_evd = self._event_dispatcher default_exchange = self.default_exchange default_rkey = self.app.conf.task_default_routing_key default_serializer = self.app.conf.task_serializer default_compressor = self.app.conf.result_compression def send_task_message(producer, name, message, exchange=None, routing_key=None, queue=None, event_dispatcher=None, retry=None, retry_policy=None, serializer=None, delivery_mode=None, compression=None, declare=None, headers=None, exchange_type=None, **kwargs): retry = default_retry if retry is None else retry headers2, properties, body, sent_event = message if headers: headers2.update(headers) if kwargs: properties.update(kwargs) qname = queue if queue is None and exchange is None: queue = default_queue if queue is not None: if isinstance(queue, string_t): qname, queue = queue, queues[queue] else: qname = queue.name if delivery_mode is None: try: delivery_mode = queue.exchange.delivery_mode except AttributeError: pass delivery_mode = delivery_mode or default_delivery_mode if exchange_type is None: try: exchange_type = queue.exchange.type except AttributeError: exchange_type = 'direct' # convert to anon-exchange, when exchange not set and direct ex. if (not exchange or not routing_key) and exchange_type == 'direct': exchange, routing_key = '', qname elif exchange is None: # not topic exchange, and exchange not undefined exchange = queue.exchange.name or default_exchange routing_key = routing_key or queue.routing_key or default_rkey if declare is None and queue and not isinstance(queue, Broadcast): declare = [queue] # merge default and custom policy retry = default_retry if retry is None else retry _rp = (dict(default_policy, **retry_policy) if retry_policy else default_policy) if before_receivers: send_before_publish( sender=name, body=body, exchange=exchange, routing_key=routing_key, declare=declare, headers=headers2, properties=properties, retry_policy=retry_policy, ) ret = producer.publish( body, exchange=exchange, routing_key=routing_key, serializer=serializer or default_serializer, compression=compression or default_compressor, retry=retry, retry_policy=_rp, delivery_mode=delivery_mode, declare=declare, headers=headers2, **properties ) if after_receivers: send_after_publish(sender=name, body=body, headers=headers2, exchange=exchange, routing_key=routing_key) if sent_receivers: # XXX deprecated if isinstance(body, tuple): # protocol version 2 send_task_sent( sender=name, task_id=headers2['id'], task=name, args=body[0], kwargs=body[1], eta=headers2['eta'], taskset=headers2['group'], ) else: # protocol version 1 send_task_sent( sender=name, task_id=body['id'], task=name, args=body['args'], kwargs=body['kwargs'], eta=body['eta'], taskset=body['taskset'], ) if sent_event: evd = event_dispatcher or default_evd exname = exchange if isinstance(exname, Exchange): exname = exname.name sent_event.update({ 'queue': qname, 'exchange': exname, 'routing_key': routing_key, }) evd.publish('task-sent', sent_event, producer, retry=retry, retry_policy=retry_policy) return ret return send_task_message @cached_property def default_queue(self): return self.queues[self.app.conf.task_default_queue] @cached_property def queues(self): """Queue name⇒ declaration mapping.""" return self.Queues(self.app.conf.task_queues) @queues.setter # noqa def queues(self, queues): return self.Queues(queues) @property def routes(self): if self._rtable is None: 
self.flush_routes() return self._rtable @cached_property def router(self): return self.Router() @property def producer_pool(self): if self._producer_pool is None: self._producer_pool = pools.producers[ self.app.connection_for_write()] self._producer_pool.limit = self.app.pool.limit return self._producer_pool publisher_pool = producer_pool # compat alias @cached_property def default_exchange(self): return Exchange(self.app.conf.task_default_exchange, self.app.conf.task_default_exchange_type) @cached_property def utc(self): return self.app.conf.enable_utc @cached_property def _event_dispatcher(self): # We call Dispatcher.publish with a custom producer # so don't need the dispatcher to be enabled. celery-4.1.0/celery/app/control.py0000644000175000017500000003550613130607475017035 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Worker Remote Control Client. Client for worker remote control commands. Server implementation is in :mod:`celery.worker.control`. """ from __future__ import absolute_import, unicode_literals import warnings from billiard.common import TERM_SIGNAME from kombu.pidbox import Mailbox from kombu.utils.functional import lazy from kombu.utils.objects import cached_property from celery.exceptions import DuplicateNodenameWarning from celery.utils.text import pluralize __all__ = ['Inspect', 'Control', 'flatten_reply'] W_DUPNODE = """\ Received multiple replies from node {0}: {1}. Please make sure you give each node a unique nodename using the celery worker `-n` option.\ """ def flatten_reply(reply): """Flatten node replies. Convert from a list of replies in this format:: [{'a@example.com': reply}, {'b@example.com': reply}] into this format:: {'a@example.com': reply, 'b@example.com': reply} """ nodes, dupes = {}, set() for item in reply: [dupes.add(name) for name in item if name in nodes] nodes.update(item) if dupes: warnings.warn(DuplicateNodenameWarning( W_DUPNODE.format( pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)), ), )) return nodes class Inspect(object): """API for app.control.inspect.""" app = None def __init__(self, destination=None, timeout=1.0, callback=None, connection=None, app=None, limit=None): self.app = app or self.app self.destination = destination self.timeout = timeout self.callback = callback self.connection = connection self.limit = limit def _prepare(self, reply): if reply: by_node = flatten_reply(reply) if (self.destination and not isinstance(self.destination, (list, tuple))): return by_node.get(self.destination) return by_node def _request(self, command, **kwargs): return self._prepare(self.app.control.broadcast( command, arguments=kwargs, destination=self.destination, callback=self.callback, connection=self.connection, limit=self.limit, timeout=self.timeout, reply=True, )) def report(self): return self._request('report') def clock(self): return self._request('clock') def active(self, safe=None): # safe is ignored since 4.0 # as no objects will need serialization now that we # have argsrepr/kwargsrepr.
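        # Illustrative reply shape (worker names and ids are assumed):
        # >>> app.control.inspect().active()
        # {'worker1@example.com': [{'id': '32666e9b-...', 'name': 'proj.add',
        #                           'args': '(2, 2)', ...}]}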
return self._request('active') def scheduled(self, safe=None): return self._request('scheduled') def reserved(self, safe=None): return self._request('reserved') def stats(self): return self._request('stats') def revoked(self): return self._request('revoked') def registered(self, *taskinfoitems): return self._request('registered', taskinfoitems=taskinfoitems) registered_tasks = registered def ping(self, destination=None): return self._request('ping') def active_queues(self): return self._request('active_queues') def query_task(self, *ids): # signature used to be unary: query_task(ids=[id1, id2]) # we need this to preserve backward compatibility. if len(ids) == 1 and isinstance(ids[0], (list, tuple)): ids = ids[0] return self._request('query_task', ids=ids) def conf(self, with_defaults=False): return self._request('conf', with_defaults=with_defaults) def hello(self, from_node, revoked=None): return self._request('hello', from_node=from_node, revoked=revoked) def memsample(self): return self._request('memsample') def memdump(self, samples=10): return self._request('memdump', samples=samples) def objgraph(self, type='Request', n=200, max_depth=10): return self._request('objgraph', num=n, max_depth=max_depth, type=type) class Control(object): """Worker remote control client.""" Mailbox = Mailbox def __init__(self, app=None): self.app = app self.mailbox = self.Mailbox( 'celery', type='fanout', accept=['json'], producer_pool=lazy(lambda: self.app.amqp.producer_pool), queue_ttl=app.conf.control_queue_ttl, reply_queue_ttl=app.conf.control_queue_ttl, queue_expires=app.conf.control_queue_expires, reply_queue_expires=app.conf.control_queue_expires, ) @cached_property def inspect(self): return self.app.subclass_with_self(Inspect, reverse='control.inspect') def purge(self, connection=None): """Discard all waiting tasks. This will ignore all tasks waiting for execution, and they will be deleted from the messaging server. Arguments: connection (kombu.Connection): Optional specific connection instance to use. If not provided a connection will be acquired from the connection pool. Returns: int: the number of tasks discarded. """ with self.app.connection_or_acquire(connection) as conn: return self.app.amqp.TaskConsumer(conn).purge() discard_all = purge def election(self, id, topic, action=None, connection=None): self.broadcast( 'election', connection=connection, destination=None, arguments={ 'id': id, 'topic': topic, 'action': action, }, ) def revoke(self, task_id, destination=None, terminate=False, signal=TERM_SIGNAME, **kwargs): """Tell all (or specific) workers to revoke a task by id. If a task is revoked, the workers will ignore the task and not execute it after all. Arguments: task_id (str): Id of the task to revoke. terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast('revoke', destination=destination, arguments={ 'task_id': task_id, 'terminate': terminate, 'signal': signal, }, **kwargs) def terminate(self, task_id, destination=None, signal=TERM_SIGNAME, **kwargs): """Tell all (or specific) workers to terminate a task by id. See Also: This is just a shortcut to :meth:`revoke` with the terminate argument enabled. """ return self.revoke( task_id, destination=destination, terminate=True, signal=signal, **kwargs) def ping(self, destination=None, timeout=1.0, **kwargs): """Ping all (or specific) workers.
Returns: List[Dict]: List of ``{'hostname': reply}`` dictionaries. See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast( 'ping', reply=True, arguments={}, destination=destination, timeout=timeout, **kwargs) def rate_limit(self, task_name, rate_limit, destination=None, **kwargs): """Tell workers to set a new rate limit for task by type. Arguments: task_name (str): Name of task to change rate limit for. rate_limit (int, str): The rate limit as tasks per second, or a rate limit string (`'100/m'`, etc. see :attr:`celery.task.base.Task.rate_limit` for more information). See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast( 'rate_limit', destination=destination, arguments={ 'task_name': task_name, 'rate_limit': rate_limit, }, **kwargs) def add_consumer(self, queue, exchange=None, exchange_type='direct', routing_key=None, options=None, destination=None, **kwargs): """Tell all (or specific) workers to start consuming from a new queue. Only the queue name is required: if only the queue is specified, then the exchange/routing key will be set to the same name (like automatic queues do). Note: This command does not respect the default queue/exchange options in the configuration. Arguments: queue (str): Name of queue to start consuming from. exchange (str): Optional name of exchange. exchange_type (str): Type of exchange (defaults to 'direct'). routing_key (str): Optional routing key. options (Dict): Additional options as supported by :meth:`kombu.entity.Queue.from_dict`. See Also: :meth:`broadcast` for supported keyword arguments. """ return self.broadcast( 'add_consumer', destination=destination, arguments=dict({ 'queue': queue, 'exchange': exchange, 'exchange_type': exchange_type, 'routing_key': routing_key, }, **options or {}), **kwargs ) def cancel_consumer(self, queue, destination=None, **kwargs): """Tell all (or specific) workers to stop consuming from ``queue``. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'cancel_consumer', destination=destination, arguments={'queue': queue}, **kwargs) def time_limit(self, task_name, soft=None, hard=None, destination=None, **kwargs): """Tell workers to set time limits for a task by type. Arguments: task_name (str): Name of task to change time limits for. soft (float): New soft time limit (in seconds). hard (float): New hard time limit (in seconds). **kwargs (Any): arguments passed on to :meth:`broadcast`. """ return self.broadcast( 'time_limit', arguments={ 'task_name': task_name, 'hard': hard, 'soft': soft, }, destination=destination, **kwargs) def enable_events(self, destination=None, **kwargs): """Tell all (or specific) workers to enable events. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'enable_events', arguments={}, destination=destination, **kwargs) def disable_events(self, destination=None, **kwargs): """Tell all (or specific) workers to disable events. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'disable_events', arguments={}, destination=destination, **kwargs) def pool_grow(self, n=1, destination=None, **kwargs): """Tell all (or specific) workers to grow the pool by ``n``. See Also: Supports the same arguments as :meth:`broadcast`.
""" return self.broadcast( 'pool_grow', arguments={'n': n}, destination=destination, **kwargs) def pool_shrink(self, n=1, destination=None, **kwargs): """Tell all (or specific) workers to shrink the pool by ``n``. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'pool_shrink', arguments={'n': n}, destination=destination, **kwargs) def autoscale(self, max, min, destination=None, **kwargs): """Change worker(s) autoscale setting. See Also: Supports the same arguments as :meth:`broadcast`. """ return self.broadcast( 'autoscale', arguments={'max': max, 'min': min}, destination=destination, **kwargs) def shutdown(self, destination=None, **kwargs): """Shutdown worker(s). See Also: Supports the same arguments as :meth:`broadcast` """ return self.broadcast( 'shutdown', arguments={}, destination=destination, **kwargs) def pool_restart(self, modules=None, reload=False, reloader=None, destination=None, **kwargs): """Restart the execution pools of all or specific workers. Keyword Arguments: modules (Sequence[str]): List of modules to reload. reload (bool): Flag to enable module reloading. Default is False. reloader (Any): Function to reload a module. destination (Sequence[str]): List of worker names to send this command to. See Also: Supports the same arguments as :meth:`broadcast` """ return self.broadcast( 'pool_restart', arguments={ 'modules': modules, 'reload': reload, 'reloader': reloader, }, destination=destination, **kwargs) def heartbeat(self, destination=None, **kwargs): """Tell worker(s) to send a heartbeat immediately. See Also: Supports the same arguments as :meth:`broadcast` """ return self.broadcast( 'heartbeat', arguments={}, destination=destination, **kwargs) def broadcast(self, command, arguments=None, destination=None, connection=None, reply=False, timeout=1.0, limit=None, callback=None, channel=None, **extra_kwargs): """Broadcast a control command to the celery workers. Arguments: command (str): Name of command to send. arguments (Dict): Keyword arguments for the command. destination (List): If set, a list of the hosts to send the command to, when empty broadcast to all workers. connection (kombu.Connection): Custom broker connection to use, if not set, a connection will be acquired from the pool. reply (bool): Wait for and return the reply. timeout (float): Timeout in seconds to wait for the reply. limit (int): Limit number of replies. callback (Callable): Callback called immediately for each reply received. 
""" with self.app.connection_or_acquire(connection) as conn: arguments = dict(arguments or {}, **extra_kwargs) return self.mailbox(conn)._broadcast( command, arguments, destination, reply, timeout, limit, callback, channel=channel, ) celery-4.1.0/celery/app/defaults.py0000644000175000017500000003071113135426300017150 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Configuration introspection and defaults.""" from __future__ import absolute_import, unicode_literals import sys from collections import deque, namedtuple from datetime import timedelta from celery.five import items, keys, python_2_unicode_compatible from celery.utils.functional import memoize from celery.utils.serialization import strtobool __all__ = ['Option', 'NAMESPACES', 'flatten', 'find'] is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') DEFAULT_POOL = 'prefork' if is_jython: DEFAULT_POOL = 'solo' elif is_pypy: if sys.pypy_version_info[0:3] < (1, 5, 0): DEFAULT_POOL = 'solo' else: DEFAULT_POOL = 'prefork' DEFAULT_ACCEPT_CONTENT = ['json'] DEFAULT_PROCESS_LOG_FMT = """ [%(asctime)s: %(levelname)s/%(processName)s] %(message)s """.strip() DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \ %(task_name)s[%(task_id)s]: %(message)s""" OLD_NS = {'celery_{0}'} OLD_NS_BEAT = {'celerybeat_{0}'} OLD_NS_WORKER = {'celeryd_{0}'} searchresult = namedtuple('searchresult', ('namespace', 'key', 'type')) def Namespace(__old__=None, **options): if __old__ is not None: for key, opt in items(options): if not opt.old: opt.old = {o.format(key) for o in __old__} return options def old_ns(ns): return {'{0}_{{0}}'.format(ns)} @python_2_unicode_compatible class Option(object): """Decribes a Celery configuration option.""" alt = None deprecate_by = None remove_by = None old = set() typemap = dict(string=str, int=int, float=float, any=lambda v: v, bool=strtobool, dict=dict, tuple=tuple) def __init__(self, default=None, *args, **kwargs): self.default = default self.type = kwargs.get('type') or 'string' for attr, value in items(kwargs): setattr(self, attr, value) def to_python(self, value): return self.typemap[self.type](value) def __repr__(self): return '{0} default->{1!r}>'.format(self.type, self.default) NAMESPACES = Namespace( accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS), enable_utc=Option(True, type='bool'), imports=Option((), type='tuple', old=OLD_NS), include=Option((), type='tuple', old=OLD_NS), timezone=Option(type='string', old=OLD_NS), beat=Namespace( __old__=OLD_NS_BEAT, max_loop_interval=Option(0, type='float'), schedule=Option({}, type='dict'), scheduler=Option('celery.beat:PersistentScheduler'), schedule_filename=Option('celerybeat-schedule'), sync_every=Option(0, type='int'), ), broker=Namespace( url=Option(None, type='string'), read_url=Option(None, type='string'), write_url=Option(None, type='string'), transport=Option(type='string'), transport_options=Option({}, type='dict'), connection_timeout=Option(4, type='float'), connection_retry=Option(True, type='bool'), connection_max_retries=Option(100, type='int'), failover_strategy=Option(None, type='string'), heartbeat=Option(120, type='int'), heartbeat_checkrate=Option(3.0, type='int'), login_method=Option(None, type='string'), pool_limit=Option(10, type='int'), use_ssl=Option(False, type='bool'), host=Option(type='string'), port=Option(type='int'), user=Option(type='string'), password=Option(type='string'), vhost=Option(type='string'), ), cache=Namespace( __old__=old_ns('celery_cache'), 
backend=Option(), backend_options=Option({}, type='dict'), ), cassandra=Namespace( entry_ttl=Option(type='float'), keyspace=Option(type='string'), port=Option(type='string'), read_consistency=Option(type='string'), servers=Option(type='list'), table=Option(type='string'), write_consistency=Option(type='string'), auth_provider=Option(type='string'), auth_kwargs=Option(type='string'), ), control=Namespace( queue_ttl=Option(300.0, type='float'), queue_expires=Option(10.0, type='float'), ), couchbase=Namespace( __old__=old_ns('celery_couchbase'), backend_settings=Option(None, type='dict'), ), mongodb=Namespace( __old__=old_ns('celery_mongodb'), backend_settings=Option(type='dict'), ), event=Namespace( __old__=old_ns('celery_event'), queue_expires=Option(60.0, type='float'), queue_ttl=Option(5.0, type='float'), queue_prefix=Option('celeryev'), serializer=Option('json'), ), redis=Namespace( __old__=old_ns('celery_redis'), backend_use_ssl=Option(type='dict'), db=Option(type='int'), host=Option(type='string'), max_connections=Option(type='int'), password=Option(type='string'), port=Option(type='int'), socket_timeout=Option(120.0, type='float'), socket_connect_timeout=Option(None, type='float'), ), result=Namespace( __old__=old_ns('celery_result'), backend=Option(type='string'), cache_max=Option( -1, type='int', old={'celery_max_cached_results'}, ), compression=Option(type='str'), exchange=Option('celeryresults'), exchange_type=Option('direct'), expires=Option( timedelta(days=1), type='float', old={'celery_task_result_expires'}, ), persistent=Option(None, type='bool'), serializer=Option('json'), ), elasticsearch=Namespace( __old__=old_ns('celery_elasticsearch'), retry_on_timeout=Option(type='bool'), max_retries=Option(type='int'), timeout=Option(type='float'), ), riak=Namespace( __old__=old_ns('celery_riak'), backend_settings=Option(type='dict'), ), security=Namespace( __old__=old_ns('celery_security'), certificate=Option(type='string'), cert_store=Option(type='string'), key=Option(type='string'), ), database=Namespace( url=Option(old={'celery_result_dburi'}), engine_options=Option( type='dict', old={'celery_result_engine_options'}, ), short_lived_sessions=Option( False, type='bool', old={'celery_result_db_short_lived_sessions'}, ), table_names=Option(type='dict', old={'celery_result_db_tablenames'}), ), task=Namespace( __old__=OLD_NS, acks_late=Option(False, type='bool'), always_eager=Option(False, type='bool'), annotations=Option(type='any'), compression=Option(type='string', old={'celery_message_compression'}), create_missing_queues=Option(True, type='bool'), default_delivery_mode=Option(2, type='string'), default_queue=Option('celery'), default_exchange=Option(None, type='string'), # taken from queue default_exchange_type=Option('direct'), default_routing_key=Option(None, type='string'), # taken from queue default_rate_limit=Option(type='string'), eager_propagates=Option( False, type='bool', old={'celery_eager_propagates_exceptions'}, ), ignore_result=Option(False, type='bool'), protocol=Option(2, type='int', old={'celery_task_protocol'}), publish_retry=Option( True, type='bool', old={'celery_task_publish_retry'}, ), publish_retry_policy=Option( {'max_retries': 3, 'interval_start': 0, 'interval_max': 1, 'interval_step': 0.2}, type='dict', old={'celery_task_publish_retry_policy'}, ), queues=Option(type='dict'), queue_ha_policy=Option(None, type='string'), queue_max_priority=Option(None, type='int'), reject_on_worker_lost=Option(type='bool'), remote_tracebacks=Option(False, type='bool'), 
routes=Option(type='any'), send_sent_event=Option( False, type='bool', old={'celery_send_task_sent_event'}, ), serializer=Option('json', old={'celery_task_serializer'}), soft_time_limit=Option( type='float', old={'celeryd_task_soft_time_limit'}, ), time_limit=Option( type='float', old={'celeryd_task_time_limit'}, ), store_errors_even_if_ignored=Option(False, type='bool'), track_started=Option(False, type='bool'), ), worker=Namespace( __old__=OLD_NS_WORKER, agent=Option(None, type='string'), autoscaler=Option('celery.worker.autoscale:Autoscaler'), concurrency=Option(0, type='int'), consumer=Option('celery.worker.consumer:Consumer', type='string'), direct=Option(False, type='bool', old={'celery_worker_direct'}), disable_rate_limits=Option( False, type='bool', old={'celery_disable_rate_limits'}, ), enable_remote_control=Option( True, type='bool', old={'celery_enable_remote_control'}, ), hijack_root_logger=Option(True, type='bool'), log_color=Option(type='bool'), log_format=Option(DEFAULT_PROCESS_LOG_FMT), lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}), max_memory_per_child=Option(type='int'), max_tasks_per_child=Option(type='int'), pool=Option(DEFAULT_POOL), pool_putlocks=Option(True, type='bool'), pool_restarts=Option(False, type='bool'), prefetch_multiplier=Option(4, type='int'), redirect_stdouts=Option( True, type='bool', old={'celery_redirect_stdouts'}, ), redirect_stdouts_level=Option( 'WARNING', old={'celery_redirect_stdouts_level'}, ), send_task_events=Option( False, type='bool', old={'celery_send_events'}, ), state_db=Option(), task_log_format=Option(DEFAULT_TASK_LOG_FMT), timer=Option(type='string'), timer_precision=Option(1.0, type='float'), ), ) def _flatten_keys(ns, key, opt): return [(ns + key, opt)] def _to_compat(ns, key, opt): if opt.old: return [ (oldkey.format(key).upper(), ns + key, opt) for oldkey in opt.old ] return [((ns + key).upper(), ns + key, opt)] def flatten(d, root='', keyfilter=_flatten_keys): """Flatten settings.""" stack = deque([(root, d)]) while stack: ns, options = stack.popleft() for key, opt in items(options): if isinstance(opt, dict): stack.append((ns + key + '_', opt)) else: for ret in keyfilter(ns, key, opt): yield ret DEFAULTS = { key: opt.default for key, opt in flatten(NAMESPACES) } __compat = list(flatten(NAMESPACES, keyfilter=_to_compat)) _OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat} _TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat} _TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat} __compat = None SETTING_KEYS = set(keys(DEFAULTS)) _OLD_SETTING_KEYS = set(keys(_TO_NEW_KEY)) def find_deprecated_settings(source): # pragma: no cover from celery.utils import deprecated for name, opt in flatten(NAMESPACES): if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): deprecated.warn(description='The {0!r} setting'.format(name), deprecation=opt.deprecate_by, removal=opt.remove_by, alternative='Use the {0.alt} instead'.format(opt)) return source @memoize(maxsize=None) def find(name, namespace='celery'): """Find setting by name.""" # - Try specified name-space first. namespace = namespace.lower() try: return searchresult( namespace, name.lower(), NAMESPACES[namespace][name.lower()], ) except KeyError: # - Try all the other namespaces. 
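    # Illustrative lookup through the fallback below (a sketch, assuming
    # the defaults defined above):
    # >>> find('disable_rate_limits')
    # searchresult(namespace='worker', key='disable_rate_limits',
    #              type=<Option: type->bool default->False>)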
for ns, opts in items(NAMESPACES): if ns.lower() == name.lower(): return searchresult(None, ns, opts) elif isinstance(opts, dict): try: return searchresult(ns, name.lower(), opts[name.lower()]) except KeyError: pass # - See if name is a qualname last. return searchresult(None, name.lower(), DEFAULTS[name.lower()]) celery-4.1.0/celery/app/registry.py0000644000175000017500000000371413130607475017225 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Registry of available tasks.""" from __future__ import absolute_import, unicode_literals import inspect from importlib import import_module from celery._state import get_current_app from celery.exceptions import NotRegistered, InvalidTaskError from celery.five import items __all__ = ['TaskRegistry'] class TaskRegistry(dict): """Map of registered tasks.""" NotRegistered = NotRegistered def __missing__(self, key): raise self.NotRegistered(key) def register(self, task): """Register a task in the task registry. The task will be automatically instantiated if not already an instance. Name must be configured prior to registration. """ if task.name is None: raise InvalidTaskError( 'Task class {0!r} must specify .name attribute'.format( type(task).__name__)) self[task.name] = inspect.isclass(task) and task() or task def unregister(self, name): """Unregister task by name. Arguments: name (str): name of the task to unregister, or a :class:`celery.task.base.Task` with a valid `name` attribute. Raises: celery.exceptions.NotRegistered: if the task is not registered. """ try: self.pop(getattr(name, 'name', name)) except KeyError: raise self.NotRegistered(name) # -- these methods are irrelevant now and will be removed in 4.0 def regular(self): return self.filter_types('regular') def periodic(self): return self.filter_types('periodic') def filter_types(self, type): return {name: task for name, task in items(self) if getattr(task, 'type', 'regular') == type} def _unpickle_task(name): return get_current_app().tasks[name] def _unpickle_task_v2(name, module=None): if module: import_module(module) return get_current_app().tasks[name] celery-4.1.0/celery/app/events.py0000644000175000017500000000255613130607475016664 0ustar omeromer00000000000000"""Implementation for the app.events shortcuts.""" from __future__ import absolute_import, unicode_literals from contextlib import contextmanager from kombu.utils.objects import cached_property class Events(object): """Implements app.events.""" receiver_cls = 'celery.events.receiver:EventReceiver' dispatcher_cls = 'celery.events.dispatcher:EventDispatcher' state_cls = 'celery.events.state:State' def __init__(self, app=None): self.app = app @cached_property def Receiver(self): return self.app.subclass_with_self( self.receiver_cls, reverse='events.Receiver') @cached_property def Dispatcher(self): return self.app.subclass_with_self( self.dispatcher_cls, reverse='events.Dispatcher') @cached_property def State(self): return self.app.subclass_with_self( self.state_cls, reverse='events.State') @contextmanager def default_dispatcher(self, hostname=None, enabled=True, buffer_while_offline=False): with self.app.amqp.producer_pool.acquire(block=True) as prod: # pylint: disable=too-many-function-args # This is a property pylint... with self.Dispatcher(prod.connection, hostname, enabled, prod.channel, buffer_while_offline) as d: yield d celery-4.1.0/celery/app/builtins.py0000644000175000017500000001474413130607475017213 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Built-in Tasks. 
The built-in tasks are always available in all app instances. """ from __future__ import absolute_import, unicode_literals from celery._state import connect_on_app_finalize from celery.utils.log import get_logger __all__ = [] logger = get_logger(__name__) @connect_on_app_finalize def add_backend_cleanup_task(app): """Task used to clean up expired results. If the configured backend requires periodic cleanup this task is also automatically configured to run every day at 4am (requires :program:`celery beat` to be running). """ @app.task(name='celery.backend_cleanup', shared=False, lazy=False) def backend_cleanup(): app.backend.cleanup() return backend_cleanup @connect_on_app_finalize def add_accumulate_task(app): """Task used by Task.replace when replacing a task with a group.""" @app.task(bind=True, name='celery.accumulate', shared=False, lazy=False) def accumulate(self, *args, **kwargs): index = kwargs.get('index') return args[index] if index is not None else args return accumulate @connect_on_app_finalize def add_unlock_chord_task(app): """Task used by result backends without native chord support. Joins the chord by creating a task chain that polls the header for completion. """ from celery.canvas import maybe_signature from celery.exceptions import ChordError from celery.result import allow_join_result, result_from_tuple @app.task(name='celery.chord_unlock', max_retries=None, shared=False, default_retry_delay=1, ignore_result=True, lazy=False, bind=True) def unlock_chord(self, group_id, callback, interval=None, max_retries=None, result=None, Result=app.AsyncResult, GroupResult=app.GroupResult, result_from_tuple=result_from_tuple, **kwargs): if interval is None: interval = self.default_retry_delay # check if the task group is ready, and if so apply the callback.
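        # Illustrative trigger for this task (add/tsum are assumed tasks):
        # a chord run against a backend without native join, e.g.
        # >>> chord([add.s(2, 2), add.s(4, 4)])(tsum.s())
        # schedules celery.chord_unlock to poll the header group like this.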
callback = maybe_signature(callback, app) deps = GroupResult( group_id, [result_from_tuple(r, app=app) for r in result], app=app, ) j = deps.join_native if deps.supports_native_join else deps.join try: ready = deps.ready() except Exception as exc: raise self.retry( exc=exc, countdown=interval, max_retries=max_retries, ) else: if not ready: raise self.retry(countdown=interval, max_retries=max_retries) callback = maybe_signature(callback, app=app) try: with allow_join_result(): ret = j(timeout=3.0, propagate=True) except Exception as exc: # pylint: disable=broad-except try: culprit = next(deps._failed_join_report()) reason = 'Dependency {0.id} raised {1!r}'.format(culprit, exc) except StopIteration: reason = repr(exc) logger.exception('Chord %r raised: %r', group_id, exc) app.backend.chord_error_from_stack(callback, ChordError(reason)) else: try: callback.delay(ret) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', group_id, exc) app.backend.chord_error_from_stack( callback, exc=ChordError('Callback error: {0!r}'.format(exc)), ) return unlock_chord @connect_on_app_finalize def add_map_task(app): from celery.canvas import signature @app.task(name='celery.map', shared=False, lazy=False) def xmap(task, it): task = signature(task, app=app).type return [task(item) for item in it] return xmap @connect_on_app_finalize def add_starmap_task(app): from celery.canvas import signature @app.task(name='celery.starmap', shared=False, lazy=False) def xstarmap(task, it): task = signature(task, app=app).type return [task(*item) for item in it] return xstarmap @connect_on_app_finalize def add_chunk_task(app): from celery.canvas import chunks as _chunks @app.task(name='celery.chunks', shared=False, lazy=False) def chunks(task, it, n): return _chunks.apply_chunks(task, it, n) return chunks @connect_on_app_finalize def add_group_task(app): """No longer used, but here for backwards compatibility.""" from celery.canvas import maybe_signature from celery.result import result_from_tuple @app.task(name='celery.group', bind=True, shared=False, lazy=False) def group(self, tasks, result, group_id, partial_args, add_to_parent=True): app = self.app result = result_from_tuple(result, app) # any partial args are added to all tasks in the group taskit = (maybe_signature(task, app=app).clone(partial_args) for i, task in enumerate(tasks)) with app.producer_or_acquire() as producer: [stask.apply_async(group_id=group_id, producer=producer, add_to_parent=False) for stask in taskit] parent = app.current_worker_task if add_to_parent and parent: parent.add_trail(result) return result return group @connect_on_app_finalize def add_chain_task(app): """No longer used, but here for backwards compatibility.""" @app.task(name='celery.chain', shared=False, lazy=False) def chain(*args, **kwargs): raise NotImplementedError('chain is not a real task') return chain @connect_on_app_finalize def add_chord_task(app): """No longer used, but here for backwards compatibility.""" from celery import group, chord as _chord from celery.canvas import maybe_signature @app.task(name='celery.chord', bind=True, ignore_result=False, shared=False, lazy=False) def chord(self, header, body, partial_args=(), interval=None, countdown=1, max_retries=None, eager=False, **kwargs): app = self.app # - convert back to group if serialized tasks = header.tasks if isinstance(header, group) else header header = group([ maybe_signature(s, app=app) for s in tasks ], app=self.app) body = maybe_signature(body, app=app) ch = 
_chord(header, body) return ch.run(header, body, partial_args, app, interval, countdown, max_retries, **kwargs) return chord celery-4.1.0/celery/fixups/0000755000175000017500000000000013135426347015536 5ustar omeromer00000000000000celery-4.1.0/celery/fixups/__init__.py0000644000175000017500000000001613130607475017642 0ustar omeromer00000000000000"""Fixups.""" celery-4.1.0/celery/fixups/django.py0000644000175000017500000001446713130607475017364 0ustar omeromer00000000000000"""Django-specific customization.""" from __future__ import absolute_import, unicode_literals import os import sys import warnings from kombu.utils.imports import symbol_by_name from kombu.utils.objects import cached_property from datetime import datetime from importlib import import_module from celery import _state from celery import signals from celery.exceptions import FixupWarning, ImproperlyConfigured __all__ = ['DjangoFixup', 'fixup'] ERR_NOT_INSTALLED = """\ Environment variable DJANGO_SETTINGS_MODULE is defined but Django isn't installed. Won't apply Django fix-ups! """ def _maybe_close_fd(fh): try: os.close(fh.fileno()) except (AttributeError, OSError, TypeError): # TypeError added for celery#962 pass def _verify_django_version(django): if django.VERSION < (1, 8): raise ImproperlyConfigured('Celery 4.x requires Django 1.8 or later.') def fixup(app, env='DJANGO_SETTINGS_MODULE'): """Install Django fixup if settings module environment is set.""" SETTINGS_MODULE = os.environ.get(env) if SETTINGS_MODULE and 'django' not in app.loader_cls.lower(): try: import django # noqa except ImportError: warnings.warn(FixupWarning(ERR_NOT_INSTALLED)) else: _verify_django_version(django) return DjangoFixup(app).install() class DjangoFixup(object): """Fixup installed when using Django.""" def __init__(self, app): self.app = app if _state.default_app is None: self.app.set_default() self._worker_fixup = None def install(self): # Need to add project directory to path sys.path.append(os.getcwd()) self._settings = symbol_by_name('django.conf:settings') self.app.loader.now = self.now signals.import_modules.connect(self.on_import_modules) signals.worker_init.connect(self.on_worker_init) return self @property def worker_fixup(self): if self._worker_fixup is None: self._worker_fixup = DjangoWorkerFixup(self.app) return self._worker_fixup @worker_fixup.setter def worker_fixup(self, value): self._worker_fixup = value def on_import_modules(self, **kwargs): # call django.setup() before task modules are imported self.worker_fixup.validate_models() def on_worker_init(self, **kwargs): self.worker_fixup.install() def now(self, utc=False): return datetime.utcnow() if utc else self._now() def autodiscover_tasks(self): from django.apps import apps return [config.name for config in apps.get_app_configs()] @cached_property def _now(self): return symbol_by_name('django.utils.timezone:now') class DjangoWorkerFixup(object): _db_recycles = 0 def __init__(self, app): self.app = app self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None) self._db = import_module('django.db') self._cache = import_module('django.core.cache') self._settings = symbol_by_name('django.conf:settings') self.interface_errors = ( symbol_by_name('django.db.utils.InterfaceError'), ) self.DatabaseError = symbol_by_name('django.db:DatabaseError') def django_setup(self): import django django.setup() def validate_models(self): from django.core.checks import run_checks self.django_setup() run_checks() def install(self): signals.beat_embedded_init.connect(self.close_database) 
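        # This wiring only runs once the fixup is active.  Typical activation
        # (illustrative project layout, usually in proj/celery.py): export
        # the settings module before the app is created:
        # >>> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')
        # >>> app = Celery('proj')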
signals.worker_ready.connect(self.on_worker_ready) signals.task_prerun.connect(self.on_task_prerun) signals.task_postrun.connect(self.on_task_postrun) signals.worker_process_init.connect(self.on_worker_process_init) self.close_database() self.close_cache() return self def on_worker_process_init(self, **kwargs): # Child process must validate models again if on Windows, # or if they were started using execv. if os.environ.get('FORKED_BY_MULTIPROCESSING'): self.validate_models() # close connections: # the parent process may have established these, # so need to close them. # calling db.close() on some DB connections will cause # the inherited DB conn to also get broken in the parent # process so we need to remove it without triggering any # network IO that close() might cause. for c in self._db.connections.all(): if c and c.connection: self._maybe_close_db_fd(c.connection) # use the _ version to avoid DB_REUSE preventing the conn.close() call self._close_database() self.close_cache() def _maybe_close_db_fd(self, fd): try: _maybe_close_fd(fd) except self.interface_errors: pass def on_task_prerun(self, sender, **kwargs): """Called before every task.""" if not getattr(sender.request, 'is_eager', False): self.close_database() def on_task_postrun(self, sender, **kwargs): # See https://groups.google.com/group/django-users/ # browse_thread/thread/78200863d0c07c6d/ if not getattr(sender.request, 'is_eager', False): self.close_database() self.close_cache() def close_database(self, **kwargs): if not self.db_reuse_max: return self._close_database() if self._db_recycles >= self.db_reuse_max * 2: self._db_recycles = 0 self._close_database() self._db_recycles += 1 def _close_database(self): for conn in self._db.connections.all(): try: conn.close() except self.interface_errors: pass except self.DatabaseError as exc: str_exc = str(exc) if 'closed' not in str_exc and 'not connected' not in str_exc: raise def close_cache(self): try: self._cache.cache.close() except (TypeError, AttributeError): pass def on_worker_ready(self, **kwargs): if self._settings.DEBUG: warnings.warn('Using settings.DEBUG leads to a memory leak, never ' 'use this setting in production environments!') celery-4.1.0/celery/task/0000755000175000017500000000000013135426347015162 5ustar omeromer00000000000000celery-4.1.0/celery/task/base.py0000644000175000017500000002211313130607475016443 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Deprecated task base class. The task implementation has been moved to :mod:`celery.app.task`. This contains the backward compatible Task class used in the old API, and shouldn't be used in new applications. """ from __future__ import absolute_import, unicode_literals from kombu import Exchange from celery import current_app from celery.app.task import Context, Task as BaseTask, _reprtask from celery.five import python_2_unicode_compatible, with_metaclass from celery.local import Proxy, class_property, reclassmethod from celery.schedules import maybe_schedule from celery.utils.log import get_task_logger __all__ = ['Context', 'Task', 'TaskType', 'PeriodicTask', 'task'] #: list of methods that must be classmethods in the old API. 
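#: A hedged sketch of the 2.x class-based usage these classmethods preserve
#: (class name and arguments are assumed):
#: >>> class AddTask(Task):
#: ...     def run(self, x, y):
#: ...         return x + y
#: >>> AddTask.delay(2, 2)  # called on the class itself, no instance needed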
_COMPAT_CLASSMETHODS = ( 'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request', 'signature_from_request', 'signature', 'AsyncResult', 'subtask', '_get_request', '_get_exec_options', ) @python_2_unicode_compatible class _CompatShared(object): def __init__(self, name, cons): self.name = name self.cons = cons def __hash__(self): return hash(self.name) def __repr__(self): return '<OldTask: %r>' % (self.name,) def __call__(self, app): return self.cons(app) class TaskType(type): """Meta class for tasks. Automatically registers the task in the task registry (except if the :attr:`Task.abstract` attribute is set). If no :attr:`Task.name` attribute is provided, then the name is generated from the module and class name. """ _creation_count = {} # used by old non-abstract task classes def __new__(cls, name, bases, attrs): new = super(TaskType, cls).__new__ task_module = attrs.get('__module__') or '__main__' # - Abstract class: abstract attribute shouldn't be inherited. abstract = attrs.pop('abstract', None) if abstract or not attrs.get('autoregister', True): return new(cls, name, bases, attrs) # The 'app' attribute is now a property, with the real app located # in the '_app' attribute. Previously this was a regular attribute, # so we should support classes defining it. app = attrs.pop('_app', None) or attrs.pop('app', None) # Attempt to inherit app from one of the bases if not isinstance(app, Proxy) and app is None: for base in bases: if getattr(base, '_app', None): app = base._app break else: app = current_app._get_current_object() attrs['_app'] = app # - Automatically generate missing/empty name. task_name = attrs.get('name') if not task_name: attrs['name'] = task_name = app.gen_task_name(name, task_module) if not attrs.get('_decorated'): # non decorated tasks must also be shared in case # an app is created multiple times due to modules # imported under multiple names. # Hairy stuff, here to be compatible with 2.x. # People shouldn't use non-abstract task classes anymore, # use the task decorator. from celery._state import connect_on_app_finalize unique_name = '.'.join([task_module, name]) if unique_name not in cls._creation_count: # the creation count is used as a safety # so that the same task isn't added recursively # to the set of constructors. cls._creation_count[unique_name] = 1 connect_on_app_finalize(_CompatShared( unique_name, lambda app: TaskType.__new__(cls, name, bases, dict(attrs, _app=app)), )) # - Create and register class. # Because of the way import happens (recursively) # this may or may not be the first time the task tries to register # with the framework. There should only be one class for each task # name, so we always return the registered version. tasks = app._tasks if task_name not in tasks: tasks.register(new(cls, name, bases, attrs)) instance = tasks[task_name] instance.bind(app) return instance.__class__ def __repr__(self): return _reprtask(self) @with_metaclass(TaskType) @python_2_unicode_compatible class Task(BaseTask): """Deprecated Task base class. Modern applications should use :class:`celery.Task` instead. """ abstract = True __bound__ = False __v2_compat__ = True # - Deprecated compat.
attributes -: queue = None routing_key = None exchange = None exchange_type = None delivery_mode = None mandatory = False # XXX deprecated immediate = False # XXX deprecated priority = None type = 'regular' from_config = BaseTask.from_config + ( ('exchange_type', 'task_default_exchange_type'), ('delivery_mode', 'task_default_delivery_mode'), ) # In old Celery the @task decorator didn't exist, so one would create # classes instead and use them directly (e.g., MyTask.apply_async()). # the use of classmethods was a hack so that it was not necessary # to instantiate the class before using it, but it has only # given us pain (like all magic). for name in _COMPAT_CLASSMETHODS: locals()[name] = reclassmethod(getattr(BaseTask, name)) @class_property def request(self): return self._get_request() @class_property def backend(self): if self._backend is None: return self.app.backend return self._backend @backend.setter def backend(cls, value): # noqa cls._backend = value @classmethod def get_logger(cls, **kwargs): return get_task_logger(cls.name) @classmethod def establish_connection(cls): """Deprecated method used to get a broker connection. Should be replaced with :meth:`@Celery.connection` instead, or by acquiring connections from the connection pool: Examples: >>> # using the connection pool >>> with celery.pool.acquire(block=True) as conn: ... pass >>> # establish fresh connection >>> with celery.connection_for_write() as conn: ... pass """ return cls._get_app().connection_for_write() def get_publisher(self, connection=None, exchange=None, exchange_type=None, **options): """Deprecated method to get the task publisher (now called producer). Should be replaced with :class:`kombu.Producer`: .. code-block:: python with app.connection_for_write() as conn: with app.amqp.Producer(conn) as prod: my_task.apply_async(producer=prod) or even better is to use the :class:`@amqp.producer_pool`: .. code-block:: python with app.producer_or_acquire() as prod: my_task.apply_async(producer=prod) """ exchange = self.exchange if exchange is None else exchange if exchange_type is None: exchange_type = self.exchange_type connection = connection or self.establish_connection() return self._get_app().amqp.Producer( connection, exchange=exchange and Exchange(exchange, exchange_type), routing_key=self.routing_key, auto_declare=False, **options) @classmethod def get_consumer(cls, connection=None, queues=None, **kwargs): """Get consumer for the queue this task is sent to. Deprecated! Should be replaced by :class:`@amqp.TaskConsumer`. 
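        Example of the replacement API (illustrative):
            >>> with app.connection_for_read() as conn:
            ...     consumer = app.amqp.TaskConsumer(conn)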
""" Q = cls._get_app().amqp connection = connection or cls.establish_connection() if queues is None: queues = Q.queues[cls.queue] if cls.queue else Q.default_queue return Q.TaskConsumer(connection, queues, **kwargs) class PeriodicTask(Task): """A task that adds itself to the :setting:`beat_schedule` setting.""" abstract = True ignore_result = True relative = False options = None compat = True def __init__(self): if not hasattr(self, 'run_every'): raise NotImplementedError( 'Periodic tasks must have a run_every attribute') self.run_every = maybe_schedule(self.run_every, self.relative) super(PeriodicTask, self).__init__() @classmethod def on_bound(cls, app): app.conf.beat_schedule[cls.name] = { 'task': cls.name, 'schedule': cls.run_every, 'args': (), 'kwargs': {}, 'options': cls.options or {}, 'relative': cls.relative, } def task(*args, **kwargs): """Deprecated decorator, please use :func:`celery.task`.""" return current_app.task(*args, **dict({'base': Task}, **kwargs)) def periodic_task(*args, **options): """Deprecated decorator, please use :setting:`beat_schedule`.""" return task(**dict({'base': PeriodicTask}, **options)) celery-4.1.0/celery/task/__init__.py0000644000175000017500000000314013130607475017267 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Old deprecated task module. This is the old task module, it shouldn't be used anymore, import from the main 'celery' module instead. If you're looking for the decorator implementation then that's in ``celery.app.base.Celery.task``. """ from __future__ import absolute_import, unicode_literals from celery._state import current_app, current_task as current from celery.local import LazyModule, Proxy, recreate_module __all__ = [ 'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task', 'group', 'chord', 'subtask', ] STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False if STATICA_HACK: # pragma: no cover # This is never executed, but tricks static analyzers (PyDev, PyCharm, # pylint, etc.) into knowing the types of these symbols, and what # they contain. 
from celery.canvas import group, chord, subtask from .base import BaseTask, Task, PeriodicTask, task, periodic_task class module(LazyModule): def __call__(self, *args, **kwargs): return self.task(*args, **kwargs) old_module, new_module = recreate_module( # pragma: no cover __name__, by_module={ 'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task'], 'celery.canvas': ['group', 'chord', 'subtask'], }, base=module, __package__='celery.task', __file__=__file__, __path__=__path__, __doc__=__doc__, current=current, discard_all=Proxy(lambda: current_app.control.purge), backend_cleanup=Proxy( lambda: current_app.tasks['celery.backend_cleanup'] ), ) celery-4.1.0/celery/bin/0000755000175000017500000000000013135426347014770 5ustar omeromer00000000000000celery-4.1.0/celery/bin/base.py0000644000175000017500000005227213130607475016262 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Base command-line interface.""" from __future__ import absolute_import, print_function, unicode_literals import argparse import os import random import re import sys import warnings import json from collections import defaultdict from heapq import heappush from pprint import pformat from celery import VERSION_BANNER, Celery, maybe_patch_concurrency from celery import signals from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning from celery.five import ( getfullargspec, items, python_2_unicode_compatible, string, string_t, text_t, long_t, ) from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE, isatty from celery.utils import imports from celery.utils import term from celery.utils import text from celery.utils.functional import dictfilter from celery.utils.nodenames import node_format, host_format from celery.utils.objects import Bunch # Option is here for backwards compatiblity, as third-party commands # may import it from here. try: from optparse import Option # pylint: disable=deprecated-module except ImportError: # pragma: no cover Option = None # noqa try: input = raw_input except NameError: # pragma: no cover pass __all__ = [ 'Error', 'UsageError', 'Extensions', 'Command', 'Option', 'daemon_options', ] # always enable DeprecationWarnings, so our users can see them. for warning in (CDeprecationWarning, CPendingDeprecationWarning): warnings.simplefilter('once', warning, 0) ARGV_DISABLED = """ Unrecognized command-line arguments: {0} Try --help? """ find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)') find_rst_ref = re.compile(r':\w+:`(.+?)`') find_rst_decl = re.compile(r'^\s*\.\. .+?::.+$') def _optparse_callback_to_type(option, callback): parser = Bunch(values=Bunch()) def _on_arg(value): callback(option, None, value, parser) return getattr(parser.values, option.dest) return _on_arg def _add_optparse_argument(parser, opt, typemap={ 'string': text_t, 'int': int, 'long': long_t, 'float': float, 'complex': complex, 'choice': None}): if opt.callback: opt.type = _optparse_callback_to_type(opt, opt.type) # argparse checks for existence of this kwarg if opt.action == 'callback': opt.action = None # store_true sets value to "('NO', 'DEFAULT')" for some # crazy reason, so not to set a sane default here. 
if opt.action == 'store_true' and opt.default is None: opt.default = False parser.add_argument( *opt._long_opts + opt._short_opts, **dictfilter(dict( action=opt.action, type=typemap.get(opt.type, opt.type), dest=opt.dest, nargs=opt.nargs, choices=opt.choices, help=opt.help, metavar=opt.metavar, default=opt.default))) def _add_compat_options(parser, options): for option in options or (): if callable(option): option(parser) else: _add_optparse_argument(parser, option) @python_2_unicode_compatible class Error(Exception): """Exception raised by commands.""" status = EX_FAILURE def __init__(self, reason, status=None): self.reason = reason self.status = status if status is not None else self.status super(Error, self).__init__(reason, status) def __str__(self): return self.reason class UsageError(Error): """Exception raised for malformed arguments.""" status = EX_USAGE class Extensions(object): """Loads extensions from setuptools entrypoints.""" def __init__(self, namespace, register): self.names = [] self.namespace = namespace self.register = register def add(self, cls, name): heappush(self.names, name) self.register(cls, name=name) def load(self): for name, cls in imports.load_extension_classes(self.namespace): self.add(cls, name) return self.names class Command(object): """Base class for command-line applications. Arguments: app (~@Celery): The app to use. get_app (Callable): Fucntion returning the current app when no app provided. """ Error = Error UsageError = UsageError Parser = argparse.ArgumentParser #: Arg list used in help. args = '' #: Application version. version = VERSION_BANNER #: If false the parser will raise an exception if positional #: args are provided. supports_args = True #: List of options (without preload options). option_list = None # module Rst documentation to parse help from (if any) doc = None # Some programs (multi) does not want to load the app specified # (Issue #1008). respects_app_option = True #: Enable if the application should support config from the cmdline. enable_config_from_cmdline = False #: Default configuration name-space. namespace = None #: Text to print at end of --help epilog = None #: Text to print in --help before option list. description = '' #: Set to true if this command doesn't have sub-commands leaf = True # used by :meth:`say_remote_command_reply`. show_body = True # used by :meth:`say_chat`. show_reply = True prog_name = 'celery' #: Name of argparse option used for parsing positional args. args_name = 'args' def __init__(self, app=None, get_app=None, no_color=False, stdout=None, stderr=None, quiet=False, on_error=None, on_usage_error=None): self.app = app self.get_app = get_app or self._get_default_app self.stdout = stdout or sys.stdout self.stderr = stderr or sys.stderr self._colored = None self._no_color = no_color self.quiet = quiet if not self.description: self.description = self._strip_restructeredtext(self.__doc__) if on_error: self.on_error = on_error if on_usage_error: self.on_usage_error = on_usage_error def run(self, *args, **options): raise NotImplementedError('subclass responsibility') def on_error(self, exc): # pylint: disable=method-hidden # on_error argument to __init__ may override this method. self.error(self.colored.red('Error: {0}'.format(exc))) def on_usage_error(self, exc): # pylint: disable=method-hidden # on_usage_error argument to __init__ may override this method. self.handle_error(exc) def on_concurrency_setup(self): pass def __call__(self, *args, **kwargs): random.seed() # maybe we were forked. 
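        # Dispatch sketch: verify_args() checks the positional arguments
        # against run()'s signature, run() does the real work, and a None
        # return value is mapped to EX_OK; Error/UsageError raised by
        # run() are converted to their exit codes below.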
self.verify_args(args) try: ret = self.run(*args, **kwargs) return ret if ret is not None else EX_OK except self.UsageError as exc: self.on_usage_error(exc) return exc.status except self.Error as exc: self.on_error(exc) return exc.status def verify_args(self, given, _index=0): S = getfullargspec(self.run) _index = 1 if S.args and S.args[0] == 'self' else _index required = S.args[_index:-len(S.defaults) if S.defaults else None] missing = required[len(given):] if missing: raise self.UsageError('Missing required {0}: {1}'.format( text.pluralize(len(missing), 'argument'), ', '.join(missing) )) def execute_from_commandline(self, argv=None): """Execute application from command-line. Arguments: argv (List[str]): The list of command-line arguments. Defaults to ``sys.argv``. """ if argv is None: argv = list(sys.argv) # Should we load any special concurrency environment? self.maybe_patch_concurrency(argv) self.on_concurrency_setup() # Dump version and exit if '--version' arg set. self.early_version(argv) argv = self.setup_app_from_commandline(argv) self.prog_name = os.path.basename(argv[0]) return self.handle_argv(self.prog_name, argv[1:]) def run_from_argv(self, prog_name, argv=None, command=None): return self.handle_argv(prog_name, sys.argv if argv is None else argv, command) def maybe_patch_concurrency(self, argv=None): argv = argv or sys.argv pool_option = self.with_pool_option(argv) if pool_option: maybe_patch_concurrency(argv, *pool_option) def usage(self, command): return '%(prog)s {0} [options] {self.args}'.format(command, self=self) def add_arguments(self, parser): pass def get_options(self): # This is for optparse options, please use add_arguments. return self.option_list def add_preload_arguments(self, parser): group = parser.add_argument_group('Global Options') group.add_argument('-A', '--app', default=None) group.add_argument('-b', '--broker', default=None) group.add_argument('--loader', default=None) group.add_argument('--config', default=None) group.add_argument('--workdir', default=None) group.add_argument( '--no-color', '-C', action='store_true', default=None) group.add_argument('--quiet', '-q', action='store_true') def _add_version_argument(self, parser): parser.add_argument( '--version', action='version', version=self.version, ) def prepare_arguments(self, parser): pass def expanduser(self, value): if isinstance(value, string_t): return os.path.expanduser(value) return value def ask(self, q, choices, default=None): """Prompt user to choose from a tuple of string values. If a default is not specified the question will be repeated until the user gives a valid choice. Matching is case insensitive. Arguments: q (str): the question to ask (don't include questionark) choice (Tuple[str]): tuple of possible choices, must be lowercase. default (Any): Default value if any. """ schoices = choices if default is not None: schoices = [c.upper() if c == default else c.lower() for c in choices] schoices = '/'.join(schoices) p = '{0} ({1})? '.format(q.capitalize(), schoices) while 1: val = input(p).lower() if val in choices: return val elif default is not None: break return default def handle_argv(self, prog_name, argv, command=None): """Parse arguments from argv and dispatch to :meth:`run`. Warning: Exits with an error message if :attr:`supports_args` is disabled and ``argv`` contains positional arguments. Arguments: prog_name (str): The program name (``argv[0]``). argv (List[str]): Rest of command-line arguments. 
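        Note:
            The parsed options are passed through :meth:`prepare_args`,
            which drops private (``_``-prefixed) keys and ``~``-expands
            path values, before being handed to :meth:`run`.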
""" options, args = self.prepare_args( *self.parse_options(prog_name, argv, command)) return self(*args, **options) def prepare_args(self, options, args): if options: options = { k: self.expanduser(v) for k, v in items(options) if not k.startswith('_') } args = [self.expanduser(arg) for arg in args] self.check_args(args) return options, args def check_args(self, args): if not self.supports_args and args: self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE) def error(self, s): self.out(s, fh=self.stderr) def out(self, s, fh=None): print(s, file=fh or self.stdout) def die(self, msg, status=EX_FAILURE): self.error(msg) sys.exit(status) def early_version(self, argv): if '--version' in argv: print(self.version, file=self.stdout) sys.exit(0) def parse_options(self, prog_name, arguments, command=None): """Parse the available options.""" # Don't want to load configuration to just print the version, # so we handle --version manually here. self.parser = self.create_parser(prog_name, command) options = vars(self.parser.parse_args(arguments)) return options, options.pop(self.args_name, None) or [] def create_parser(self, prog_name, command=None): # for compatibility with optparse usage. usage = self.usage(command).replace('%prog', '%(prog)s') parser = self.Parser( prog=prog_name, usage=usage, epilog=self._format_epilog(self.epilog), formatter_class=argparse.RawDescriptionHelpFormatter, description=self._format_description(self.description), ) self._add_version_argument(parser) self.add_preload_arguments(parser) self.add_arguments(parser) self.add_compat_options(parser, self.get_options()) self.add_compat_options(parser, self.app.user_options['preload']) if self.supports_args: # for backward compatibility with optparse, we automatically # add arbitrary positional args. parser.add_argument(self.args_name, nargs='*') return self.prepare_parser(parser) def _format_epilog(self, epilog): if epilog: return '\n{0}\n\n'.format(epilog) return '' def _format_description(self, description): width = argparse.HelpFormatter('prog')._width return text.ensure_newlines( text.fill_paragraphs(text.dedent(description), width)) def add_compat_options(self, parser, options): _add_compat_options(parser, options) def prepare_parser(self, parser): docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc] for doc in docs: for long_opt, help in items(doc): option = parser._option_string_actions[long_opt] if option is not None: option.help = ' '.join(help).format(default=option.default) return parser def setup_app_from_commandline(self, argv): preload_options = self.parse_preload_options(argv) quiet = preload_options.get('quiet') if quiet is not None: self.quiet = quiet try: self.no_color = preload_options['no_color'] except KeyError: pass workdir = preload_options.get('workdir') if workdir: os.chdir(workdir) app = (preload_options.get('app') or os.environ.get('CELERY_APP') or self.app) preload_loader = preload_options.get('loader') if preload_loader: # Default app takes loader from this env (Issue #1066). 
os.environ['CELERY_LOADER'] = preload_loader loader = (preload_loader, os.environ.get('CELERY_LOADER') or 'default') broker = preload_options.get('broker', None) if broker: os.environ['CELERY_BROKER_URL'] = broker config = preload_options.get('config') if config: os.environ['CELERY_CONFIG_MODULE'] = config if self.respects_app_option: if app: self.app = self.find_app(app) elif self.app is None: self.app = self.get_app(loader=loader) if self.enable_config_from_cmdline: argv = self.process_cmdline_config(argv) else: self.app = Celery(fixups=[]) self._handle_user_preload_options(argv) return argv def _handle_user_preload_options(self, argv): user_preload = tuple(self.app.user_options['preload'] or ()) if user_preload: user_options = self._parse_preload_options(argv, user_preload) signals.user_preload_options.send( sender=self, app=self.app, options=user_options, ) def find_app(self, app): from celery.app.utils import find_app return find_app(app, symbol_by_name=self.symbol_by_name) def symbol_by_name(self, name, imp=imports.import_from_cwd): return imports.symbol_by_name(name, imp=imp) get_cls_by_name = symbol_by_name # XXX compat def process_cmdline_config(self, argv): try: cargs_start = argv.index('--') except ValueError: return argv argv, cargs = argv[:cargs_start], argv[cargs_start + 1:] self.app.config_from_cmdline(cargs, namespace=self.namespace) return argv def parse_preload_options(self, args): return self._parse_preload_options(args, [self.add_preload_arguments]) def _parse_preload_options(self, args, options): args = [arg for arg in args if arg not in ('-h', '--help')] parser = self.Parser() self.add_compat_options(parser, options) namespace, _ = parser.parse_known_args(args) return vars(namespace) def add_append_opt(self, acc, opt, value): default = opt.default or [] if opt.dest not in acc: acc[opt.dest] = default acc[opt.dest].append(value) def parse_doc(self, doc): options, in_option = defaultdict(list), None for line in doc.splitlines(): if line.startswith('.. cmdoption::'): m = find_long_opt.match(line) if m: in_option = m.groups()[0].strip() assert in_option, 'missing long opt' elif in_option and line.startswith(' ' * 4): if not find_rst_decl.match(line): options[in_option].append( find_rst_ref.sub( r'\1', line.strip()).replace('`', '')) return options def _strip_restructeredtext(self, s): return '\n'.join( find_rst_ref.sub(r'\1', line.replace('`', '')) for line in (s or '').splitlines() if not find_rst_decl.match(line) ) def with_pool_option(self, argv): """Return tuple of ``(short_opts, long_opts)``. Returns only if the command supports a pool argument, and used to monkey patch eventlet/gevent environments as early as possible. Example: >>> has_pool_option = (['-P'], ['--pool']) """ pass def node_format(self, s, nodename, **extra): return node_format(s, nodename, **extra) def host_format(self, s, **extra): return host_format(s, **extra) def _get_default_app(self, *args, **kwargs): from celery._state import get_current_app return get_current_app() # omit proxy def pretty_list(self, n): c = self.colored if not n: return '- empty -' return '\n'.join( str(c.reset(c.white('*'), ' {0}'.format(item))) for item in n ) def pretty_dict_ok_error(self, n): c = self.colored try: return (c.green('OK'), text.indent(self.pretty(n['ok'])[1], 4)) except KeyError: pass return (c.red('ERROR'), text.indent(self.pretty(n['error'])[1], 4)) def say_remote_command_reply(self, replies): c = self.colored node = next(iter(replies)) # <-- take first. 
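        # `replies` maps node names to reply dicts, e.g.:
        #     {'worker1@example.com': {'ok': 'pong'}}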
reply = replies[node] status, preply = self.pretty(reply) self.say_chat('->', c.cyan(node, ': ') + status, text.indent(preply, 4) if self.show_reply else '') def pretty(self, n): OK = str(self.colored.green('OK')) if isinstance(n, list): return OK, self.pretty_list(n) if isinstance(n, dict): if 'ok' in n or 'error' in n: return self.pretty_dict_ok_error(n) else: return OK, json.dumps(n, sort_keys=True, indent=4) if isinstance(n, string_t): return OK, string(n) return OK, pformat(n) def say_chat(self, direction, title, body=''): c = self.colored if direction == '<-' and self.quiet: return dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' self.out(c.reset(dirstr, title)) if body and self.show_body: self.out(body) @property def colored(self): if self._colored is None: self._colored = term.colored( enabled=isatty(self.stdout) and not self.no_color) return self._colored @colored.setter def colored(self, obj): self._colored = obj @property def no_color(self): return self._no_color @no_color.setter def no_color(self, value): self._no_color = value if self._colored is not None: self._colored.enabled = not self._no_color def daemon_options(parser, default_pidfile=None, default_logfile=None): """Add daemon options to argparse parser.""" group = parser.add_argument_group('Daemonization Options') group.add_argument('-f', '--logfile', default=default_logfile), group.add_argument('--pidfile', default=default_pidfile), group.add_argument('--uid', default=None), group.add_argument('--gid', default=None), group.add_argument('--umask', default=None), group.add_argument('--executable', default=None), celery-4.1.0/celery/bin/worker.py0000644000175000017500000002637713130607475016670 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Program used to start a Celery worker instance. The :program:`celery worker` command (previously known as ``celeryd``) .. program:: celery worker .. seealso:: See :ref:`preload-options`. .. cmdoption:: -c, --concurrency Number of child processes processing the queue. The default is the number of CPUs available on your system. .. cmdoption:: -P, --pool Pool implementation: prefork (default), eventlet, gevent or solo. .. cmdoption:: -n, --hostname Set custom hostname (e.g., 'w1@%%h'). Expands: %%h (hostname), %%n (name) and %%d, (domain). .. cmdoption:: -B, --beat Also run the `celery beat` periodic task scheduler. Please note that there must only be one instance of this service. .. note:: ``-B`` is meant to be used for development purposes. For production environment, you need to start :program:`celery beat` separately. .. cmdoption:: -Q, --queues List of queues to enable for this worker, separated by comma. By default all configured queues are enabled. Example: `-Q video,image` .. cmdoption:: -X, --exclude-queues List of queues to disable for this worker, separated by comma. By default all configured queues are enabled. Example: `-X video,image`. .. cmdoption:: -I, --include Comma separated list of additional modules to import. Example: -I foo.tasks,bar.tasks .. cmdoption:: -s, --schedule Path to the schedule database if running with the `-B` option. Defaults to `celerybeat-schedule`. The extension ".db" may be appended to the filename. .. cmdoption:: -O Apply optimization profile. Supported: default, fair .. cmdoption:: --prefetch-multiplier Set custom prefetch multiplier value for this worker instance. .. cmdoption:: --scheduler Scheduler class to use. Default is :class:`celery.beat.PersistentScheduler` .. 
cmdoption:: -S, --statedb Path to the state database. The extension '.db' may be appended to the filename. Default: {default} .. cmdoption:: -E, --task-events Send task-related events that can be captured by monitors like :program:`celery events`, `celerymon`, and others. .. cmdoption:: --without-gossip Don't subscribe to other workers events. .. cmdoption:: --without-mingle Don't synchronize with other workers at start-up. .. cmdoption:: --without-heartbeat Don't send event heartbeats. .. cmdoption:: --heartbeat-interval Interval in seconds at which to send worker heartbeat .. cmdoption:: --purge Purges all waiting tasks before the daemon is started. **WARNING**: This is unrecoverable, and the tasks will be deleted from the messaging server. .. cmdoption:: --time-limit Enables a hard time limit (in seconds int/float) for tasks. .. cmdoption:: --soft-time-limit Enables a soft time limit (in seconds int/float) for tasks. .. cmdoption:: --max-tasks-per-child Maximum number of tasks a pool worker can execute before it's terminated and replaced by a new worker. .. cmdoption:: --max-memory-per-child Maximum amount of resident memory, in KiB, that may be consumed by a child process before it will be replaced by a new one. If a single task causes a child process to exceed this limit, the task will be completed and the child process will be replaced afterwards. Default: no limit. .. cmdoption:: --autoscale Enable autoscaling by providing max_concurrency, min_concurrency. Example:: --autoscale=10,3 (always keep 3 processes, but grow to 10 if necessary) .. cmdoption:: --detach Start worker as a background process. .. cmdoption:: -f, --logfile Path to log file. If no logfile is specified, `stderr` is used. .. cmdoption:: -l, --loglevel Logging level, choose between `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`, or `FATAL`. .. cmdoption:: --pidfile Optional file used to store the process pid. The program won't start if this file already exists and the pid is still alive. .. cmdoption:: --uid User id, or user name of the user to run as after detaching. .. cmdoption:: --gid Group id, or group name of the main group to change to after detaching. .. cmdoption:: --umask Effective :manpage:`umask(1)` (in octal) of the process after detaching. Inherits the :manpage:`umask(1)` of the parent process by default. .. cmdoption:: --workdir Optional directory to change to after detaching. .. cmdoption:: --executable Executable to use for the detached process. """ from __future__ import absolute_import, unicode_literals import sys from celery import concurrency from celery.bin.base import Command, daemon_options from celery.bin.celeryd_detach import detached_celeryd from celery.five import string_t from celery.platforms import maybe_drop_privileges from celery.utils.log import LOG_LEVELS, mlevel from celery.utils.nodenames import default_nodename __all__ = ['worker', 'main'] HELP = __doc__ class worker(Command): """Start worker instance. Examples: .. 
code-block:: console $ celery worker --app=proj -l info $ celery worker -A proj -l info -Q hipri,lopri $ celery worker -A proj --concurrency=4 $ celery worker -A proj --concurrency=1000 -P eventlet $ celery worker --autoscale=10,0 """ doc = HELP # parse help from this too namespace = 'worker' enable_config_from_cmdline = True supports_args = False removed_flags = {'--no-execv', '--force-execv'} def run_from_argv(self, prog_name, argv=None, command=None): argv = [x for x in argv if x not in self.removed_flags] command = sys.argv[0] if command is None else command argv = sys.argv[1:] if argv is None else argv # parse options before detaching so errors can be handled. options, args = self.prepare_args( *self.parse_options(prog_name, argv, command)) self.maybe_detach([command] + argv) return self(*args, **options) def maybe_detach(self, argv, dopts=['-D', '--detach']): if any(arg in argv for arg in dopts): argv = [v for v in argv if v not in dopts] # will never return detached_celeryd(self.app).execute_from_commandline(argv) raise SystemExit(0) def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None, loglevel=None, logfile=None, pidfile=None, statedb=None, **kwargs): maybe_drop_privileges(uid=uid, gid=gid) # Pools like eventlet/gevent needs to patch libs as early # as possible. pool_cls = (concurrency.get_implementation(pool_cls) or self.app.conf.worker_pool) if self.app.IS_WINDOWS and kwargs.get('beat'): self.die('-B option does not work on Windows. ' 'Please run celery beat as a separate service.') hostname = self.host_format(default_nodename(hostname)) if loglevel: try: loglevel = mlevel(loglevel) except KeyError: # pragma: no cover self.die('Unknown level {0!r}. Please use one of {1}.'.format( loglevel, '|'.join( l for l in LOG_LEVELS if isinstance(l, string_t)))) worker = self.app.Worker( hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, logfile=logfile, # node format handled by celery.app.log.setup pidfile=self.node_format(pidfile, hostname), statedb=self.node_format(statedb, hostname), **kwargs) worker.start() return worker.exitcode def with_pool_option(self, argv): # this command support custom pools # that may have to be loaded as early as possible. 
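        # e.g. `celery worker -P eventlet` must monkey-patch the process
        # before the rest of the worker machinery is imported.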
return (['-P'], ['--pool']) def add_arguments(self, parser): conf = self.app.conf wopts = parser.add_argument_group('Worker Options') wopts.add_argument('-n', '--hostname') wopts.add_argument( '-D', '--detach', action='store_true', default=False, ) wopts.add_argument( '-S', '--statedb', default=conf.worker_state_db, ) wopts.add_argument('-l', '--loglevel', default='WARN') wopts.add_argument('-O', dest='optimization') wopts.add_argument( '--prefetch-multiplier', type=int, default=conf.worker_prefetch_multiplier, ) topts = parser.add_argument_group('Pool Options') topts.add_argument( '-c', '--concurrency', default=conf.worker_concurrency, type=int, ) topts.add_argument( '-P', '--pool', default=conf.worker_pool, ) topts.add_argument( '-E', '--task-events', '--events', action='store_true', default=conf.worker_send_task_events, ) topts.add_argument( '--time-limit', type=float, default=conf.task_time_limit, ) topts.add_argument( '--soft-time-limit', type=float, default=conf.task_soft_time_limit, ) topts.add_argument( '--max-tasks-per-child', '--maxtasksperchild', type=int, default=conf.worker_max_tasks_per_child, ) topts.add_argument( '--max-memory-per-child', '--maxmemperchild', type=int, default=conf.worker_max_memory_per_child, ) qopts = parser.add_argument_group('Queue Options') qopts.add_argument( '--purge', '--discard', action='store_true', default=False, ) qopts.add_argument('--queues', '-Q', default=[]) qopts.add_argument('--exclude-queues', '-X', default=[]) qopts.add_argument('--include', '-I', default=[]) fopts = parser.add_argument_group('Features') fopts.add_argument( '--without-gossip', action='store_true', default=False, ) fopts.add_argument( '--without-mingle', action='store_true', default=False, ) fopts.add_argument( '--without-heartbeat', action='store_true', default=False, ) fopts.add_argument('--heartbeat-interval', type=int) fopts.add_argument('--autoscale') daemon_options(parser) bopts = parser.add_argument_group('Embedded Beat Options') bopts.add_argument('-B', '--beat', action='store_true', default=False) bopts.add_argument( '-s', '--schedule-filename', '--schedule', default=conf.beat_schedule_filename, ) bopts.add_argument('--scheduler') user_options = self.app.user_options['worker'] if user_options: uopts = parser.add_argument_group('User Options') self.add_compat_options(uopts, user_options) def main(app=None): """Start worker.""" # Fix for setuptools generated scripts, so that it will # work with multiprocessing fork emulation. # (see multiprocessing.forking.get_preparation_data()) if __name__ != '__main__': # pragma: no cover sys.modules['__main__'] = sys.modules[__name__] from billiard import freeze_support freeze_support() worker(app=app).execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/result.py0000644000175000017500000000246413130607475016664 0ustar omeromer00000000000000"""The ``celery result`` program, used to inspect task results.""" from __future__ import absolute_import, unicode_literals from celery.bin.base import Command class result(Command): """Gives the return value for a given task id. Examples: .. 
code-block:: console $ celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 $ celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 -t tasks.add $ celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback """ args = '<task_id>' def add_arguments(self, parser): group = parser.add_argument_group('Result Options') group.add_argument( '--task', '-t', help='name of task (if custom backend)', ) group.add_argument( '--traceback', action='store_true', default=False, help='show traceback instead', ) def run(self, task_id, *args, **kwargs): result_cls = self.app.AsyncResult task = kwargs.get('task') traceback = kwargs.get('traceback', False) if task: result_cls = self.app.tasks[task].AsyncResult task_result = result_cls(task_id) if traceback: value = task_result.traceback else: value = task_result.get() self.out(self.pretty(value)[1]) celery-4.1.0/celery/bin/multi.py0000644000175000017500000003451513130607475016502 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Start multiple worker instances from the command-line. .. program:: celery multi Examples ======== .. code-block:: console $ # Single worker with explicit name and events enabled. $ celery multi start Leslie -E $ # Pidfiles and logfiles are stored in the current directory $ # by default. Use the --pidfile and --logfile arguments to change $ # this. The abbreviation %n will be expanded to the current $ # node name. $ celery multi start Leslie -E --pidfile=/var/run/celery/%n.pid --logfile=/var/log/celery/%n%I.log $ # You need to add the same arguments when you restart, $ # as these aren't persisted anywhere. $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid --logfile=/var/run/celery/%n%I.log $ # To stop the node, you need to specify the same pidfile. $ celery multi stop Leslie --pidfile=/var/run/celery/%n.pid $ # 3 workers, with 3 processes each $ celery multi start 3 -c 3 celery worker -n celery1@myhost -c 3 celery worker -n celery2@myhost -c 3 celery worker -n celery3@myhost -c 3 $ # start 3 named workers $ celery multi start image video data -c 3 celery worker -n image@myhost -c 3 celery worker -n video@myhost -c 3 celery worker -n data@myhost -c 3 $ # specify custom hostname $ celery multi start 2 --hostname=worker.example.com -c 3 celery worker -n celery1@worker.example.com -c 3 celery worker -n celery2@worker.example.com -c 3 $ # specify fully qualified nodenames $ celery multi start foo@worker.example.com bar@worker.example.com -c 3 $ # fully qualified nodenames but using the current hostname $ celery multi start foo@%h bar@%h $ # Advanced example starting 10 workers in the background: $ # * Three of the workers process the images and video queues $ # * Two of the workers process the data queue with loglevel DEBUG $ # * the rest process the 'default' queue. $ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data -Q default -L:4,5 DEBUG $ # You can show the commands necessary to start the workers with $ # the 'show' command: $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data -Q default -L:4,5 DEBUG $ # Additional options are added to each 'celery worker' command, $ # but you can also modify the options for ranges of workers, or for specific workers: $ # 3 workers: Two with 3 processes, and one with 10 processes.
$ celery multi start 3 -c 3 -c:1 10 celery worker -n celery1@myhost -c 10 celery worker -n celery2@myhost -c 3 celery worker -n celery3@myhost -c 3 $ # can also specify options for named workers $ celery multi start image video data -c 3 -c:image 10 celery worker -n image@myhost -c 10 celery worker -n video@myhost -c 3 celery worker -n data@myhost -c 3 $ # ranges and lists of workers in options are also allowed: $ # (-c:1-3 can also be written as -c:1,2,3) $ celery multi start 5 -c 3 -c:1-3 10 celery worker -n celery1@myhost -c 10 celery worker -n celery2@myhost -c 10 celery worker -n celery3@myhost -c 10 celery worker -n celery4@myhost -c 3 celery worker -n celery5@myhost -c 3 $ # lists also work with named workers $ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 celery worker -n foo@myhost -c 10 celery worker -n bar@myhost -c 10 celery worker -n baz@myhost -c 10 celery worker -n xuzzy@myhost -c 3 """ from __future__ import absolute_import, print_function, unicode_literals import os import signal import sys from functools import wraps from kombu.utils.objects import cached_property from celery import VERSION_BANNER from celery.apps.multi import Cluster, MultiParser, NamespacedOptionParser from celery.platforms import EX_FAILURE, EX_OK, signals from celery.utils import term from celery.utils.text import pluralize __all__ = ['MultiTool'] USAGE = """\ usage: {prog_name} start <node1 node2 nodeN|range> [worker options] {prog_name} stop <n1 n2 nN|range> [-SIG (default: -TERM)] {prog_name} restart <n1 n2 nN|range> [-SIG] [worker options] {prog_name} kill <n1 n2 nN|range> {prog_name} show <n1 n2 nN|range> [worker options] {prog_name} get hostname <n1 n2 nN|range> [-qv] [worker options] {prog_name} names <n1 n2 nN|range> {prog_name} expand template <n1 n2 nN|range> {prog_name} help additional options (must appear after command name): * --nosplash: Don't display program info. * --quiet: Don't show as much output. * --verbose: Show more output. * --no-color: Don't display colors. """ def main(): sys.exit(MultiTool().execute_from_commandline(sys.argv)) def splash(fun): @wraps(fun) def _inner(self, *args, **kwargs): self.splash() return fun(self, *args, **kwargs) return _inner def using_cluster(fun): @wraps(fun) def _inner(self, *argv, **kwargs): return fun(self, self.cluster_from_argv(argv), **kwargs) return _inner def using_cluster_and_sig(fun): @wraps(fun) def _inner(self, *argv, **kwargs): p, cluster = self._cluster_from_argv(argv) sig = self._find_sig_argument(p) return fun(self, cluster, sig, **kwargs) return _inner class TermLogger(object): splash_text = 'celery multi v{version}' splash_context = {'version': VERSION_BANNER} #: Final exit code.
retcode = 0 def setup_terminal(self, stdout, stderr, nosplash=False, quiet=False, verbose=False, no_color=False, **kwargs): self.stdout = stdout or sys.stdout self.stderr = stderr or sys.stderr self.nosplash = nosplash self.quiet = quiet self.verbose = verbose self.no_color = no_color def ok(self, m, newline=True, file=None): self.say(m, newline=newline, file=file) return EX_OK def say(self, m, newline=True, file=None): print(m, file=file or self.stdout, end='\n' if newline else '') def carp(self, m, newline=True, file=None): return self.say(m, newline, file or self.stderr) def error(self, msg=None): if msg: self.carp(msg) self.usage() return EX_FAILURE def info(self, msg, newline=True): if self.verbose: self.note(msg, newline=newline) def note(self, msg, newline=True): if not self.quiet: self.say(str(msg), newline=newline) @splash def usage(self): self.say(USAGE.format(prog_name=self.prog_name)) def splash(self): if not self.nosplash: self.note(self.colored.cyan( self.splash_text.format(**self.splash_context))) @cached_property def colored(self): return term.colored(enabled=not self.no_color) class MultiTool(TermLogger): """The ``celery multi`` program.""" MultiParser = MultiParser OptionParser = NamespacedOptionParser reserved_options = [ ('--nosplash', 'nosplash'), ('--quiet', 'quiet'), ('-q', 'quiet'), ('--verbose', 'verbose'), ('--no-color', 'no_color'), ] def __init__(self, env=None, cmd=None, fh=None, stdout=None, stderr=None, **kwargs): # fh is an old alias to stdout. self.env = env self.cmd = cmd self.setup_terminal(stdout or fh, stderr, **kwargs) self.fh = self.stdout self.prog_name = 'celery multi' self.commands = { 'start': self.start, 'show': self.show, 'stop': self.stop, 'stopwait': self.stopwait, 'stop_verify': self.stopwait, # compat alias 'restart': self.restart, 'kill': self.kill, 'names': self.names, 'expand': self.expand, 'get': self.get, 'help': self.help, } def execute_from_commandline(self, argv, cmd=None): # Reserve the --nosplash|--quiet|-q/--verbose options. argv = self._handle_reserved_options(argv) self.cmd = cmd if cmd is not None else self.cmd self.prog_name = os.path.basename(argv.pop(0)) if not self.validate_arguments(argv): return self.error() return self.call_command(argv[0], argv[1:]) def validate_arguments(self, argv): return argv and argv[0][0] != '-' def call_command(self, command, argv): try: return self.commands[command](*argv) or EX_OK except KeyError: return self.error('Invalid command: {0}'.format(command)) def _handle_reserved_options(self, argv): argv = list(argv) # don't modify callers argv. 
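        # Sketch: given ['multi', 'start', 'n1', '--verbose'] this sets
        # self.verbose = True and returns ['multi', 'start', 'n1'].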
for arg, attr in self.reserved_options: if arg in argv: setattr(self, attr, bool(argv.pop(argv.index(arg)))) return argv @splash @using_cluster def start(self, cluster): self.note('> Starting nodes...') return int(any(cluster.start())) @splash @using_cluster_and_sig def stop(self, cluster, sig, **kwargs): return cluster.stop(sig=sig, **kwargs) @splash @using_cluster_and_sig def stopwait(self, cluster, sig, **kwargs): return cluster.stopwait(sig=sig, **kwargs) stop_verify = stopwait # compat @splash @using_cluster_and_sig def restart(self, cluster, sig, **kwargs): return int(any(cluster.restart(sig=sig, **kwargs))) @using_cluster def names(self, cluster): self.say('\n'.join(n.name for n in cluster)) def get(self, wanted, *argv): try: node = self.cluster_from_argv(argv).find(wanted) except KeyError: return EX_FAILURE else: return self.ok(' '.join(node.argv)) @using_cluster def show(self, cluster): return self.ok('\n'.join( ' '.join(node.argv_with_executable) for node in cluster )) @splash @using_cluster def kill(self, cluster): return cluster.kill() def expand(self, template, *argv): return self.ok('\n'.join( node.expander(template) for node in self.cluster_from_argv(argv) )) def help(self, *argv): self.say(__doc__) def _find_sig_argument(self, p, default=signal.SIGTERM): args = p.args[len(p.values):] for arg in reversed(args): if len(arg) == 2 and arg[0] == '-': try: return int(arg[1]) except ValueError: pass if arg[0] == '-': try: return signals.signum(arg[1:]) except (AttributeError, TypeError): pass return default def _nodes_from_argv(self, argv, cmd=None): cmd = cmd if cmd is not None else self.cmd p = self.OptionParser(argv) p.parse() return p, self.MultiParser(cmd=cmd).parse(p) def cluster_from_argv(self, argv, cmd=None): _, cluster = self._cluster_from_argv(argv, cmd=cmd) return cluster def _cluster_from_argv(self, argv, cmd=None): p, nodes = self._nodes_from_argv(argv, cmd=cmd) return p, self.Cluster(list(nodes), cmd=cmd) def Cluster(self, nodes, cmd=None): return Cluster( nodes, cmd=cmd, env=self.env, on_stopping_preamble=self.on_stopping_preamble, on_send_signal=self.on_send_signal, on_still_waiting_for=self.on_still_waiting_for, on_still_waiting_progress=self.on_still_waiting_progress, on_still_waiting_end=self.on_still_waiting_end, on_node_start=self.on_node_start, on_node_restart=self.on_node_restart, on_node_shutdown_ok=self.on_node_shutdown_ok, on_node_status=self.on_node_status, on_node_signal_dead=self.on_node_signal_dead, on_node_signal=self.on_node_signal, on_node_down=self.on_node_down, on_child_spawn=self.on_child_spawn, on_child_signalled=self.on_child_signalled, on_child_failure=self.on_child_failure, ) def on_stopping_preamble(self, nodes): self.note(self.colored.blue('> Stopping nodes...')) def on_send_signal(self, node, sig): self.note('\t> {0.name}: {1} -> {0.pid}'.format(node, sig)) def on_still_waiting_for(self, nodes): num_left = len(nodes) if num_left: self.note(self.colored.blue( '> Waiting for {0} {1} -> {2}...'.format( num_left, pluralize(num_left, 'node'), ', '.join(str(node.pid) for node in nodes)), ), newline=False) def on_still_waiting_progress(self, nodes): self.note('.', newline=False) def on_still_waiting_end(self): self.note('') def on_node_signal_dead(self, node): self.note( 'Could not signal {0.name} ({0.pid}): No such process'.format( node)) def on_node_start(self, node): self.note('\t> {0.name}: '.format(node), newline=False) def on_node_restart(self, node): self.note(self.colored.blue( '> Restarting node {0.name}: '.format(node)), 
newline=False) def on_node_down(self, node): self.note('> {0.name}: {1.DOWN}'.format(node, self)) def on_node_shutdown_ok(self, node): self.note('\n\t> {0.name}: {1.OK}'.format(node, self)) def on_node_status(self, node, retval): self.note(retval and self.FAILED or self.OK) def on_node_signal(self, node, sig): self.note('Sending {sig} to node {0.name} ({0.pid})'.format( node, sig=sig)) def on_child_spawn(self, node, argstr, env): self.info(' {0}'.format(argstr)) def on_child_signalled(self, node, signum): self.note('* Child was terminated by signal {0}'.format(signum)) def on_child_failure(self, node, retcode): self.note('* Child terminated with exit code {0}'.format(retcode)) @cached_property def OK(self): return str(self.colored.green('OK')) @cached_property def FAILED(self): return str(self.colored.red('FAILED')) @cached_property def DOWN(self): return str(self.colored.magenta('DOWN')) if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/__init__.py0000644000175000017500000000015013130607475017073 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from .base import Option __all__ = ['Option'] celery-4.1.0/celery/bin/list.py0000644000175000017500000000266113130607475016320 0ustar omeromer00000000000000"""The ``celery list bindings`` command, used to inspect queue bindings.""" from __future__ import absolute_import, unicode_literals from celery.bin.base import Command class list_(Command): """Get info from broker. Note: For RabbitMQ the management plugin is required. Example: .. code-block:: console $ celery list bindings """ args = '[bindings]' def list_bindings(self, management): try: bindings = management.get_bindings() except NotImplementedError: raise self.Error('Your transport cannot list bindings.') def fmt(q, e, r): return self.out('{0:<28} {1:<28} {2}'.format(q, e, r)) fmt('Queue', 'Exchange', 'Routing Key') fmt('-' * 16, '-' * 16, '-' * 16) for b in bindings: fmt(b['destination'], b['source'], b['routing_key']) def run(self, what=None, *_, **kw): topics = {'bindings': self.list_bindings} available = ', '.join(topics) if not what: raise self.UsageError( 'Missing argument, specify one of: {0}'.format(available)) if what not in topics: raise self.UsageError( 'unknown topic {0!r} (choose one of: {1})'.format( what, available)) with self.app.connection() as conn: self.app.amqp.TaskConsumer(conn).declare() topics[what](conn.manager) celery-4.1.0/celery/bin/purge.py0000644000175000017500000000503213130607475016462 0ustar omeromer00000000000000"""The ``celery purge`` program, used to delete messages from queues.""" from __future__ import absolute_import, unicode_literals from celery.five import keys from celery.bin.base import Command from celery.utils import text class purge(Command): """Erase all messages from all known task queues. Warning: There's no undo operation for this command. """ warn_prelude = ( '{warning}: This will remove all tasks from {queues}: {names}.\n' ' There is no undo for this operation!\n\n' '(to skip this prompt use the -f option)\n' ) warn_prompt = 'Are you sure you want to delete all tasks' fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.' 
fmt_empty = 'No messages purged from {qnum} {queues}' def add_arguments(self, parser): group = parser.add_argument_group('Purging Options') group.add_argument( '--force', '-f', action='store_true', default=False, help="Don't prompt for verification", ) group.add_argument( '--queues', '-Q', default=[], help='Comma separated list of queue names to purge.', ) group.add_argument( '--exclude-queues', '-X', default=[], help='Comma separated list of queues names not to purge.', ) def run(self, force=False, queues=None, exclude_queues=None, **kwargs): queues = set(text.str_to_list(queues or [])) exclude = set(text.str_to_list(exclude_queues or [])) names = (queues or set(keys(self.app.amqp.queues))) - exclude qnum = len(names) messages = None if names: if not force: self.out(self.warn_prelude.format( warning=self.colored.red('WARNING'), queues=text.pluralize(qnum, 'queue'), names=', '.join(sorted(names)), )) if self.ask(self.warn_prompt, ('yes', 'no'), 'no') != 'yes': return with self.app.connection_for_write() as conn: messages = sum(self._purge(conn, queue) for queue in names) fmt = self.fmt_purged if messages else self.fmt_empty self.out(fmt.format( mnum=messages, qnum=qnum, messages=text.pluralize(messages, 'message'), queues=text.pluralize(qnum, 'queue'))) def _purge(self, conn, queue): try: return conn.default_channel.queue_purge(queue) or 0 except conn.channel_errors: return 0 celery-4.1.0/celery/bin/graph.py0000644000175000017500000001514713135426300016440 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The :program:`celery graph` command. .. program:: celery graph """ from __future__ import absolute_import, unicode_literals from operator import itemgetter from celery.five import items, python_2_unicode_compatible from celery.utils.graph import DependencyGraph, GraphFormatter from .base import Command __all__ = ['graph'] class graph(Command): """The ``celery graph`` command.""" args = """ [arguments] ..... bootsteps [worker] [consumer] ..... 
workers [enumerate] """ def run(self, what=None, *args, **kwargs): map = {'bootsteps': self.bootsteps, 'workers': self.workers} if not what: raise self.UsageError('missing type') elif what not in map: raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map))) return map[what](*args, **kwargs) def bootsteps(self, *args, **kwargs): worker = self.app.WorkController() include = {arg.lower() for arg in args or ['worker', 'consumer']} if 'worker' in include: worker_graph = worker.blueprint.graph if 'consumer' in include: worker.blueprint.connect_with(worker.consumer.blueprint) else: worker_graph = worker.consumer.blueprint.graph worker_graph.to_dot(self.stdout) def workers(self, *args, **kwargs): def simplearg(arg): return maybe_list(itemgetter(0, 2)(arg.partition(':'))) def maybe_list(l, sep=','): return (l[0], l[1].split(sep) if sep in l[1] else l[1]) args = dict(simplearg(arg) for arg in args) generic = 'generic' in args def generic_label(node): return '{0} ({1}://)'.format(type(node).__name__, node._label.split('://')[0]) @python_2_unicode_compatible class Node(object): force_label = None scheme = {} def __init__(self, label, pos=None): self._label = label self.pos = pos def label(self): return self._label def __str__(self): return self.label() class Thread(Node): scheme = { 'fillcolor': 'lightcyan4', 'fontcolor': 'yellow', 'shape': 'oval', 'fontsize': 10, 'width': 0.3, 'color': 'black', } def __init__(self, label, **kwargs): self.real_label = label super(Thread, self).__init__( label='thr-{0}'.format(next(tids)), pos=0, ) class Formatter(GraphFormatter): def label(self, obj): return obj and obj.label() def node(self, obj): scheme = dict(obj.scheme) if obj.pos else obj.scheme if isinstance(obj, Thread): scheme['label'] = obj.real_label return self.draw_node( obj, dict(self.node_scheme, **scheme), ) def terminal_node(self, obj): return self.draw_node( obj, dict(self.term_scheme, **obj.scheme), ) def edge(self, a, b, **attrs): if isinstance(a, Thread): attrs.update(arrowhead='none', arrowtail='tee') return self.draw_edge(a, b, self.edge_scheme, attrs) def subscript(n): S = {'0': 'â‚€', '1': 'â‚', '2': 'â‚‚', '3': '₃', '4': 'â‚„', '5': 'â‚…', '6': '₆', '7': '₇', '8': '₈', '9': '₉'} return ''.join([S[i] for i in str(n)]) class Worker(Node): pass class Backend(Node): scheme = { 'shape': 'folder', 'width': 2, 'height': 1, 'color': 'black', 'fillcolor': 'peachpuff3', } def label(self): return generic_label(self) if generic else self._label class Broker(Node): scheme = { 'shape': 'circle', 'fillcolor': 'cadetblue3', 'color': 'cadetblue4', 'height': 1, } def label(self): return generic_label(self) if generic else self._label from itertools import count tids = count(1) Wmax = int(args.get('wmax', 4) or 0) Tmax = int(args.get('tmax', 3) or 0) def maybe_abbr(l, name, max=Wmax): size = len(l) abbr = max and size > max if 'enumerate' in args: l = ['{0}{1}'.format(name, subscript(i + 1)) for i, obj in enumerate(l)] if abbr: l = l[0:max - 1] + [l[size - 1]] l[max - 2] = '{0}⎨…{1}⎬'.format( name[0], subscript(size - (max - 1))) return l try: workers = args['nodes'] threads = args.get('threads') or [] except KeyError: replies = self.app.control.inspect().stats() or {} workers, threads = [], [] for worker, reply in items(replies): workers.append(worker) threads.append(reply['pool']['max-concurrency']) wlen = len(workers) backend = args.get('backend', self.app.conf.result_backend) threads_for = {} workers = maybe_abbr(workers, 'Worker') if Wmax and wlen > Wmax: threads = threads[0:3] + [threads[-1]] 
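            # Keep a representative sample of the per-worker thread counts
            # (first three plus the last) to match the abbreviated worker
            # list built above.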
for i, threads in enumerate(threads): threads_for[workers[i]] = maybe_abbr( list(range(int(threads))), 'P', Tmax, ) broker = Broker(args.get( 'broker', self.app.connection_for_read().as_uri())) backend = Backend(backend) if backend else None deps = DependencyGraph(formatter=Formatter()) deps.add_arc(broker) if backend: deps.add_arc(backend) curworker = [0] for i, worker in enumerate(workers): worker = Worker(worker, pos=i) deps.add_arc(worker) deps.add_edge(worker, broker) if backend: deps.add_edge(worker, backend) threads = threads_for.get(worker._label) if threads: for thread in threads: thread = Thread(thread) deps.add_arc(thread) deps.add_edge(thread, worker) curworker[0] += 1 deps.to_dot(self.stdout) celery-4.1.0/celery/bin/celeryd_detach.py0000644000175000017500000001116713130607475020305 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Program used to daemonize the worker. Using :func:`os.execv` as forking and multiprocessing leads to weird issues (it was a long time ago now, but it could have something to do with the threading mutex bug) """ from __future__ import absolute_import, unicode_literals import argparse import celery import os import sys from celery.platforms import EX_FAILURE, detached from celery.utils.log import get_logger from celery.utils.nodenames import default_nodename, node_format from celery.bin.base import daemon_options __all__ = ['detached_celeryd', 'detach'] logger = get_logger(__name__) C_FAKEFORK = os.environ.get('C_FAKEFORK') def detach(path, argv, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, fake=False, app=None, executable=None, hostname=None): """Detach program by argv'.""" hostname = default_nodename(hostname) logfile = node_format(logfile, hostname) pidfile = node_format(pidfile, hostname) fake = 1 if C_FAKEFORK else fake with detached(logfile, pidfile, uid, gid, umask, workdir, fake, after_forkers=False): try: if executable is not None: path = executable os.execv(path, [path] + argv) except Exception: # pylint: disable=broad-except if app is None: from celery import current_app app = current_app app.log.setup_logging_subsystem( 'ERROR', logfile, hostname=hostname) logger.critical("Can't exec %r", ' '.join([path] + argv), exc_info=True) return EX_FAILURE class detached_celeryd(object): """Daemonize the celery worker process.""" usage = '%(prog)s [options] [celeryd options]' version = celery.VERSION_BANNER description = ('Detaches Celery worker nodes. 
See `celery worker --help` ' 'for the list of supported worker arguments.') command = sys.executable execv_path = sys.executable execv_argv = ['-m', 'celery', 'worker'] def __init__(self, app=None): self.app = app def create_parser(self, prog_name): parser = argparse.ArgumentParser( prog=prog_name, usage=self.usage, description=self.description, ) self._add_version_argument(parser) self.add_arguments(parser) return parser def _add_version_argument(self, parser): parser.add_argument( '--version', action='version', version=self.version, ) def parse_options(self, prog_name, argv): parser = self.create_parser(prog_name) options, leftovers = parser.parse_known_args(argv) if options.logfile: leftovers.append('--logfile={0}'.format(options.logfile)) if options.pidfile: leftovers.append('--pidfile={0}'.format(options.pidfile)) if options.hostname: leftovers.append('--hostname={0}'.format(options.hostname)) return options, leftovers def execute_from_commandline(self, argv=None): argv = sys.argv if argv is None else argv prog_name = os.path.basename(argv[0]) config, argv = self._split_command_line_config(argv) options, leftovers = self.parse_options(prog_name, argv[1:]) sys.exit(detach( app=self.app, path=self.execv_path, argv=self.execv_argv + leftovers + config, **vars(options) )) def _split_command_line_config(self, argv): config = list(self._extract_command_line_config(argv)) try: argv = argv[:argv.index('--')] except ValueError: pass return config, argv def _extract_command_line_config(self, argv): # Extracts command-line config appearing after '--': # celery worker -l info -- worker.prefetch_multiplier=10 # This to make sure argparse doesn't gobble it up. seen_cargs = 0 for arg in argv: if seen_cargs: yield arg else: if arg == '--': seen_cargs = 1 yield arg def add_arguments(self, parser): daemon_options(parser, default_pidfile='celeryd.pid') parser.add_argument('--workdir', default=None) parser.add_argument('-n', '--hostname') parser.add_argument( '--fake', action='store_true', default=False, help="Don't fork (for debugging purposes)", ) def main(app=None): detached_celeryd(app).execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/migrate.py0000644000175000017500000000414413130607475016773 0ustar omeromer00000000000000"""The ``celery migrate`` command, used to filter and move messages.""" from __future__ import absolute_import, unicode_literals from celery.bin.base import Command MIGRATE_PROGRESS_FMT = """\ Migrating task {state.count}/{state.strtotal}: \ {body[task]}[{body[id]}]\ """ class migrate(Command): """Migrate tasks from one broker to another. Warning: This command is experimental, make sure you have a backup of the tasks before you continue. Example: .. 
code-block:: console $ celery migrate amqp://A.example.com amqp://guest@B.example.com// $ celery migrate redis://localhost amqp://guest@localhost// """ args = ' ' progress_fmt = MIGRATE_PROGRESS_FMT def add_arguments(self, parser): group = parser.add_argument_group('Migration Options') group.add_argument( '--limit', '-n', type=int, help='Number of tasks to consume (int)', ) group.add_argument( '--timeout', '-t', type=float, default=1.0, help='Timeout in seconds (float) waiting for tasks', ) group.add_argument( '--ack-messages', '-a', action='store_true', default=False, help='Ack messages from source broker.', ) group.add_argument( '--tasks', '-T', help='List of task names to filter on.', ) group.add_argument( '--queues', '-Q', help='List of queues to migrate.', ) group.add_argument( '--forever', '-F', action='store_true', default=False, help='Continually migrate tasks until killed.', ) def on_migrate_task(self, state, body, message): self.out(self.progress_fmt.format(state=state, body=body)) def run(self, source, destination, **kwargs): from kombu import Connection from celery.contrib.migrate import migrate_tasks migrate_tasks(Connection(source), Connection(destination), callback=self.on_migrate_task, **kwargs) celery-4.1.0/celery/bin/upgrade.py0000644000175000017500000000710313130607475016770 0ustar omeromer00000000000000"""The ``celery upgrade`` command, used to upgrade from previous versions.""" from __future__ import absolute_import, print_function, unicode_literals import codecs from celery.app import defaults from celery.bin.base import Command from celery.utils.functional import pass1 class upgrade(Command): """Perform upgrade between versions.""" choices = {'settings'} def add_arguments(self, parser): group = parser.add_argument_group('Upgrading Options') group.add_argument( '--django', action='store_true', default=False, help='Upgrade Django project', ) group.add_argument( '--compat', action='store_true', default=False, help='Maintain backwards compatibility', ) group.add_argument( '--no-backup', action='store_true', default=False, help='Dont backup original files', ) def usage(self, command): return '%(prog)s settings [filename] [options]' def run(self, *args, **kwargs): try: command = args[0] except IndexError: raise self.UsageError( 'missing upgrade type: try `celery upgrade settings` ?') if command not in self.choices: raise self.UsageError('unknown upgrade type: {0}'.format(command)) return getattr(self, command)(*args, **kwargs) def settings(self, command, filename, no_backup=False, django=False, compat=False, **kwargs): lines = self._slurp(filename) keyfilter = self._compat_key if django or compat else pass1 print('processing {0}...'.format(filename), file=self.stderr) # gives list of tuples: ``(did_change, line_contents)`` new_lines = [ self._to_new_key(line, keyfilter) for line in lines ] if any(n[0] for n in new_lines): # did have changes if not no_backup: self._backup(filename) with codecs.open(filename, 'w', 'utf-8') as write_fh: for _, line in new_lines: write_fh.write(line) print('Changes to your setting have been made!', file=self.stdout) else: print('Does not seem to require any changes :-)', file=self.stdout) def _slurp(self, filename): with codecs.open(filename, 'r', 'utf-8') as read_fh: return [line for line in read_fh] def _backup(self, filename, suffix='.orig'): lines = [] backup_filename = ''.join([filename, suffix]) print('writing backup to {0}...'.format(backup_filename), file=self.stderr) with codecs.open(filename, 'r', 'utf-8') as read_fh: with 
codecs.open(backup_filename, 'w', 'utf-8') as backup_fh: for line in read_fh: backup_fh.write(line) lines.append(line) return lines def _to_new_key(self, line, keyfilter=pass1, source=defaults._TO_NEW_KEY): # sort by length to avoid, for example, broker_transport overriding # broker_transport_options. for old_key in reversed(sorted(source, key=lambda x: len(x))): new_line = line.replace(old_key, keyfilter(source[old_key])) if line != new_line and 'CELERY_CELERY' not in new_line: return 1, new_line # only one match per line. return 0, line def _compat_key(self, key, namespace='CELERY'): key = key.upper() if not key.startswith(namespace): key = '_'.join([namespace, key]) return key celery-4.1.0/celery/bin/logtool.py0000644000175000017500000001117213130607475017021 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The :program:`celery logtool` command. .. program:: celery logtool """ from __future__ import absolute_import, unicode_literals import re from collections import Counter from fileinput import FileInput from .base import Command __all__ = ['logtool'] RE_LOG_START = re.compile(r'^\[\d\d\d\d\-\d\d-\d\d ') RE_TASK_RECEIVED = re.compile(r'.+?\] Received') RE_TASK_READY = re.compile(r'.+?\] Task') RE_TASK_INFO = re.compile(r'.+?([\w\.]+)\[(.+?)\].+') RE_TASK_RESULT = re.compile(r'.+?[\w\.]+\[.+?\] (.+)') REPORT_FORMAT = """ Report ====== Task total: {task[total]} Task errors: {task[errors]} Task success: {task[succeeded]} Task completed: {task[completed]} Tasks ===== {task[types].format} """ class _task_counts(list): @property def format(self): return '\n'.join('{0}: {1}'.format(*i) for i in self) def task_info(line): m = RE_TASK_INFO.match(line) return m.groups() class Audit(object): def __init__(self, on_task_error=None, on_trace=None, on_debug=None): self.ids = set() self.names = {} self.results = {} self.ready = set() self.task_types = Counter() self.task_errors = 0 self.on_task_error = on_task_error self.on_trace = on_trace self.on_debug = on_debug self.prev_line = None def run(self, files): for line in FileInput(files): self.feed(line) return self def task_received(self, line, task_name, task_id): self.names[task_id] = task_name self.ids.add(task_id) self.task_types[task_name] += 1 def task_ready(self, line, task_name, task_id, result): self.ready.add(task_id) self.results[task_id] = result if 'succeeded' not in result: self.task_error(line, task_name, task_id, result) def task_error(self, line, task_name, task_id, result): self.task_errors += 1 if self.on_task_error: self.on_task_error(line, task_name, task_id, result) def feed(self, line): if RE_LOG_START.match(line): if RE_TASK_RECEIVED.match(line): task_name, task_id = task_info(line) self.task_received(line, task_name, task_id) elif RE_TASK_READY.match(line): task_name, task_id = task_info(line) result = RE_TASK_RESULT.match(line) if result: result, = result.groups() self.task_ready(line, task_name, task_id, result) else: if self.on_debug: self.on_debug(line) self.prev_line = line else: if self.on_trace: self.on_trace('\n'.join(filter(None, [self.prev_line, line]))) self.prev_line = None def incomplete_tasks(self): return self.ids ^ self.ready def report(self): return { 'task': { 'types': _task_counts(self.task_types.most_common()), 'total': len(self.ids), 'errors': self.task_errors, 'completed': len(self.ready), 'succeeded': len(self.ready) - self.task_errors, } } class logtool(Command): """The ``celery logtool`` command.""" args = """ [arguments] ..... stats [file1|- [file2 [...]]] ..... 
traces [file1|- [file2 [...]]] ..... errors [file1|- [file2 [...]]] ..... incomplete [file1|- [file2 [...]]] ..... debug [file1|- [file2 [...]]] """ def run(self, what=None, *files, **kwargs): map = { 'stats': self.stats, 'traces': self.traces, 'errors': self.errors, 'incomplete': self.incomplete, 'debug': self.debug, } if not what: raise self.UsageError('missing action') elif what not in map: raise self.Error( 'action {0} not in {1}'.format(what, '|'.join(map)), ) return map[what](files) def stats(self, files): self.out(REPORT_FORMAT.format( **Audit().run(files).report() )) def traces(self, files): Audit(on_trace=self.out).run(files) def errors(self, files): Audit(on_task_error=self.say1).run(files) def incomplete(self, files): audit = Audit() audit.run(files) for task_id in audit.incomplete_tasks(): self.error('Did not complete: %r' % (task_id,)) def debug(self, files): Audit(on_debug=self.out).run(files) def say1(self, line, *_): self.out(line) celery-4.1.0/celery/bin/beat.py0000644000175000017500000000701013130607475016251 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The :program:`celery beat` command. .. program:: celery beat .. seealso:: See :ref:`preload-options` and :ref:`daemon-options`. .. cmdoption:: --detach Detach and run in the background as a daemon. .. cmdoption:: -s, --schedule Path to the schedule database. Defaults to `celerybeat-schedule`. The extension '.db' may be appended to the filename. Default is {default}. .. cmdoption:: -S, --scheduler Scheduler class to use. Default is :class:`celery.beat.PersistentScheduler`. .. cmdoption:: --max-interval Max seconds to sleep between schedule iterations. .. cmdoption:: -f, --logfile Path to log file. If no logfile is specified, `stderr` is used. .. cmdoption:: -l, --loglevel Logging level, choose between `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`, or `FATAL`. .. cmdoption:: --pidfile File used to store the process pid. Defaults to `celerybeat.pid`. The program won't start if this file already exists and the pid is still alive. .. cmdoption:: --uid User id, or user name of the user to run as after detaching. .. cmdoption:: --gid Group id, or group name of the main group to change to after detaching. .. cmdoption:: --umask Effective umask (in octal) of the process after detaching. Inherits the umask of the parent process by default. .. cmdoption:: --workdir Optional directory to change to after detaching. .. cmdoption:: --executable Executable to use for the detached process. """ from __future__ import absolute_import, unicode_literals from functools import partial from celery.platforms import detached, maybe_drop_privileges from celery.bin.base import Command, daemon_options __all__ = ['beat'] HELP = __doc__ class beat(Command): """Start the beat periodic task scheduler. Examples: .. code-block:: console $ celery beat -l info $ celery beat -s /var/run/celery/beat-schedule --detach $ celery beat -S django The last example requires the :pypi:`django-celery-beat` extension package found on PyPI. 
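A detached invocation might look like this (the pid/log file paths are hypothetical; pick locations writable by the user running beat):

.. code-block:: console

    $ celery beat --detach -l info \
        --pidfile=/var/run/celery/beat.pid \
        --logfile=/var/log/celery/beat.log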
""" doc = HELP enable_config_from_cmdline = True supports_args = False def run(self, detach=False, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, **kwargs): if not detach: maybe_drop_privileges(uid=uid, gid=gid) kwargs.pop('app', None) beat = partial(self.app.Beat, logfile=logfile, pidfile=pidfile, **kwargs) if detach: with detached(logfile, pidfile, uid, gid, umask, workdir): return beat().run() else: return beat().run() def add_arguments(self, parser): c = self.app.conf bopts = parser.add_argument_group('Beat Options') bopts.add_argument('--detach', action='store_true', default=False) bopts.add_argument( '-s', '--schedule', default=c.beat_schedule_filename) bopts.add_argument('--max-interval', type=float) bopts.add_argument('-S', '--scheduler') bopts.add_argument('-l', '--loglevel', default='WARN') daemon_options(parser, default_pidfile='celerybeat.pid') user_options = self.app.user_options['beat'] if user_options: uopts = parser.add_argument_group('User Options') self.add_compat_options(uopts, user_options) def main(app=None): beat(app=app).execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/shell.py0000644000175000017500000001201113130607475016442 0ustar omeromer00000000000000"""The ``celery shell`` program, used to start a REPL.""" from __future__ import absolute_import, unicode_literals import os import sys from importlib import import_module from celery.five import values from celery.bin.base import Command class shell(Command): # pragma: no cover """Start shell session with convenient access to celery symbols. The following symbols will be added to the main globals: - ``celery``: the current application. - ``chord``, ``group``, ``chain``, ``chunks``, ``xmap``, ``xstarmap`` ``subtask``, ``Task`` - all registered tasks. 
""" def add_arguments(self, parser): group = parser.add_argument_group('Shell Options') group.add_argument( '--ipython', '-I', action='store_true', help='force iPython.', default=False, ) group.add_argument( '--bpython', '-B', action='store_true', help='force bpython.', default=False, ) group.add_argument( '--python', action='store_true', default=False, help='force default Python shell.', ) group.add_argument( '--without-tasks', '-T', action='store_true', default=False, help="don't add tasks to locals.", ) group.add_argument( '--eventlet', action='store_true', default=False, help='use eventlet.', ) group.add_argument( '--gevent', action='store_true', default=False, help='use gevent.', ) def run(self, *args, **kwargs): if args: raise self.UsageError( 'shell command does not take arguments: {0}'.format(args)) return self._run(**kwargs) def _run(self, ipython=False, bpython=False, python=False, without_tasks=False, eventlet=False, gevent=False, **kwargs): sys.path.insert(0, os.getcwd()) if eventlet: import_module('celery.concurrency.eventlet') if gevent: import_module('celery.concurrency.gevent') import celery import celery.task.base self.app.loader.import_default_modules() # pylint: disable=attribute-defined-outside-init self.locals = { 'app': self.app, 'celery': self.app, 'Task': celery.Task, 'chord': celery.chord, 'group': celery.group, 'chain': celery.chain, 'chunks': celery.chunks, 'xmap': celery.xmap, 'xstarmap': celery.xstarmap, 'subtask': celery.subtask, 'signature': celery.signature, } if not without_tasks: self.locals.update({ task.__name__: task for task in values(self.app.tasks) if not task.name.startswith('celery.') }) if python: return self.invoke_fallback_shell() elif bpython: return self.invoke_bpython_shell() elif ipython: return self.invoke_ipython_shell() return self.invoke_default_shell() def invoke_default_shell(self): try: import IPython # noqa except ImportError: try: import bpython # noqa except ImportError: return self.invoke_fallback_shell() else: return self.invoke_bpython_shell() else: return self.invoke_ipython_shell() def invoke_fallback_shell(self): import code try: import readline except ImportError: pass else: import rlcompleter readline.set_completer( rlcompleter.Completer(self.locals).complete) readline.parse_and_bind('tab:complete') code.interact(local=self.locals) def invoke_ipython_shell(self): for ip in (self._ipython, self._ipython_pre_10, self._ipython_terminal, self._ipython_010, self._no_ipython): try: return ip() except ImportError: pass def _ipython(self): from IPython import start_ipython start_ipython(argv=[], user_ns=self.locals) def _ipython_pre_10(self): # pragma: no cover from IPython.frontend.terminal.ipapp import TerminalIPythonApp app = TerminalIPythonApp.instance() app.initialize(argv=[]) app.shell.user_ns.update(self.locals) app.start() def _ipython_terminal(self): # pragma: no cover from IPython.terminal import embed embed.TerminalInteractiveShell(user_ns=self.locals).mainloop() def _ipython_010(self): # pragma: no cover from IPython.Shell import IPShell IPShell(argv=[], user_ns=self.locals).mainloop() def _no_ipython(self): # pragma: no cover raise ImportError('no suitable ipython found') def invoke_bpython_shell(self): import bpython bpython.embed(self.locals) celery-4.1.0/celery/bin/amqp.py0000644000175000017500000002715713130607475016312 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The :program:`celery amqp` command. .. 
program:: celery amqp """ from __future__ import absolute_import, print_function, unicode_literals import cmd as _cmd import sys import shlex import pprint from functools import partial from itertools import count from kombu.utils.encoding import safe_str from celery.utils.functional import padlist from celery.bin.base import Command from celery.five import string_t from celery.utils.serialization import strtobool __all__ = ['AMQPAdmin', 'AMQShell', 'Spec', 'amqp'] # Map to coerce strings to other types. COERCE = {bool: strtobool} HELP_HEADER = """ Commands -------- """.rstrip() EXAMPLE_TEXT = """ Example: -> queue.delete myqueue yes no """ say = partial(print, file=sys.stderr) class Spec(object): """AMQP Command specification. Used to convert arguments to Python values and display various help and tool-tips. Arguments: args (Sequence): see :attr:`args`. returns (str): see :attr:`returns`. """ #: List of arguments this command takes. #: Should contain ``(argument_name, argument_type)`` tuples. args = None #: Helpful human string representation of what this command returns. #: May be :const:`None`, to signify the return type is unknown. returns = None def __init__(self, *args, **kwargs): self.args = args self.returns = kwargs.get('returns') def coerce(self, index, value): """Coerce value for argument at index.""" arg_info = self.args[index] arg_type = arg_info[1] # Might be a custom way to coerce the string value, # so look in the coercion map. return COERCE.get(arg_type, arg_type)(value) def str_args_to_python(self, arglist): """Process list of string arguments to values according to spec. Example: >>> spec = Spec([('queue', str), ('if_unused', bool)]) >>> spec.str_args_to_python('pobox', 'true') ('pobox', True) """ return tuple( self.coerce(index, value) for index, value in enumerate(arglist)) def format_response(self, response): """Format the return value of this command in a human-friendly way.""" if not self.returns: return 'ok.' if response is None else response if callable(self.returns): return self.returns(response) return self.returns.format(response) def format_arg(self, name, type, default_value=None): if default_value is not None: return '{0}:{1}'.format(name, default_value) return name def format_signature(self): return ' '.join(self.format_arg(*padlist(list(arg), 3)) for arg in self.args) def dump_message(message): if message is None: return 'No messages in queue. basic.publish something.' return {'body': message.body, 'properties': message.properties, 'delivery_info': message.delivery_info} def format_declare_queue(ret): return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret) class AMQShell(_cmd.Cmd): """AMQP API Shell. Arguments: connect (Callable): Function used to connect to the server. Must return :class:`kombu.Connection` object. silent (bool): If enabled, the commands won't have annoying output not relevant when running in non-shell mode. """ conn = None chan = None prompt_fmt = '{self.counter}> ' identchars = _cmd.IDENTCHARS = '.' needs_reconnect = False counter = 1 inc_counter = count(2) #: Map of built-in command names -> method names builtins = { 'EOF': 'do_exit', 'exit': 'do_exit', 'help': 'do_help', } #: Map of AMQP API commands and their :class:`Spec`. 
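#: For example (an illustrative session), the input
#: ``1> queue.declare myqueue yes no`` is coerced by its spec into
#: ``('myqueue', True, False)`` before being dispatched to
#: ``channel.queue_declare()`` via :meth:`get_amqp_api_command`.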
amqp = { 'exchange.declare': Spec(('exchange', str), ('type', str), ('passive', bool, 'no'), ('durable', bool, 'no'), ('auto_delete', bool, 'no'), ('internal', bool, 'no')), 'exchange.delete': Spec(('exchange', str), ('if_unused', bool)), 'queue.bind': Spec(('queue', str), ('exchange', str), ('routing_key', str)), 'queue.declare': Spec(('queue', str), ('passive', bool, 'no'), ('durable', bool, 'no'), ('exclusive', bool, 'no'), ('auto_delete', bool, 'no'), returns=format_declare_queue), 'queue.delete': Spec(('queue', str), ('if_unused', bool, 'no'), ('if_empty', bool, 'no'), returns='ok. {0} messages deleted.'), 'queue.purge': Spec(('queue', str), returns='ok. {0} messages deleted.'), 'basic.get': Spec(('queue', str), ('no_ack', bool, 'off'), returns=dump_message), 'basic.publish': Spec(('msg', str), ('exchange', str), ('routing_key', str), ('mandatory', bool, 'no'), ('immediate', bool, 'no')), 'basic.ack': Spec(('delivery_tag', int)), } def _prepare_spec(self, conn): # XXX Hack to fix Issue #2013 from amqp import Connection, Message if isinstance(conn.connection, Connection): self.amqp['basic.publish'] = Spec(('msg', Message), ('exchange', str), ('routing_key', str), ('mandatory', bool, 'no'), ('immediate', bool, 'no')) def __init__(self, *args, **kwargs): self.connect = kwargs.pop('connect') self.silent = kwargs.pop('silent', False) self.out = kwargs.pop('out', sys.stderr) _cmd.Cmd.__init__(self, *args, **kwargs) self._reconnect() def note(self, m): """Say something to the user. Disabled if :attr:`silent`.""" if not self.silent: say(m, file=self.out) def say(self, m): say(m, file=self.out) def get_amqp_api_command(self, cmd, arglist): """Get AMQP command wrapper. With a command name and a list of arguments, convert the arguments to Python values and find the corresponding method on the AMQP channel object. Returns: Tuple: of `(method, processed_args)` pairs. """ spec = self.amqp[cmd] args = spec.str_args_to_python(arglist) attr_name = cmd.replace('.', '_') if self.needs_reconnect: self._reconnect() return getattr(self.chan, attr_name), args, spec.format_response def do_exit(self, *args): """The `'exit'` command.""" self.note("\n-> please, don't leave!") sys.exit(0) def display_command_help(self, cmd, short=False): spec = self.amqp[cmd] self.say('{0} {1}'.format(cmd, spec.format_signature())) def do_help(self, *args): if not args: self.say(HELP_HEADER) for cmd_name in self.amqp: self.display_command_help(cmd_name, short=True) self.say(EXAMPLE_TEXT) else: self.display_command_help(args[0]) def default(self, line): self.say("unknown syntax: {0!r}. how about some 'help'?".format(line)) def get_names(self): return set(self.builtins) | set(self.amqp) def completenames(self, text, *ignored): """Return all commands starting with `text`, for tab-completion.""" names = self.get_names() first = [cmd for cmd in names if cmd.startswith(text.replace('_', '.'))] if first: return first return [cmd for cmd in names if cmd.partition('.')[2].startswith(text)] def dispatch(self, cmd, arglist): """Dispatch and execute the command. Look-up order is: :attr:`builtins` -> :attr:`amqp`. """ if isinstance(arglist, string_t): arglist = shlex.split(safe_str(arglist)) if cmd in self.builtins: return getattr(self, self.builtins[cmd])(*arglist) fun, args, formatter = self.get_amqp_api_command(cmd, arglist) return formatter(fun(*args)) def parseline(self, parts): """Parse input line. 
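For example (the input is assumed to be pre-split, as done by
:meth:`onecmd` using :func:`shlex.split`):

    >>> shell.parseline(['basic.get', 'myqueue', 'on'])
    ('basic.get', ['myqueue', 'on'], 'basic.get myqueue on')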
Returns: Tuple: of three items: `(command_name, arglist, original_line)` """ if parts: return parts[0], parts[1:], ' '.join(parts) return '', '', '' def onecmd(self, line): """Parse line and execute command.""" if isinstance(line, string_t): line = shlex.split(safe_str(line)) cmd, arg, line = self.parseline(line) if not line: return self.emptyline() self.lastcmd = line self.counter = next(self.inc_counter) try: self.respond(self.dispatch(cmd, arg)) except (AttributeError, KeyError) as exc: self.default(line) except Exception as exc: # pylint: disable=broad-except self.say(exc) self.needs_reconnect = True def respond(self, retval): """What to do with the return value of a command.""" if retval is not None: if isinstance(retval, string_t): self.say(retval) else: self.say(pprint.pformat(retval)) def _reconnect(self): """Re-establish connection to the AMQP server.""" self.conn = self.connect(self.conn) self._prepare_spec(self.conn) self.chan = self.conn.default_channel self.needs_reconnect = False @property def prompt(self): return self.prompt_fmt.format(self=self) class AMQPAdmin(object): """The celery :program:`celery amqp` utility.""" Shell = AMQShell def __init__(self, *args, **kwargs): self.app = kwargs['app'] self.out = kwargs.setdefault('out', sys.stderr) self.silent = kwargs.get('silent') self.args = args def connect(self, conn=None): if conn: conn.close() conn = self.app.connection() self.note('-> connecting to {0}.'.format(conn.as_uri())) conn.connect() self.note('-> connected.') return conn def run(self): shell = self.Shell(connect=self.connect, out=self.out) if self.args: return shell.onecmd(self.args) try: return shell.cmdloop() except KeyboardInterrupt: self.note('(bibi)') def note(self, m): if not self.silent: say(m, file=self.out) class amqp(Command): """AMQP Administration Shell. Also works for non-AMQP transports (but not ones that store declarations in memory). Examples: .. code-block:: console $ # start shell mode $ celery amqp $ # show list of commands $ celery amqp help $ celery amqp exchange.delete name $ celery amqp queue.delete queue $ celery amqp queue.delete queue yes yes """ def run(self, *args, **options): options['app'] = self.app return AMQPAdmin(*args, **options).run() def main(): amqp().execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/control.py0000644000175000017500000001743513130607475017032 0ustar omeromer00000000000000"""The ``celery control``, ``. inspect`` and ``. 
status`` programs.""" from __future__ import absolute_import, unicode_literals from kombu.utils.json import dumps from kombu.utils.objects import cached_property from celery.five import items, string_t from celery.bin.base import Command from celery.platforms import EX_UNAVAILABLE, EX_USAGE from celery.utils import text class _RemoteControl(Command): name = None leaf = False control_group = None def __init__(self, *args, **kwargs): self.show_body = kwargs.pop('show_body', True) self.show_reply = kwargs.pop('show_reply', True) super(_RemoteControl, self).__init__(*args, **kwargs) def add_arguments(self, parser): group = parser.add_argument_group('Remote Control Options') group.add_argument( '--timeout', '-t', type=float, help='Timeout in seconds (float) waiting for reply', ) group.add_argument( '--destination', '-d', help='Comma separated list of destination node names.') group.add_argument( '--json', '-j', action='store_true', default=False, help='Use json as output format.', ) @classmethod def get_command_info(cls, command, indent=0, prefix='', color=None, help=False, app=None, choices=None): if choices is None: choices = cls._choices_by_group(app) meta = choices[command] if help: help = '|' + text.indent(meta.help, indent + 4) else: help = None return text.join([ '|' + text.indent('{0}{1} {2}'.format( prefix, color(command), meta.signature or ''), indent), help, ]) @classmethod def list_commands(cls, indent=0, prefix='', color=None, help=False, app=None): choices = cls._choices_by_group(app) color = color if color else lambda x: x prefix = prefix + ' ' if prefix else '' return '\n'.join( cls.get_command_info(c, indent, prefix, color, help, app=app, choices=choices) for c in sorted(choices)) def usage(self, command): return '%(prog)s {0} [options] {1} [arg1 .. argN]'.format( command, self.args) def call(self, *args, **kwargs): raise NotImplementedError('call') def run(self, *args, **kwargs): if not args: raise self.UsageError( 'Missing {0.name} method. 
See --help'.format(self)) return self.do_call_method(args, **kwargs) def _ensure_fanout_supported(self): with self.app.connection_for_write() as conn: if not conn.supports_exchange_type('fanout'): raise self.Error( 'Broadcast not supported by transport {0!r}'.format( conn.info()['transport'])) def do_call_method(self, args, timeout=None, destination=None, json=False, **kwargs): method = args[0] if method == 'help': raise self.Error("Did you mean '{0.name} --help'?".format(self)) try: meta = self.choices[method] except KeyError: raise self.UsageError( 'Unknown {0.name} method {1}'.format(self, method)) self._ensure_fanout_supported() timeout = timeout or meta.default_timeout if destination and isinstance(destination, string_t): destination = [dest.strip() for dest in destination.split(',')] replies = self.call( method, arguments=self.compile_arguments(meta, method, args[1:]), timeout=timeout, destination=destination, callback=None if json else self.say_remote_command_reply, ) if not replies: raise self.Error('No nodes replied within time constraint.', status=EX_UNAVAILABLE) if json: self.out(dumps(replies)) return replies def compile_arguments(self, meta, method, args): args = list(args) kw = {} if meta.args: kw.update({ k: v for k, v in self._consume_args(meta, method, args) }) if meta.variadic: kw.update({meta.variadic: args}) if not kw and args: raise self.Error( 'Command {0!r} takes no arguments.'.format(method), status=EX_USAGE) return kw or {} def _consume_args(self, meta, method, args): i = 0 try: for i, arg in enumerate(args): try: name, typ = meta.args[i] except IndexError: if meta.variadic: break raise self.Error( 'Command {0!r} takes arguments: {1}'.format( method, meta.signature), status=EX_USAGE) else: yield name, typ(arg) if typ is not None else arg finally: args[:] = args[i:] @classmethod def _choices_by_group(cls, app): from celery.worker.control import Panel # need to import task modules for custom user-remote control commands. app.loader.import_default_modules() return { name: info for name, info in items(Panel.meta) if info.type == cls.control_group and info.visible } @cached_property def choices(self): return self._choices_by_group(self.app) @property def epilog(self): return '\n'.join([ '[Commands]', self.list_commands(indent=4, help=True, app=self.app) ]) class inspect(_RemoteControl): """Inspect the worker at runtime. Availability: RabbitMQ (AMQP) and Redis transports. Examples: .. code-block:: console $ celery inspect active --timeout=5 $ celery inspect scheduled -d worker1@example.com $ celery inspect revoked -d w1@e.com,w2@e.com """ name = 'inspect' control_group = 'inspect' def call(self, method, arguments, **options): return self.app.control.inspect(**options)._request( method, **arguments) class control(_RemoteControl): """Workers remote control. Availability: RabbitMQ (AMQP), Redis, and MongoDB transports. Examples: .. 
code-block:: console $ celery control enable_events --timeout=5 $ celery control -d worker1@example.com enable_events $ celery control -d w1.e.com,w2.e.com enable_events $ celery control -d w1.e.com add_consumer queue_name $ celery control -d w1.e.com cancel_consumer queue_name $ celery control add_consumer queue exchange direct rkey """ name = 'control' control_group = 'control' def call(self, method, arguments, **options): return self.app.control.broadcast( method, arguments=arguments, reply=True, **options) class status(Command): """Show list of workers that are online.""" option_list = inspect.option_list def run(self, *args, **kwargs): I = inspect( app=self.app, no_color=kwargs.get('no_color', False), stdout=self.stdout, stderr=self.stderr, show_reply=False, show_body=False, quiet=True, ) replies = I.run('ping', **kwargs) if not replies: raise self.Error('No nodes replied within time constraint', status=EX_UNAVAILABLE) nodecount = len(replies) if not kwargs.get('quiet', False): self.out('\n{0} {1} online.'.format( nodecount, text.pluralize(nodecount, 'node'))) celery-4.1.0/celery/bin/events.py0000644000175000017500000001206113130607475016644 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The :program:`celery events` command. .. program:: celery events .. seealso:: See :ref:`preload-options` and :ref:`daemon-options`. .. cmdoption:: -d, --dump Dump events to stdout. .. cmdoption:: -c, --camera Take snapshots of events using this camera. .. cmdoption:: --detach Camera: Detach and run in the background as a daemon. .. cmdoption:: -F, --freq, --frequency Camera: Shutter frequency. Default is every 1.0 seconds. .. cmdoption:: -r, --maxrate Camera: Optional shutter rate limit (e.g., 10/m). .. cmdoption:: -l, --loglevel Logging level, choose between `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`, or `FATAL`. Default is INFO. .. cmdoption:: -f, --logfile Path to log file. If no logfile is specified, `stderr` is used. .. cmdoption:: --pidfile Optional file used to store the process pid. The program won't start if this file already exists and the pid is still alive. .. cmdoption:: --uid User id, or user name of the user to run as after detaching. .. cmdoption:: --gid Group id, or group name of the main group to change to after detaching. .. cmdoption:: --umask Effective umask (in octal) of the process after detaching. Inherits the umask of the parent process by default. .. cmdoption:: --workdir Optional directory to change to after detaching. .. cmdoption:: --executable Executable to use for the detached process. """ from __future__ import absolute_import, unicode_literals import sys from functools import partial from celery.platforms import detached, set_process_title, strargv from celery.bin.base import Command, daemon_options __all__ = ['events'] HELP = __doc__ class events(Command): """Event-stream utilities. Notes: .. code-block:: console # - Start graphical monitor (requires curses) $ celery events --app=proj $ celery events -d --app=proj # - Dump events to screen. $ celery events -b amqp:// # - Run snapshot camera. $ celery events -c [options] Examples: .. 
code-block:: console $ celery events $ celery events -d $ celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info """ doc = HELP supports_args = False def run(self, dump=False, camera=None, frequency=1.0, maxrate=None, loglevel='INFO', logfile=None, prog_name='celery events', pidfile=None, uid=None, gid=None, umask=None, workdir=None, detach=False, **kwargs): self.prog_name = prog_name if dump: return self.run_evdump() if camera: return self.run_evcam(camera, freq=frequency, maxrate=maxrate, loglevel=loglevel, logfile=logfile, pidfile=pidfile, uid=uid, gid=gid, umask=umask, workdir=workdir, detach=detach) return self.run_evtop() def run_evdump(self): from celery.events.dumper import evdump self.set_process_status('dump') return evdump(app=self.app) def run_evtop(self): from celery.events.cursesmon import evtop self.set_process_status('top') return evtop(app=self.app) def run_evcam(self, camera, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, detach=False, **kwargs): from celery.events.snapshot import evcam self.set_process_status('cam') kwargs['app'] = self.app cam = partial(evcam, camera, logfile=logfile, pidfile=pidfile, **kwargs) if detach: with detached(logfile, pidfile, uid, gid, umask, workdir): return cam() else: return cam() def set_process_status(self, prog, info=''): prog = '{0}:{1}'.format(self.prog_name, prog) info = '{0} {1}'.format(info, strargv(sys.argv)) return set_process_title(prog, info=info) def add_arguments(self, parser): dopts = parser.add_argument_group('Dumper') dopts.add_argument('-d', '--dump', action='store_true', default=False) copts = parser.add_argument_group('Snapshot') copts.add_argument('-c', '--camera') copts.add_argument('--detach', action='store_true', default=False) copts.add_argument('-F', '--frequency', '--freq', type=float, default=1.0) copts.add_argument('-r', '--maxrate') copts.add_argument('-l', '--loglevel', default='INFO') daemon_options(parser, default_pidfile='celeryev.pid') user_options = self.app.user_options['events'] if user_options: self.add_compat_options( parser.add_argument_group('User Options'), user_options) def main(): ev = events() ev.execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/celery.py0000644000175000017500000003347713130607475016641 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The :program:`celery` umbrella command. .. program:: celery .. _preload-options: Preload Options --------------- These options are supported by all commands, and usually parsed before command-specific arguments. .. cmdoption:: -A, --app app instance to use (e.g., ``module.attr_name``) .. cmdoption:: -b, --broker URL to broker. default is ``amqp://guest@localhost//`` .. cmdoption:: --loader name of custom loader class to use. .. cmdoption:: --config Name of the configuration module .. cmdoption:: -C, --no-color Disable colors in output. .. cmdoption:: -q, --quiet Give less verbose output (behavior depends on the sub command). .. cmdoption:: --help Show help and exit. .. _daemon-options: Daemon Options -------------- These options are supported by commands that can detach into the background (daemon). They will be present in any command that also has a `--detach` option. .. cmdoption:: -f, --logfile Path to log file. If no logfile is specified, `stderr` is used. .. cmdoption:: --pidfile Optional file used to store the process pid. The program won't start if this file already exists and the pid is still alive. .. 
cmdoption:: --uid User id, or user name of the user to run as after detaching. .. cmdoption:: --gid Group id, or group name of the main group to change to after detaching. .. cmdoption:: --umask Effective umask (in octal) of the process after detaching. Inherits the umask of the parent process by default. .. cmdoption:: --workdir Optional directory to change to after detaching. .. cmdoption:: --executable Executable to use for the detached process. ``celery inspect`` ------------------ .. program:: celery inspect .. cmdoption:: -t, --timeout Timeout in seconds (float) waiting for reply .. cmdoption:: -d, --destination Comma separated list of destination node names. .. cmdoption:: -j, --json Use json as output format. ``celery control`` ------------------ .. program:: celery control .. cmdoption:: -t, --timeout Timeout in seconds (float) waiting for reply .. cmdoption:: -d, --destination Comma separated list of destination node names. .. cmdoption:: -j, --json Use json as output format. ``celery migrate`` ------------------ .. program:: celery migrate .. cmdoption:: -n, --limit Number of tasks to consume (int). .. cmdoption:: -t, -timeout Timeout in seconds (float) waiting for tasks. .. cmdoption:: -a, --ack-messages Ack messages from source broker. .. cmdoption:: -T, --tasks List of task names to filter on. .. cmdoption:: -Q, --queues List of queues to migrate. .. cmdoption:: -F, --forever Continually migrate tasks until killed. ``celery upgrade`` ------------------ .. program:: celery upgrade .. cmdoption:: --django Upgrade a Django project. .. cmdoption:: --compat Maintain backwards compatibility. .. cmdoption:: --no-backup Don't backup original files. ``celery shell`` ---------------- .. program:: celery shell .. cmdoption:: -I, --ipython Force :pypi:`iPython` implementation. .. cmdoption:: -B, --bpython Force :pypi:`bpython` implementation. .. cmdoption:: -P, --python Force default Python shell. .. cmdoption:: -T, --without-tasks Don't add tasks to locals. .. cmdoption:: --eventlet Use :pypi:`eventlet` monkey patches. .. cmdoption:: --gevent Use :pypi:`gevent` monkey patches. ``celery result`` ----------------- .. program:: celery result .. cmdoption:: -t, --task Name of task (if custom backend). .. cmdoption:: --traceback Show traceback if any. ``celery purge`` ---------------- .. program:: celery purge .. cmdoption:: -f, --force Don't prompt for verification before deleting messages (DANGEROUS) ``celery call`` --------------- .. program:: celery call .. cmdoption:: -a, --args Positional arguments (json format). .. cmdoption:: -k, --kwargs Keyword arguments (json format). .. cmdoption:: --eta Scheduled time in ISO-8601 format. .. cmdoption:: --countdown ETA in seconds from now (float/int). .. cmdoption:: --expires Expiry time in float/int seconds, or a ISO-8601 date. .. cmdoption:: --serializer Specify serializer to use (default is json). .. cmdoption:: --queue Destination queue. .. cmdoption:: --exchange Destination exchange (defaults to the queue exchange). .. cmdoption:: --routing-key Destination routing key (defaults to the queue routing key). """ from __future__ import absolute_import, unicode_literals, print_function import numbers import sys from functools import partial from celery.platforms import EX_OK, EX_FAILURE, EX_USAGE from celery.utils import term from celery.utils import text # Cannot use relative imports here due to a Windows issue (#1111). 
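# Each import below contributes one Command subclass; they are wired
# into ``CeleryCommand.commands`` further down, which is how, e.g.,
# ``celery worker`` resolves to :class:`celery.bin.worker.worker`.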
from celery.bin.base import Command, Extensions # Import commands from other modules from celery.bin.amqp import amqp from celery.bin.beat import beat from celery.bin.call import call from celery.bin.control import _RemoteControl # noqa from celery.bin.control import control, inspect, status from celery.bin.events import events from celery.bin.graph import graph from celery.bin.list import list_ from celery.bin.logtool import logtool from celery.bin.migrate import migrate from celery.bin.purge import purge from celery.bin.result import result from celery.bin.shell import shell from celery.bin.worker import worker from celery.bin.upgrade import upgrade __all__ = ['CeleryCommand', 'main'] HELP = """ ---- -- - - ---- Commands- -------------- --- ------------ {commands} ---- -- - - --------- -- - -------------- --- ------------ Type '{prog_name} --help' for help using a specific command. """ command_classes = [ ('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'), ('Remote Control', ['status', 'inspect', 'control'], 'blue'), ('Utils', ['purge', 'list', 'call', 'result', 'migrate', 'graph', 'upgrade'], None), ('Debugging', ['report', 'logtool'], 'red'), ] def determine_exit_status(ret): if isinstance(ret, numbers.Integral): return ret return EX_OK if ret else EX_FAILURE def main(argv=None): """Start celery umbrella command.""" # Fix for setuptools generated scripts, so that it will # work with multiprocessing fork emulation. # (see multiprocessing.forking.get_preparation_data()) try: if __name__ != '__main__': # pragma: no cover sys.modules['__main__'] = sys.modules[__name__] cmd = CeleryCommand() cmd.maybe_patch_concurrency() from billiard import freeze_support freeze_support() cmd.execute_from_commandline(argv) except KeyboardInterrupt: pass class multi(Command): """Start multiple worker instances.""" respects_app_option = False def run_from_argv(self, prog_name, argv, command=None): from celery.bin.multi import MultiTool cmd = MultiTool(quiet=self.quiet, no_color=self.no_color) return cmd.execute_from_commandline([command] + argv) class help(Command): """Show help screen and exit.""" def usage(self, command): return '%(prog)s [options] {0.args}'.format(self) def run(self, *args, **kwargs): self.parser.print_help() self.out(HELP.format( prog_name=self.prog_name, commands=CeleryCommand.list_commands( colored=self.colored, app=self.app), )) return EX_USAGE class report(Command): """Shows information useful to include in bug-reports.""" def run(self, *args, **kwargs): self.out(self.app.bugreport()) return EX_OK class CeleryCommand(Command): """Base class for commands.""" commands = { 'amqp': amqp, 'beat': beat, 'call': call, 'control': control, 'events': events, 'graph': graph, 'help': help, 'inspect': inspect, 'list': list_, 'logtool': logtool, 'migrate': migrate, 'multi': multi, 'purge': purge, 'report': report, 'result': result, 'shell': shell, 'status': status, 'upgrade': upgrade, 'worker': worker, } ext_fmt = '{self.namespace}.commands' enable_config_from_cmdline = True prog_name = 'celery' namespace = 'celery' @classmethod def register_command(cls, fun, name=None): cls.commands[name or fun.__name__] = fun return fun def execute(self, command, argv=None): try: cls = self.commands[command] except KeyError: cls, argv = self.commands['help'], ['help'] cls = self.commands.get(command) or self.commands['help'] try: return cls( app=self.app, on_error=self.on_error, no_color=self.no_color, quiet=self.quiet, on_usage_error=partial(self.on_usage_error, command=command), 
).run_from_argv(self.prog_name, argv[1:], command=argv[0]) except self.UsageError as exc: self.on_usage_error(exc) return exc.status except self.Error as exc: self.on_error(exc) return exc.status def on_usage_error(self, exc, command=None): if command: helps = '{self.prog_name} {command} --help' else: helps = '{self.prog_name} --help' self.error(self.colored.magenta('Error: {0}'.format(exc))) self.error("""Please try '{0}'""".format(helps.format( self=self, command=command, ))) def _relocate_args_from_start(self, argv, index=0): if argv: rest = [] while index < len(argv): value = argv[index] if value.startswith('--'): rest.append(value) elif value.startswith('-'): # we eat the next argument even though we don't know # if this option takes an argument or not. # instead we'll assume what's the command name in the # return statements below. try: nxt = argv[index + 1] if nxt.startswith('-'): # is another option rest.append(value) else: # is (maybe) a value for this option rest.extend([value, nxt]) index += 1 except IndexError: # pragma: no cover rest.append(value) break else: break index += 1 if argv[index:]: # pragma: no cover # if there are more arguments left then divide and swap # we assume the first argument in argv[i:] is the command # name. return argv[index:] + rest # if there are no more arguments then the last arg in rest # must be the command. return [rest.pop()] + rest return [] def prepare_prog_name(self, name): if name == '__main__.py': return sys.modules['__main__'].__file__ return name def handle_argv(self, prog_name, argv, **kwargs): self.prog_name = self.prepare_prog_name(prog_name) argv = self._relocate_args_from_start(argv) _, argv = self.prepare_args(None, argv) try: command = argv[0] except IndexError: command, argv = 'help', ['help'] return self.execute(command, argv) def execute_from_commandline(self, argv=None): argv = sys.argv if argv is None else argv if 'multi' in argv[1:3]: # Issue 1008 self.respects_app_option = False try: sys.exit(determine_exit_status( super(CeleryCommand, self).execute_from_commandline(argv))) except KeyboardInterrupt: sys.exit(EX_FAILURE) @classmethod def get_command_info(cls, command, indent=0, color=None, colored=None, app=None): colored = term.colored() if colored is None else colored colored = colored.names[color] if color else lambda x: x obj = cls.commands[command] cmd = 'celery {0}'.format(colored(command)) if obj.leaf: return '|' + text.indent(cmd, indent) return text.join([ ' ', '|' + text.indent('{0} --help'.format(cmd), indent), obj.list_commands(indent, 'celery {0}'.format(command), colored, app=app), ]) @classmethod def list_commands(cls, indent=0, colored=None, app=None): colored = term.colored() if colored is None else colored white = colored.white ret = [] for command_cls, commands, color in command_classes: ret.extend([ text.indent('+ {0}: '.format(white(command_cls)), indent), '\n'.join( cls.get_command_info( command, indent + 4, color, colored, app=app) for command in commands), '' ]) return '\n'.join(ret).strip() def with_pool_option(self, argv): if len(argv) > 1 and 'worker' in argv[0:3]: # this command supports custom pools # that may have to be loaded as early as possible. 
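# (for example, the eventlet/gevent pools monkey-patch the
# standard library; ``maybe_patch_concurrency()`` in ``main()``
# uses this hint to apply those patches before anything else
# is imported.)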
return (['-P'], ['--pool']) def on_concurrency_setup(self): self.load_extension_commands() def load_extension_commands(self): names = Extensions(self.ext_fmt.format(self=self), self.register_command).load() if names: command_classes.append(('Extensions', names, 'magenta')) if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/bin/call.py0000644000175000017500000000543613130607475016263 0ustar omeromer00000000000000"""The ``celery call`` program used to send tasks from the command-line.""" from __future__ import absolute_import, unicode_literals from kombu.utils.json import loads from celery.bin.base import Command from celery.five import string_t from celery.utils.time import maybe_iso8601 class call(Command): """Call a task by name. Examples: .. code-block:: console $ celery call tasks.add --args='[2, 2]' $ celery call tasks.add --args='[2, 2]' --countdown=10 """ args = '<task_name>' # since we have an argument --args, we need to name this differently. args_name = 'posargs' def add_arguments(self, parser): group = parser.add_argument_group('Calling Options') group.add_argument('--args', '-a', help='positional arguments (json).') group.add_argument('--kwargs', '-k', help='keyword arguments (json).') group.add_argument('--eta', help='scheduled time (ISO-8601).') group.add_argument( '--countdown', type=float, help='eta in seconds from now (float/int).', ) group.add_argument( '--expires', help='expiry time (ISO-8601/float/int).', ), group.add_argument( '--serializer', default='json', help='defaults to json.'), ropts = parser.add_argument_group('Routing Options') ropts.add_argument('--queue', help='custom queue name.') ropts.add_argument('--exchange', help='custom exchange name.') ropts.add_argument('--routing-key', help='custom routing key.') def run(self, name, *_, **kwargs): self._send_task(name, **kwargs) def _send_task(self, name, args=None, kwargs=None, countdown=None, serializer=None, queue=None, exchange=None, routing_key=None, eta=None, expires=None, **_): # arguments args = loads(args) if isinstance(args, string_t) else args kwargs = loads(kwargs) if isinstance(kwargs, string_t) else kwargs # Expires can be int/float. try: expires = float(expires) except (TypeError, ValueError): # or a string describing an ISO 8601 datetime. try: expires = maybe_iso8601(expires) except (TypeError, ValueError): raise # send the task and print the id. self.out(self.app.send_task( name, args=args or (), kwargs=kwargs or {}, countdown=countdown, serializer=serializer, queue=queue, exchange=exchange, routing_key=routing_key, eta=maybe_iso8601(eta), expires=expires, ).id) celery-4.1.0/celery/utils/0000755000175000017500000000000013135426347015360 5ustar omeromer00000000000000celery-4.1.0/celery/utils/dispatch/0000755000175000017500000000000013135426347017157 5ustar omeromer00000000000000celery-4.1.0/celery/utils/dispatch/__init__.py0000644000175000017500000000023213130607475021263 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Observer pattern.""" from __future__ import absolute_import, unicode_literals from .signal import Signal __all__ = ['Signal'] celery-4.1.0/celery/utils/dispatch/weakref_backports.py0000644000175000017500000000432513130607475023227 0ustar omeromer00000000000000"""Weakref compatibility. weakref_backports is a partial backport of the weakref module for Python versions below 3.4. Copyright (C) 2013 Python Software Foundation, see LICENSE.python for details. The following changes were made to the original sources during backporting: * Added ``self`` to ``super`` calls. 
* Removed ``from None`` when raising exceptions. """ from __future__ import absolute_import, unicode_literals from weakref import ref class WeakMethod(ref): """Weak reference to bound method. A custom :class:`weakref.ref` subclass which simulates a weak reference to a bound method, working around the lifetime problem of bound methods. """ __slots__ = '_func_ref', '_meth_type', '_alive', '__weakref__' def __new__(cls, meth, callback=None): try: obj = meth.__self__ func = meth.__func__ except AttributeError: raise TypeError( "Argument should be a bound method, not {0}".format( type(meth))) def _cb(arg): # The self-weakref trick is needed to avoid creating a # reference cycle. self = self_wr() if self._alive: self._alive = False if callback is not None: callback(self) self = ref.__new__(cls, obj, _cb) self._func_ref = ref(func, _cb) self._meth_type = type(meth) self._alive = True self_wr = ref(self) return self def __call__(self): obj = super(WeakMethod, self).__call__() func = self._func_ref() if obj is not None and func is not None: return self._meth_type(func, obj) def __eq__(self, other): if not isinstance(other, WeakMethod): return False if not self._alive or not other._alive: return self is other return ref.__eq__(self, other) and self._func_ref == other._func_ref def __ne__(self, other): if not isinstance(other, WeakMethod): return True if not self._alive or not other._alive: return self is not other return ref.__ne__(self, other) or self._func_ref != other._func_ref __hash__ = ref.__hash__ celery-4.1.0/celery/utils/dispatch/signal.py0000644000175000017500000002665313130607475021020 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Implementation of the Observer pattern.""" from __future__ import absolute_import, unicode_literals import sys import threading import weakref import warnings from celery.exceptions import CDeprecationWarning from celery.five import python_2_unicode_compatible, range, text_t from celery.local import PromiseProxy, Proxy from celery.utils.functional import fun_accepts_kwargs from celery.utils.log import get_logger try: from weakref import WeakMethod except ImportError: from .weakref_backports import WeakMethod # noqa __all__ = ['Signal'] PY3 = sys.version_info[0] >= 3 logger = get_logger(__name__) def _make_id(target): # pragma: no cover if isinstance(target, Proxy): target = target._get_current_object() if isinstance(target, (bytes, text_t)): # see Issue #2475 return target if hasattr(target, '__func__'): return (id(target.__self__), id(target.__func__)) return id(target) NONE_ID = _make_id(None) NO_RECEIVERS = object() @python_2_unicode_compatible class Signal(object): # pragma: no cover """Create new signal. Keyword Arguments: providing_args (List): A list of the arguments this signal can pass along in a :meth:`send` call. use_caching (bool): Enable receiver cache. name (str): Name of signal, used for debugging purposes. """ #: Holds a dictionary of #: ``{receiverkey (id): weakref(receiver)}`` mappings. receivers = None def __init__(self, providing_args=None, use_caching=False, name=None): self.receivers = [] self.providing_args = set( providing_args if providing_args is not None else []) self.lock = threading.Lock() self.use_caching = use_caching self.name = name # For convenience we create empty caches even if they are not used. # A note about caching: if use_caching is defined, then for each # distinct sender we cache the receivers that sender has in # 'sender_receivers_cache'. 
The cache is cleaned when .connect() or # .disconnect() is called and populated on .send(). self.sender_receivers_cache = ( weakref.WeakKeyDictionary() if use_caching else {} ) self._dead_receivers = False def _connect_proxy(self, fun, sender, weak, dispatch_uid): return self.connect( fun, sender=sender._get_current_object(), weak=weak, dispatch_uid=dispatch_uid, ) def connect(self, *args, **kwargs): """Connect receiver to sender for signal. Arguments: receiver (Callable): A function or an instance method which is to receive signals. Receivers must be hashable objects. if weak is :const:`True`, then receiver must be weak-referenceable. Receivers must be able to accept keyword arguments. If receivers have a `dispatch_uid` attribute, the receiver will not be added if another receiver already exists with that `dispatch_uid`. sender (Any): The sender to which the receiver should respond. Must either be a Python object, or :const:`None` to receive events from any sender. weak (bool): Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid (Hashable): An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable. """ def _handle_options(sender=None, weak=True, dispatch_uid=None): def _connect_signal(fun): self._connect_signal(fun, sender, weak, dispatch_uid) return fun return _connect_signal if args and callable(args[0]): return _handle_options(*args[1:], **kwargs)(args[0]) return _handle_options(*args, **kwargs) def _connect_signal(self, receiver, sender, weak, dispatch_uid): assert callable(receiver), 'Signal receivers must be callable' if not fun_accepts_kwargs(receiver): raise ValueError( 'Signal receiver must accept keyword arguments.') if isinstance(sender, PromiseProxy): sender.__then__( self._connect_proxy, receiver, sender, weak, dispatch_uid, ) return receiver if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) if weak: ref = weakref.ref receiver_object = receiver # Check for bound methods try: receiver.__self__ receiver.__func__ except AttributeError: pass else: ref = WeakMethod receiver_object = receiver.__self__ if PY3: receiver = ref(receiver) weakref.finalize(receiver_object, self._remove_receiver) else: receiver = ref(receiver, self._remove_receiver) with self.lock: self._clear_dead_receivers() for r_key, _ in self.receivers: if r_key == lookup_key: break else: self.receivers.append((lookup_key, receiver)) self.sender_receivers_cache.clear() return receiver def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None): """Disconnect receiver from sender for signal. If weak references are used, disconnect needn't be called. The receiver will be removed from dispatch automatically. Arguments: receiver (Callable): The registered receiver to disconnect. May be none if `dispatch_uid` is specified. sender (Any): The registered sender to disconnect. weak (bool): The weakref state to disconnect. dispatch_uid (Hashable): The unique identifier of the receiver to disconnect. 
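Returns:
    bool: :const:`True` if a matching receiver was found and
        removed, :const:`False` otherwise.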
""" if weak is not None: warnings.warn( 'Passing `weak` to disconnect has no effect.', CDeprecationWarning, stacklevel=2) if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) disconnected = False with self.lock: self._clear_dead_receivers() for index in range(len(self.receivers)): (r_key, _) = self.receivers[index] if r_key == lookup_key: disconnected = True del self.receivers[index] break self.sender_receivers_cache.clear() return disconnected def has_listeners(self, sender=None): return bool(self._live_receivers(sender)) def send(self, sender, **named): """Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop, so it is quite possible to not have all receivers called if a raises an error. Arguments: sender (Any): The sender of the signal. Either a specific object or :const:`None`. **named (Any): Named arguments which will be passed to receivers. Returns: List: of tuple pairs: `[(receiver, response), … ]`. """ responses = [] if not self.receivers or \ self.sender_receivers_cache.get(sender) is NO_RECEIVERS: return responses for receiver in self._live_receivers(sender): try: response = receiver(signal=self, sender=sender, **named) except Exception as exc: # pylint: disable=broad-except if not hasattr(exc, '__traceback__'): exc.__traceback__ = sys.exc_info()[2] logger.exception( 'Signal handler %r raised: %r', receiver, exc) responses.append((receiver, exc)) else: responses.append((receiver, response)) return responses send_robust = send # Compat with Django interface. def _clear_dead_receivers(self): # Warning: caller is assumed to hold self.lock if self._dead_receivers: self._dead_receivers = False new_receivers = [] for r in self.receivers: if isinstance(r[1], weakref.ReferenceType) and r[1]() is None: continue new_receivers.append(r) self.receivers = new_receivers def _live_receivers(self, sender): """Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers. """ receivers = None if self.use_caching and not self._dead_receivers: receivers = self.sender_receivers_cache.get(sender) # We could end up here with NO_RECEIVERS even if we do check this # case in .send() prior to calling _Live_receivers() due to # concurrent .send() call. if receivers is NO_RECEIVERS: return [] if receivers is None: with self.lock: self._clear_dead_receivers() senderkey = _make_id(sender) receivers = [] for (receiverkey, r_senderkey), receiver in self.receivers: if r_senderkey == NONE_ID or r_senderkey == senderkey: receivers.append(receiver) if self.use_caching: if not receivers: self.sender_receivers_cache[sender] = NO_RECEIVERS else: # Note: we must cache the weakref versions. self.sender_receivers_cache[sender] = receivers non_weak_receivers = [] for receiver in receivers: if isinstance(receiver, weakref.ReferenceType): # Dereference the weak reference. receiver = receiver() if receiver is not None: non_weak_receivers.append(receiver) else: non_weak_receivers.append(receiver) return non_weak_receivers def _remove_receiver(self, receiver=None): """Remove dead receivers from connections.""" # Mark that the self..receivers first has dead weakrefs. If so, # we will clean those up in connect, disconnect and _live_receivers # while holding self.lock. 
Note that doing the cleanup here isn't a # good idea, _remove_receiver() will be called as a side effect of # garbage collection, and so the call can happen wh ile we are already # holding self.lock. self._dead_receivers = True def __repr__(self): """``repr(signal)``.""" return '<{0}: {1} providing_args={2!r}>'.format( type(self).__name__, self.name, self.providing_args) def __str__(self): """``str(signal)``.""" return repr(self) celery-4.1.0/celery/utils/timer2.py0000644000175000017500000001064213130607475017135 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Scheduler for Python functions. .. note:: This is used for the thread-based worker only, not for amqp/redis/sqs/qpid where :mod:`kombu.async.timer` is used. """ from __future__ import absolute_import, print_function, unicode_literals import os import sys import threading from itertools import count from time import sleep from celery.five import THREAD_TIMEOUT_MAX from kombu.async.timer import Entry, Timer as Schedule, to_timestamp, logger TIMER_DEBUG = os.environ.get('TIMER_DEBUG') __all__ = ['Entry', 'Schedule', 'Timer', 'to_timestamp'] class Timer(threading.Thread): """Timer thread. Note: This is only used for transports not supporting AsyncIO. """ Entry = Entry Schedule = Schedule running = False on_tick = None _timer_count = count(1) if TIMER_DEBUG: # pragma: no cover def start(self, *args, **kwargs): import traceback print('- Timer starting') traceback.print_stack() super(Timer, self).start(*args, **kwargs) def __init__(self, schedule=None, on_error=None, on_tick=None, on_start=None, max_interval=None, **kwargs): self.schedule = schedule or self.Schedule(on_error=on_error, max_interval=max_interval) self.on_start = on_start self.on_tick = on_tick or self.on_tick threading.Thread.__init__(self) self._is_shutdown = threading.Event() self._is_stopped = threading.Event() self.mutex = threading.Lock() self.not_empty = threading.Condition(self.mutex) self.daemon = True self.name = 'Timer-{0}'.format(next(self._timer_count)) def _next_entry(self): with self.not_empty: delay, entry = next(self.scheduler) if entry is None: if delay is None: self.not_empty.wait(1.0) return delay return self.schedule.apply_entry(entry) __next__ = next = _next_entry # for 2to3 def run(self): try: self.running = True self.scheduler = iter(self.schedule) while not self._is_shutdown.isSet(): delay = self._next_entry() if delay: if self.on_tick: self.on_tick(delay) if sleep is None: # pragma: no cover break sleep(delay) try: self._is_stopped.set() except TypeError: # pragma: no cover # we lost the race at interpreter shutdown, # so gc collected built-in modules. 
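# (the same interpreter-shutdown race is the reason ``sleep`` is
# checked against :const:`None` in the loop above.)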
                pass
        except Exception as exc:
            logger.error('Thread Timer crashed: %r', exc, exc_info=True)
            os._exit(1)

    def stop(self):
        self._is_shutdown.set()
        if self.running:
            self._is_stopped.wait()
            self.join(THREAD_TIMEOUT_MAX)
            self.running = False

    def ensure_started(self):
        if not self.running and not self.isAlive():
            if self.on_start:
                self.on_start(self)
            self.start()

    def _do_enter(self, meth, *args, **kwargs):
        self.ensure_started()
        with self.mutex:
            entry = getattr(self.schedule, meth)(*args, **kwargs)
            self.not_empty.notify()
            return entry

    def enter(self, entry, eta, priority=None):
        return self._do_enter('enter_at', entry, eta, priority=priority)

    def call_at(self, *args, **kwargs):
        return self._do_enter('call_at', *args, **kwargs)

    def enter_after(self, *args, **kwargs):
        return self._do_enter('enter_after', *args, **kwargs)

    def call_after(self, *args, **kwargs):
        return self._do_enter('call_after', *args, **kwargs)

    def call_repeatedly(self, *args, **kwargs):
        return self._do_enter('call_repeatedly', *args, **kwargs)

    def exit_after(self, secs, priority=10):
        self.call_after(secs, sys.exit, priority)

    def cancel(self, tref):
        tref.cancel()

    def clear(self):
        self.schedule.clear()

    def empty(self):
        return not len(self)

    def __len__(self):
        return len(self.schedule)

    def __bool__(self):
        """``bool(timer)``."""
        return True
    __nonzero__ = __bool__

    @property
    def queue(self):
        return self.schedule.queue
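# Example -- an illustrative sketch only (not part of the original module),
# assuming kombu's scheduler signature ``call_after(secs, fun, args=(),
# kwargs={}, priority=0)``; ``heartbeat`` is a hypothetical callable:
#
#     t = Timer()                    # thread is started lazily on first use
#     t.call_after(3.0, print, args=('3 seconds passed',))
#     t.call_repeatedly(60.0, heartbeat)
#     ...
#     t.stop()                       # sets the shutdown event, joins thread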
celery-4.1.0/celery/utils/saferepr.py0000644000175000017500000002247413130607475017550 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Streaming, truncating, non-recursive version of :func:`repr`.

Differences from regular :func:`repr`:

- Sets are represented the Python 3 way: ``{1, 2}`` vs ``set([1, 2])``.
- Unicode strings do not have the ``u'`` prefix, even on Python 2.
- Empty set formatted as ``set()`` (Python 3), not ``set([])`` (Python 2).
- Longs don't have the ``L`` suffix.

Very slow with no limits, super quick with limits.
"""
from __future__ import absolute_import, unicode_literals

import sys
import traceback

from collections import deque, namedtuple
from decimal import Decimal
from itertools import chain
from numbers import Number
from pprint import _recursion

from celery.five import items, text_t

from .text import truncate

__all__ = ['saferepr', 'reprstream']

# pylint: disable=redefined-outer-name
# We cache globals and attribute lookups, so disable this warning.

IS_PY3 = sys.version_info[0] == 3

if IS_PY3:  # pragma: no cover
    range_t = (range, )
else:
    class range_t(object):  # noqa
        pass

#: Node representing literal text.
#:   - .value: is the literal text value
#:   - .truncate: specifies if this text can be truncated, for things like
#:                LIT_DICT_END this will be False, as we always display
#:                the ending brackets, e.g:  [[[1, 2, 3, ...,], ..., ]]
#:   - .direction: If +1 the current level is incremented by one,
#:                 if -1 the current level is decremented by one, and
#:                 if 0 the current level is unchanged.
_literal = namedtuple('_literal', ('value', 'truncate', 'direction'))

#: Node representing a dictionary key.
_key = namedtuple('_key', ('value',))

#: Node representing quoted text, e.g. a string value.
_quoted = namedtuple('_quoted', ('value',))

#: Recursion protection.
_dirty = namedtuple('_dirty', ('objid',))

#: Types that are represented as chars.
chars_t = (bytes, text_t)

#: Types that are regarded as safe to call repr on.
safe_t = (Number,)

#: Set types.
set_t = (frozenset, set)

LIT_DICT_START = _literal('{', False, +1)
LIT_DICT_KVSEP = _literal(': ', True, 0)
LIT_DICT_END = _literal('}', False, -1)
LIT_LIST_START = _literal('[', False, +1)
LIT_LIST_END = _literal(']', False, -1)
LIT_LIST_SEP = _literal(', ', True, 0)
LIT_SET_START = _literal('{', False, +1)
LIT_SET_END = _literal('}', False, -1)
LIT_TUPLE_START = _literal('(', False, +1)
LIT_TUPLE_END = _literal(')', False, -1)
LIT_TUPLE_END_SV = _literal(',)', False, -1)


def saferepr(o, maxlen=None, maxlevels=3, seen=None):
    # type: (Any, int, int, Set) -> str
    """Safe version of :func:`repr`.

    Warning:
        Make sure you set the maxlen argument, or it will be very slow
        for recursive objects.  With the maxlen set, it's often faster
        than built-in repr.
    """
    return ''.join(_saferepr(
        o, maxlen=maxlen, maxlevels=maxlevels, seen=seen
    ))


def _chaindict(mapping,
               LIT_DICT_KVSEP=LIT_DICT_KVSEP,
               LIT_LIST_SEP=LIT_LIST_SEP):
    # type: (Dict, _literal, _literal) -> Iterator[Any]
    size = len(mapping)
    for i, (k, v) in enumerate(items(mapping)):
        yield _key(k)
        yield LIT_DICT_KVSEP
        yield v
        if i < (size - 1):
            yield LIT_LIST_SEP


def _chainlist(it, LIT_LIST_SEP=LIT_LIST_SEP):
    # type: (List) -> Iterator[Any]
    size = len(it)
    for i, v in enumerate(it):
        yield v
        if i < (size - 1):
            yield LIT_LIST_SEP


def _repr_empty_set(s):
    # type: (Set) -> str
    return '%s()' % (type(s).__name__,)


def _safetext(val):
    # type: (AnyStr) -> str
    if isinstance(val, bytes):
        try:
            val.encode('utf-8')
        except UnicodeDecodeError:
            # is bytes with unrepresentable characters, attempt
            # to convert back to unicode
            return val.decode('utf-8', errors='backslashreplace')
    return val


def _format_binary_bytes(val, maxlen, ellipsis='...'):
    # type: (bytes, int, str) -> str
    if maxlen and len(val) > maxlen:
        # we don't want to copy all the data, just take what we need.
        chunk = memoryview(val)[:maxlen].tobytes()
        return _bytes_prefix("'{0}{1}'".format(
            _repr_binary_bytes(chunk), ellipsis))
    return _bytes_prefix("'{0}'".format(_repr_binary_bytes(val)))


def _bytes_prefix(s):
    return 'b' + s if IS_PY3 else s


def _repr_binary_bytes(val):
    # type: (bytes) -> str
    try:
        return val.decode('utf-8')
    except UnicodeDecodeError:
        # possibly not unicode, but binary data so format as hex.
        try:
            ashex = val.hex
        except AttributeError:  # pragma: no cover
            # Python 3.4
            return val.decode('utf-8', errors='replace')
        else:
            # Python 3.5+
            return ashex()


def _format_chars(val, maxlen):
    # type: (AnyStr, int) -> str
    if isinstance(val, bytes):  # pragma: no cover
        return _format_binary_bytes(val, maxlen)
    else:
        return "'{0}'".format(truncate(val, maxlen))


def _repr(obj):
    # type: (Any) -> str
    try:
        return repr(obj)
    except Exception as exc:
        return '<Unrepresentable {0!r}{1:#x}: {2!r} {3!r}>'.format(
            type(obj), id(obj), exc,
            '\n'.join(traceback.format_stack()))


def _saferepr(o, maxlen=None, maxlevels=3, seen=None):
    # type: (Any, int, int, Set) -> str
    stack = deque([iter([o])])
    for token, it in reprstream(stack, seen=seen, maxlevels=maxlevels):
        if maxlen is not None and maxlen <= 0:
            yield ', ...'
            # move rest back to stack, so that we can include
            # dangling parens.
            stack.append(it)
            break
        if isinstance(token, _literal):
            val = token.value
        elif isinstance(token, _key):
            val = saferepr(token.value, maxlen, maxlevels)
        elif isinstance(token, _quoted):
            val = _format_chars(token.value, maxlen)
        else:
            val = _safetext(truncate(token, maxlen))
        yield val
        if maxlen is not None:
            maxlen -= len(val)
    for rest1 in stack:
        # maxlen exceeded, process any dangling parens.
for rest2 in rest1: if isinstance(rest2, _literal) and not rest2.truncate: yield rest2.value def _reprseq(val, lit_start, lit_end, builtin_type, chainer): # type: (Sequence, _literal, _literal, Any, Any) -> Tuple[Any, ...] if type(val) is builtin_type: # noqa return lit_start, lit_end, chainer(val) return ( _literal('%s(%s' % (type(val).__name__, lit_start.value), False, +1), _literal('%s)' % (lit_end.value,), False, -1), chainer(val) ) def reprstream(stack, seen=None, maxlevels=3, level=0, isinstance=isinstance): """Streaming repr, yielding tokens.""" # type: (deque, Set, int, int, Callable) -> Iterator[Any] seen = seen or set() append = stack.append popleft = stack.popleft is_in_seen = seen.__contains__ discard_from_seen = seen.discard add_to_seen = seen.add while stack: lit_start = lit_end = None it = popleft() for val in it: orig = val if isinstance(val, _dirty): discard_from_seen(val.objid) continue elif isinstance(val, _literal): level += val.direction yield val, it elif isinstance(val, _key): yield val, it elif isinstance(val, Decimal): yield _repr(val), it elif isinstance(val, safe_t): yield text_t(val), it elif isinstance(val, chars_t): yield _quoted(val), it elif isinstance(val, range_t): # pragma: no cover yield _repr(val), it else: if isinstance(val, set_t): if not val: yield _repr_empty_set(val), it continue lit_start, lit_end, val = _reprseq( val, LIT_SET_START, LIT_SET_END, set, _chainlist, ) elif isinstance(val, tuple): lit_start, lit_end, val = ( LIT_TUPLE_START, LIT_TUPLE_END_SV if len(val) == 1 else LIT_TUPLE_END, _chainlist(val)) elif isinstance(val, dict): lit_start, lit_end, val = ( LIT_DICT_START, LIT_DICT_END, _chaindict(val)) elif isinstance(val, list): lit_start, lit_end, val = ( LIT_LIST_START, LIT_LIST_END, _chainlist(val)) else: # other type of object yield _repr(val), it continue if maxlevels and level >= maxlevels: yield '%s...%s' % (lit_start.value, lit_end.value), it continue objid = id(orig) if is_in_seen(objid): yield _recursion(orig), it continue add_to_seen(objid) # Recurse into the new list/tuple/dict/etc by tacking # the rest of our iterable onto the new it: this way # it works similar to a linked list. 
                append(chain([lit_start], val, [_dirty(objid), lit_end], it))
                break
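# Example -- an illustrative sketch only (not part of the original module):
#
#     from celery.utils.saferepr import saferepr
#     print(saferepr(list(range(1000)), maxlen=30))
#     # -> the output is truncated to roughly 30 characters and ends in
#     #    ', ...]', so the dangling closing bracket is preserved.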
celery-4.1.0/celery/utils/encoding.py0000644000175000017500000000051613130607475017520 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""**DEPRECATED**: This module has moved to :mod:`kombu.utils.encoding`."""
from __future__ import absolute_import, unicode_literals

from kombu.utils.encoding import (  # noqa
    default_encode, default_encoding, bytes_t, bytes_to_str, str_t,
    str_to_bytes, ensure_bytes, from_utf8, safe_str, safe_repr,
)
celery-4.1.0/celery/utils/time.py0000644000175000017500000002732013130607475016672 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Utilities related to dates, times, intervals, and timezones."""
from __future__ import absolute_import, print_function, unicode_literals

import numbers
import os
import sys
import time as _time

from calendar import monthrange
from datetime import date, datetime, timedelta, tzinfo

from kombu.utils.functional import reprcall
from kombu.utils.objects import cached_property
from pytz import timezone as _timezone, AmbiguousTimeError, FixedOffset

from celery.five import python_2_unicode_compatible, string_t

from .functional import dictfilter
from .iso8601 import parse_iso8601
from .text import pluralize

__all__ = [
    'LocalTimezone', 'timezone', 'maybe_timedelta',
    'delta_resolution', 'remaining', 'rate', 'weekday',
    'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware',
    'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset',
    'adjust_timestamp',
]

PY3 = sys.version_info[0] == 3
PY33 = sys.version_info >= (3, 3)

C_REMDEBUG = os.environ.get('C_REMDEBUG', False)

DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'
WEEKDAYS = dict(zip(DAYNAMES, range(7)))

RATE_MODIFIER_MAP = {
    's': lambda n: n,
    'm': lambda n: n / 60.0,
    'h': lambda n: n / 60.0 / 60.0,
}

TIME_UNITS = (
    ('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')),
    ('hour', 60 * 60.0, lambda n: format(n, '.2f')),
    ('minute', 60.0, lambda n: format(n, '.2f')),
    ('second', 1.0, lambda n: format(n, '.2f')),
)

ZERO = timedelta(0)

_local_timezone = None


@python_2_unicode_compatible
class LocalTimezone(tzinfo):
    """Local time implementation.

    Note:
        Used only when the :setting:`enable_utc` setting is disabled.
    """

    _offset_cache = {}

    def __init__(self):
        # This code is moved in __init__ to execute it as late as possible
        # See get_default_timezone().
        self.STDOFFSET = timedelta(seconds=-_time.timezone)
        if _time.daylight:
            self.DSTOFFSET = timedelta(seconds=-_time.altzone)
        else:
            self.DSTOFFSET = self.STDOFFSET
        self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
        tzinfo.__init__(self)

    def __repr__(self):
        return '<LocalTimezone: UTC{0:+03d}>'.format(
            int(self.DSTOFFSET.total_seconds() / 3600),
        )

    def utcoffset(self, dt):
        return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET

    def dst(self, dt):
        return self.DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        return _time.tzname[self._isdst(dt)]

    if PY3:  # pragma: no cover

        def fromutc(self, dt):
            # The base tzinfo class no longer implements a DST
            # offset aware .fromutc() in Python 3 (Issue #2306).

            # I'd rather rely on pytz to do this, than port
            # the C code from cpython's fromutc [asksol]
            offset = int(self.utcoffset(dt).seconds / 60.0)
            try:
                tz = self._offset_cache[offset]
            except KeyError:
                tz = self._offset_cache[offset] = FixedOffset(offset)
            return tz.fromutc(dt.replace(tzinfo=tz))

    def _isdst(self, dt):
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0


class _Zone(object):

    def tz_or_local(self, tzinfo=None):
        # pylint: disable=redefined-outer-name
        if tzinfo is None:
            return self.local
        return self.get_timezone(tzinfo)

    def to_local(self, dt, local=None, orig=None):
        if is_naive(dt):
            dt = make_aware(dt, orig or self.utc)
        return localize(dt, self.tz_or_local(local))

    if PY33:  # pragma: no cover

        def to_system(self, dt):
            # tz=None is a special case since Python 3.3, and will
            # convert to the current local timezone (Issue #2306).
            return dt.astimezone(tz=None)

    else:

        def to_system(self, dt):  # noqa
            return localize(dt, self.local)

    def to_local_fallback(self, dt):
        if is_naive(dt):
            return make_aware(dt, self.local)
        return localize(dt, self.local)

    def get_timezone(self, zone):
        if isinstance(zone, string_t):
            return _timezone(zone)
        return zone

    @cached_property
    def local(self):
        return LocalTimezone()

    @cached_property
    def utc(self):
        return self.get_timezone('UTC')


timezone = _Zone()


def maybe_timedelta(delta):
    """Convert integer to timedelta, if argument is an integer."""
    if isinstance(delta, numbers.Real):
        return timedelta(seconds=delta)
    return delta


def delta_resolution(dt, delta):
    """Round a :class:`~datetime.datetime` to the resolution of timedelta.

    If the :class:`~datetime.timedelta` is in days, the
    :class:`~datetime.datetime` will be rounded to the nearest days,
    if the :class:`~datetime.timedelta` is in hours the
    :class:`~datetime.datetime` will be rounded to the nearest hour,
    and so on until seconds, which will just return the original
    :class:`~datetime.datetime`.
    """
    delta = max(delta.total_seconds(), 0)

    resolutions = ((3, lambda x: x / 86400),
                   (4, lambda x: x / 3600),
                   (5, lambda x: x / 60))

    args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
    for res, predicate in resolutions:
        if predicate(delta) >= 1.0:
            return datetime(*args[:res], tzinfo=dt.tzinfo)
    return dt


def remaining(start, ends_in, now=None, relative=False):
    """Calculate the remaining time for a start date and a timedelta.

    For example, "how many seconds left for 30 seconds after start?"

    Arguments:
        start (~datetime.datetime): Starting date.
        ends_in (~datetime.timedelta): The end delta.
        relative (bool): If enabled the end time will be calculated
            using :func:`delta_resolution` (i.e., rounded to the
            resolution of `ends_in`).
        now (Callable): Function returning the current time and date.
            Defaults to :func:`datetime.utcnow`.

    Returns:
        ~datetime.timedelta: Remaining time.
    """
    now = now or datetime.utcnow()
    end_date = start + ends_in
    if relative:
        end_date = delta_resolution(end_date, ends_in)
    ret = end_date - now
    if C_REMDEBUG:  # pragma: no cover
        print('rem: NOW:%r START:%r ENDS_IN:%r END_DATE:%s REM:%s' % (
            now, start, ends_in, end_date, ret))
    return ret


def rate(r):
    """Convert rate string (`"100/m"`, `"2/h"` or `"0.5/s"`) to seconds."""
    if r:
        if isinstance(r, string_t):
            ops, _, modifier = r.partition('/')
            return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0
        return r or 0
    return 0
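# Example -- an illustrative sketch only (not part of the original module).
# ``rate()`` normalizes a rate-limit string to executions per second:
#
#     rate('100/m')   # -> 100 / 60.0   ~= 1.667
#     rate('2/h')     # -> 2 / 3600.0   ~= 0.00056
#     rate('0.5/s')   # -> 0.5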
def weekday(name):
    """Return the position of a weekday: 0 - 6, where 0 is Sunday.

    Example:
        >>> weekday('sunday'), weekday('sun'), weekday('mon')
        (0, 0, 1)
    """
    abbreviation = name[0:3].lower()
    try:
        return WEEKDAYS[abbreviation]
    except KeyError:
        # Show original day name in exception, instead of abbr.
        raise KeyError(name)


def humanize_seconds(secs, prefix='', sep='', now='now', microseconds=False):
    """Show seconds in human form.

    For example, 60 becomes "1 minute", and 7200 becomes "2 hours".

    Arguments:
        prefix (str): can be used to add a preposition to the output
            (e.g., 'in' will give 'in 1 second', but add nothing to 'now').
        now (str): Literal 'now'.
        microseconds (bool): Include microseconds.
    """
    secs = float(format(float(secs), '.2f'))
    for unit, divider, formatter in TIME_UNITS:
        if secs >= divider:
            w = secs / float(divider)
            return '{0}{1}{2} {3}'.format(prefix, sep, formatter(w),
                                          pluralize(w, unit))
    if microseconds and secs > 0.0:
        return '{prefix}{sep}{0:.2f} seconds'.format(
            secs, sep=sep, prefix=prefix)
    return now


def maybe_iso8601(dt):
    """Either ``datetime | str -> datetime`` or ``None -> None``."""
    if not dt:
        return
    if isinstance(dt, datetime):
        return dt
    return parse_iso8601(dt)


def is_naive(dt):
    """Return :const:`True` if :class:`~datetime.datetime` is naive."""
    return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None


def make_aware(dt, tz):
    """Set timezone for a :class:`~datetime.datetime` object."""
    try:
        _localize = tz.localize
    except AttributeError:
        return dt.replace(tzinfo=tz)
    else:
        # works on pytz timezones
        try:
            return _localize(dt, is_dst=None)
        except AmbiguousTimeError:
            return min(_localize(dt, is_dst=True),
                       _localize(dt, is_dst=False))


def localize(dt, tz):
    """Convert aware :class:`~datetime.datetime` to another timezone."""
    dt = dt.astimezone(tz)
    try:
        _normalize = tz.normalize
    except AttributeError:  # non-pytz tz
        return dt
    else:
        try:
            return _normalize(dt, is_dst=None)
        except TypeError:
            return _normalize(dt)
        except AmbiguousTimeError:
            return min(_normalize(dt, is_dst=True),
                       _normalize(dt, is_dst=False))


def to_utc(dt):
    """Convert naive :class:`~datetime.datetime` to UTC."""
    return make_aware(dt, timezone.utc)


def maybe_make_aware(dt, tz=None):
    """Convert dt to aware datetime, do nothing if dt is already aware."""
    if is_naive(dt):
        dt = to_utc(dt)
        return localize(
            dt, timezone.utc if tz is None else timezone.tz_or_local(tz),
        )
    return dt


@python_2_unicode_compatible
class ffwd(object):
    """Version of ``dateutil.relativedelta`` that only supports addition."""

    def __init__(self, year=None, month=None, weeks=0, weekday=None,
                 day=None, hour=None, minute=None, second=None,
                 microsecond=None, **kwargs):
        # pylint: disable=redefined-outer-name
        # weekday is also a function in outer scope.
        self.year = year
        self.month = month
        self.weeks = weeks
        self.weekday = weekday
        self.day = day
        self.hour = hour
        self.minute = minute
        self.second = second
        self.microsecond = microsecond
        self.days = weeks * 7
        self._has_time = self.hour is not None or self.minute is not None

    def __repr__(self):
        return reprcall('ffwd', (), self._fields(weeks=self.weeks,
                                                 weekday=self.weekday))

    def __radd__(self, other):
        if not isinstance(other, date):
            return NotImplemented
        year = self.year or other.year
        month = self.month or other.month
        day = min(monthrange(year, month)[1], self.day or other.day)
        ret = other.replace(**dict(dictfilter(self._fields()),
                                   year=year, month=month, day=day))
        if self.weekday is not None:
            ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7)
        return ret + timedelta(days=self.days)

    def _fields(self, **extra):
        return dictfilter({
            'year': self.year, 'month': self.month, 'day': self.day,
            'hour': self.hour, 'minute': self.minute,
            'second': self.second, 'microsecond': self.microsecond,
        }, **extra)


def utcoffset(time=_time, localtime=_time.localtime):
    """Return the current offset to UTC in hours."""
    if localtime().tm_isdst:
        return time.altzone // 3600
    return time.timezone // 3600


def adjust_timestamp(ts, offset, here=utcoffset):
    """Adjust timestamp based on provided utcoffset."""
    return ts - (offset - here()) * 3600
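# Example -- an illustrative sketch only (not part of the original module).
# ``ffwd`` only supports being added to a date/datetime; fields that are
# given replace those of the operand, fields left as None are kept:
#
#     from datetime import datetime
#     datetime(2017, 1, 15, 10, 30) + ffwd(day=1, hour=0, minute=0)
#     # -> datetime(2017, 1, 1, 0, 0)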
celery-4.1.0/celery/utils/objects.py0000644000175000017500000001016613130607475017365 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Object related utilities, including introspection, etc."""
from __future__ import absolute_import, unicode_literals

from functools import reduce

__all__ = ['Bunch', 'FallbackContext', 'getitem_property', 'mro_lookup']


class Bunch(object):
    """Object that enables you to modify attributes."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)


def mro_lookup(cls, attr, stop=set(), monkey_patched=[]):
    """Return the first node by MRO order that defines an attribute.

    Arguments:
        cls (Any): Child class to traverse.
        attr (str): Name of attribute to find.
        stop (Set[Any]): A set of types that if reached will stop
            the search.
        monkey_patched (Sequence): Use one of the stop classes
            if the attribute's module origin isn't in this list.
            Used to detect monkey patched attributes.

    Returns:
        Any: The attribute value, or :const:`None` if not found.
    """
    for node in cls.mro():
        if node in stop:
            try:
                value = node.__dict__[attr]
                module_origin = value.__module__
            except (AttributeError, KeyError):
                pass
            else:
                if module_origin not in monkey_patched:
                    return node
            return
        if attr in node.__dict__:
            return node


class FallbackContext(object):
    """Context workaround.

    The built-in ``@contextmanager`` utility does not work well
    when wrapping other contexts, as the traceback is wrong when
    the wrapped context raises.

    This solves this problem and can be used instead of ``@contextmanager``
    in this example::

        @contextmanager
        def connection_or_default_connection(connection=None):
            if connection:
                # user already has a connection, shouldn't close
                # after use
                yield connection
            else:
                # must create a new connection, and also close the
                # connection after the block returns
                with create_new_connection() as connection:
                    yield connection

    This wrapper can be used instead for the above like this::

        def connection_or_default_connection(connection=None):
            return FallbackContext(connection, create_new_connection)
    """

    def __init__(self, provided, fallback, *fb_args, **fb_kwargs):
        self.provided = provided
        self.fallback = fallback
        self.fb_args = fb_args
        self.fb_kwargs = fb_kwargs
        self._context = None

    def __enter__(self):
        if self.provided is not None:
            return self.provided
        context = self._context = self.fallback(
            *self.fb_args, **self.fb_kwargs
        ).__enter__()
        return context

    def __exit__(self, *exc_info):
        if self._context is not None:
            return self._context.__exit__(*exc_info)


class getitem_property(object):
    """Attribute -> dict key descriptor.

    The target object must support ``__getitem__``,
    and optionally ``__setitem__``.

    Example:
        >>> from collections import defaultdict

        >>> class Me(dict):
        ...     deep = defaultdict(dict)
        ...
        ...     foo = getitem_property('foo')
        ...     deep_thing = getitem_property('deep.thing')

        >>> me = Me()
        >>> me.foo
        None

        >>> me.foo = 10
        >>> me.foo
        10
        >>> me['foo']
        10

        >>> me.deep_thing = 42
        >>> me.deep_thing
        42
        >>> me.deep
        defaultdict(<type 'dict'>, {'thing': 42})
    """

    def __init__(self, keypath, doc=None):
        path, _, self.key = keypath.rpartition('.')
        self.path = path.split('.') if path else None
        self.__doc__ = doc

    def _path(self, obj):
        return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path
                else obj)

    def __get__(self, obj, type=None):
        if obj is None:
            return type
        return self._path(obj).get(self.key)

    def __set__(self, obj, value):
        self._path(obj)[self.key] = value
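# Example -- an illustrative sketch only (not part of the original module).
# ``connection_or_default_connection`` and ``conn`` are the hypothetical
# names from the docstring above:
#
#     with connection_or_default_connection() as connection:
#         ...                     # fallback context entered and exited
#
#     with connection_or_default_connection(conn) as c:
#         assert c is conn        # provided object is reused, not released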
""" from __future__ import absolute_import, print_function, unicode_literals import sys from .functional import memoize # noqa from .nodenames import worker_direct, nodename, nodesplit __all__ = ['worker_direct', 'gen_task_name', 'nodename', 'nodesplit', 'cached_property', 'uuid'] PY3 = sys.version_info[0] == 3 # ------------------------------------------------------------------------ # # > XXX Compat from .log import LOG_LEVELS # noqa from .imports import ( # noqa qualname as get_full_cls_name, symbol_by_name as get_cls_by_name, instantiate, import_from_cwd, gen_task_name, ) from .functional import chunks, noop # noqa from kombu.utils.objects import cached_property # noqa from kombu.utils.uuid import uuid # noqa gen_unique_id = uuid celery-4.1.0/celery/utils/static/0000755000175000017500000000000013135426347016647 5ustar omeromer00000000000000celery-4.1.0/celery/utils/static/__init__.py0000644000175000017500000000054413130607475020761 0ustar omeromer00000000000000"""Static files.""" from __future__ import absolute_import, unicode_literals import os def get_file(*args): # type: (*str) -> str """Get filename for static file.""" return os.path.join(os.path.abspath(os.path.dirname(__file__)), *args) def logo(): # type: () -> bytes """Celery logo image.""" return get_file('celery_128.png') celery-4.1.0/celery/utils/static/celery_128.png0000644000175000017500000000477413130607475021244 0ustar omeromer00000000000000‰PNG  IHDR€€Ã>aËtEXtSoftwareAdobe ImageReadyqÉe<(iTXtXML:com.adobe.xmp MÁÙjIDATxÚìOLUÇßοý¿ì²ü“-•?©¥”ÒZ9Ô&h8™´j¬MøSMèAã̓'õâ̓‰/&5µKŒµÑ“±h0±©T5ØbC üŸý33þ^aqׂìÎÎ<1ý~š³Yx3›ý~æÍÛ™}SeY Ü¿Hx € € € € € € € € €v ÿáñxYÙÅë}Í´è¤j§zˆjUˆ*ˆ·Ú<ôϯF§#ÞÊ)¯ZÆ4ÙñHr= _kT«T—©ÎP}Øê5x»ìÝá<üA)Pèûhq’ª›ª‘ˆ!¨ÅgâÁÝúÕHÅ*°ÙÕ ’`¦d(ø£´x“ê Ä!noøjnW…3’¤ÖÚ\ — ³1ØclŠ žïåïr“‰8¨›O'"-³²¬U—¸ªª¨>(zH២Å5„/p¯÷HìpóÜîØ!Õ𳼘7, x> xý8¡I~–ˆ¶ÍkŠ?æðªÛ €Âç¿@u‘ˆÃ«„ÙîèA]’”2Vï+H€õð/Qµ"qø”«‹JKÉçÒ&¦²¤mºý _p·/X]´-Cá«.næâ¶¬óÑí DòÈ,QvÀ¤n_qq3)ª·ÿU€õÑ>|‚© ïe4àsóô¼IÕ×êØR€œÏù@ ao ûªÜÜÄuªãþ¹Ü'ïéj’«úç´ ±]ey£S«ãçþ £š§ºÍÖ® QøéþqžçGº^J­ê‰XjãÍLUJðEõ_RЩBå `™·‡XdIaÕ±’ö~¾§Ÿ¢Ð‡lõ>ÙƒW»i£‘ˆ¥<ò “ÍnóAªG솟טã Ä!žŠ²]v›~LÕ•½¾o{üñw÷o´#±xµ Ú;ÍÿÕÉRÃ߀O›¦)#±D¶.îñ‘|/…ŸtäÿaYfâø/ˆÛivšÂÿÕ± ký¿uqˆ'àÚiöŽ£ç Öz«qˆ…Ñëú‹>öÓÞ?î¸tüÇ·vEUþ–ý]Ìoœ~k=€ia~€`dÙÖ¿«®Ä#Ù»Ü?ãô븫a(V†DDµqÔÍÎ Zf˜½#Ó²u'îôëÈ–ÉÿB€ýn 0ŽHÄ’6ùe{«Øf»%À"‹e™,mèÅ6;<±Ô¿Ç .#ñèi[GÞWÝàkÄ!ž•ô¼f}Ô ´8*À±ÆÓ|¾ß D"X€ä¬füB?Iàu²à ±¤Ì: ,ÚiÊ/Þ% d'8‹HÄ3¯OÛmúÕ'$AÈè00ÊÖ¾Y ² ßb¦™¶ÛœOÓÿ‘$è°»‚¼;„¬ßõã;Ä"–x žU„êK]MQ_ ßò1Ÿö›†y±ˆC’d¶·¶Ã‰¹œ‚&†l)ÀàH÷‘t29œý †XxkJ¸ú½\>5ì’àË\î¹|¢uà{EÓú‰Xæo²Ù…)77ÁgŸ|Aã…ž-ǹ|zåùß3éL=¢Ÿ%ÔRßɼî^*æãƒ–Æ`ÏĦ=ÀÆ‹QÕvY‘‹8 3ÃÆn³L&åæfø4¤×6;Ç3-çfH‚‡%YÒ8’©%6N˜FÆÍÍÛV€»ãý4hEO –e}Ž]»ñ-õI·F≂È‘ AQ•ID#Žý¼äYÕÜ@ßv¸çéêϤR=øˆ(ðGbuUm¬2æèx|ˆÔäòìzU¯÷QY•o!1˜–É&§f¿ý1Dãƒå¤C«=c«Èë Fº^æ7”0ÒF1‰çT©³•û–4Ŷ»÷³œ›E—|»øÁ‘î'MÃxO/Ç cqD‚ÕVuyÃl$P üüE„ïÌíâ·èž²,³›O6åó ù”3Ì:r¿WÊÍh°öNÀ]ö«!Y–µ=ïc…þ‡à>dâ-€ € € € € € € € € €€Ä_ WaÐ]ṈIEND®B`‚celery-4.1.0/celery/utils/graph.py0000644000175000017500000002203613130607475017034 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Dependency graph implementation.""" from __future__ import absolute_import, print_function, unicode_literals from collections import Counter from textwrap import dedent from kombu.utils.encoding import safe_str, bytes_to_str from celery.five import items, python_2_unicode_compatible __all__ = ['DOT', 'CycleError', 'DependencyGraph', 'GraphFormatter'] class DOT: """Constants related to the dot format.""" HEAD = dedent(""" {IN}{type} {id} {{ {INp}graph [{attrs}] """) ATTR = '{name}={value}' NODE = '{INp}"{0}" [{attrs}]' EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]' ATTRSEP = ', ' DIRS = {'graph': '--', 
    DIRS = {'graph': '--', 'digraph': '->'}
    TAIL = '{IN}}}'


class CycleError(Exception):
    """A cycle was detected in an acyclic graph."""


@python_2_unicode_compatible
class DependencyGraph(object):
    """A directed acyclic graph of objects and their dependencies.

    Supports a robust topological sort
    to detect the order in which they must be handled.

    Takes an optional iterator of ``(obj, dependencies)``
    tuples to build the graph from.

    Warning:
        Does not support cycle detection.
    """

    def __init__(self, it=None, formatter=None):
        self.formatter = formatter or GraphFormatter()
        self.adjacent = {}
        if it is not None:
            self.update(it)

    def add_arc(self, obj):
        """Add an object to the graph."""
        self.adjacent.setdefault(obj, [])

    def add_edge(self, A, B):
        """Add an edge from object ``A`` to object ``B``.

        I.e. ``A`` depends on ``B``.
        """
        self[A].append(B)

    def connect(self, graph):
        """Add nodes from another graph."""
        self.adjacent.update(graph.adjacent)

    def topsort(self):
        """Sort the graph topologically.

        Returns:
            List: of objects in the order in which they must be handled.
        """
        graph = DependencyGraph()
        components = self._tarjan72()

        NC = {
            node: component for component in components for node in component
        }
        for component in components:
            graph.add_arc(component)
        for node in self:
            node_c = NC[node]
            for successor in self[node]:
                successor_c = NC[successor]
                if node_c != successor_c:
                    graph.add_edge(node_c, successor_c)
        return [t[0] for t in graph._khan62()]

    def valency_of(self, obj):
        """Return the valency (degree) of a vertex in the graph."""
        try:
            l = [len(self[obj])]
        except KeyError:
            return 0
        for node in self[obj]:
            l.append(self.valency_of(node))
        return sum(l)

    def update(self, it):
        """Update graph with data from a list of ``(obj, deps)`` tuples."""
        tups = list(it)
        for obj, _ in tups:
            self.add_arc(obj)
        for obj, deps in tups:
            for dep in deps:
                self.add_edge(obj, dep)

    def edges(self):
        """Return a generator that yields all nodes with outgoing edges."""
        return (obj for obj, adj in items(self) if adj)

    def _khan62(self):
        """Perform Kahn's simple topological sort algorithm from '62.

        See https://en.wikipedia.org/wiki/Topological_sorting
        """
        count = Counter()
        result = []

        for node in self:
            for successor in self[node]:
                count[successor] += 1
        ready = [node for node in self if not count[node]]

        while ready:
            node = ready.pop()
            result.append(node)

            for successor in self[node]:
                count[successor] -= 1
                if count[successor] == 0:
                    ready.append(successor)
        result.reverse()
        return result

    def _tarjan72(self):
        """Perform Tarjan's algorithm to find strongly connected components.

        See Also:
            :wikipedia:`Tarjan%27s_strongly_connected_components_algorithm`
        """
        result, stack, low = [], [], {}

        def visit(node):
            if node in low:
                return
            num = len(low)
            low[node] = num
            stack_pos = len(stack)
            stack.append(node)

            for successor in self[node]:
                visit(successor)
                low[node] = min(low[node], low[successor])

            if num == low[node]:
                component = tuple(stack[stack_pos:])
                stack[stack_pos:] = []
                result.append(component)
                for item in component:
                    low[item] = len(self)

        for node in self:
            visit(node)

        return result

    def to_dot(self, fh, formatter=None):
        """Convert the graph to DOT format.

        Arguments:
            fh (IO): A file, or a file-like object to write the graph to.
            formatter (celery.utils.graph.GraphFormatter): Custom graph
                formatter to use.
""" seen = set() draw = formatter or self.formatter def P(s): print(bytes_to_str(s), file=fh) def if_not_seen(fun, obj): if draw.label(obj) not in seen: P(fun(obj)) seen.add(draw.label(obj)) P(draw.head()) for obj, adjacent in items(self): if not adjacent: if_not_seen(draw.terminal_node, obj) for req in adjacent: if_not_seen(draw.node, obj) P(draw.edge(obj, req)) P(draw.tail()) def format(self, obj): return self.formatter(obj) if self.formatter else obj def __iter__(self): return iter(self.adjacent) def __getitem__(self, node): return self.adjacent[node] def __len__(self): return len(self.adjacent) def __contains__(self, obj): return obj in self.adjacent def _iterate_items(self): return items(self.adjacent) items = iteritems = _iterate_items def __repr__(self): return '\n'.join(self.repr_node(N) for N in self) def repr_node(self, obj, level=1, fmt='{0}({1})'): output = [fmt.format(obj, self.valency_of(obj))] if obj in self: for other in self[obj]: d = fmt.format(other, self.valency_of(other)) output.append(' ' * level + d) output.extend(self.repr_node(other, level + 1).split('\n')[1:]) return '\n'.join(output) class GraphFormatter(object): """Format dependency graphs.""" _attr = DOT.ATTR.strip() _node = DOT.NODE.strip() _edge = DOT.EDGE.strip() _head = DOT.HEAD.strip() _tail = DOT.TAIL.strip() _attrsep = DOT.ATTRSEP _dirs = dict(DOT.DIRS) scheme = { 'shape': 'box', 'arrowhead': 'vee', 'style': 'filled', 'fontname': 'HelveticaNeue', } edge_scheme = { 'color': 'darkseagreen4', 'arrowcolor': 'black', 'arrowsize': 0.7, } node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'} term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'} graph_scheme = {'bgcolor': 'mintcream'} def __init__(self, root=None, type=None, id=None, indent=0, inw=' ' * 4, **scheme): self.id = id or 'dependencies' self.root = root self.type = type or 'digraph' self.direction = self._dirs[self.type] self.IN = inw * (indent or 0) self.INp = self.IN + inw self.scheme = dict(self.scheme, **scheme) self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root)) def attr(self, name, value): value = '"{0}"'.format(value) return self.FMT(self._attr, name=name, value=value) def attrs(self, d, scheme=None): d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d) return self._attrsep.join( safe_str(self.attr(k, v)) for k, v in items(d) ) def head(self, **attrs): return self.FMT( self._head, id=self.id, type=self.type, attrs=self.attrs(attrs, self.graph_scheme), ) def tail(self): return self.FMT(self._tail) def label(self, obj): return obj def node(self, obj, **attrs): return self.draw_node(obj, self.node_scheme, attrs) def terminal_node(self, obj, **attrs): return self.draw_node(obj, self.term_scheme, attrs) def edge(self, a, b, **attrs): return self.draw_edge(a, b, **attrs) def _enc(self, s): return s.encode('utf-8', 'ignore') def FMT(self, fmt, *args, **kwargs): return self._enc(fmt.format( *args, **dict(kwargs, IN=self.IN, INp=self.INp) )) def draw_edge(self, a, b, scheme=None, attrs=None): return self.FMT( self._edge, self.label(a), self.label(b), dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme), ) def draw_node(self, obj, scheme=None, attrs=None): return self.FMT( self._node, self.label(obj), attrs=self.attrs(attrs, scheme), ) celery-4.1.0/celery/utils/text.py0000644000175000017500000001260013130607475016713 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Text formatting utilities.""" from __future__ import absolute_import, unicode_literals import re from collections import 
celery-4.1.0/celery/utils/text.py0000644000175000017500000001260013130607475016713 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Text formatting utilities."""
from __future__ import absolute_import, unicode_literals

import re

from collections import Callable
from functools import partial
from textwrap import fill
from pprint import pformat

from celery.five import string_t

__all__ = [
    'abbr', 'abbrtask', 'dedent', 'dedent_initial',
    'ensure_newlines', 'ensure_sep',
    'fill_paragraphs', 'indent', 'join', 'pluralize',
    'pretty', 'str_to_list', 'simple_format', 'truncate',
]

UNKNOWN_SIMPLE_FORMAT_KEY = """\
Unknown format %{0} in string {1!r}.
Possible causes: Did you forget to escape the expand sign (use '%%{0!r}'),
or did you escape and the value was expanded twice? (%%N -> %N -> %hostname)?
""".strip()

RE_FORMAT = re.compile(r'%(\w)')


def str_to_list(s):
    # type: (str) -> List[str]
    """Convert string to list."""
    if isinstance(s, string_t):
        return s.split(',')
    return s


def dedent_initial(s, n=4):
    # type: (str, int) -> str
    """Remove indentation from first line of text."""
    return s[n:] if s[:n] == ' ' * n else s


def dedent(s, n=4, sep='\n'):
    # type: (str, int, str) -> str
    """Remove indentation."""
    return sep.join(dedent_initial(l) for l in s.splitlines())


def fill_paragraphs(s, width, sep='\n'):
    # type: (str, int, str) -> str
    """Fill paragraphs with newlines (or custom separator)."""
    return sep.join(fill(p, width) for p in s.split(sep))


def join(l, sep='\n'):
    # type: (List[str], str) -> str
    """Concatenate list of strings."""
    return sep.join(v for v in l if v)


def ensure_sep(sep, s, n=2):
    # type: (str, str, int) -> str
    """Ensure text ``s`` ends in separator ``sep``."""
    return s + sep * (n - s.count(sep))
ensure_newlines = partial(ensure_sep, '\n')


def abbr(S, max, ellipsis='...'):
    # type: (str, int, str) -> str
    """Abbreviate word."""
    if S is None:
        return '???'
    if len(S) > max:
        return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max]
    return S


def abbrtask(S, max):
    # type: (str, int) -> str
    """Abbreviate task name."""
    if S is None:
        return '???'
    if len(S) > max:
        module, _, cls = S.rpartition('.')
        module = abbr(module, max - len(cls) - 3, False)
        return module + '[.]' + cls
    return S


def indent(t, indent=0, sep='\n'):
    # type: (str, int, str) -> str
    """Indent text."""
    return sep.join(' ' * indent + p for p in t.split(sep))


def truncate(s, maxlen=128, suffix='...'):
    # type: (str, int, str) -> str
    """Truncate text to a maximum number of characters."""
    if maxlen and len(s) >= maxlen:
        return s[:maxlen].rsplit(' ', 1)[0] + suffix
    return s


def pluralize(n, text, suffix='s'):
    # type: (int, str, str) -> str
    """Pluralize term when n is greater than one."""
    if n != 1:
        return text + suffix
    return text


def pretty(value, width=80, nl_width=80, sep='\n', **kw):
    # type: (str, int, int, str, **Any) -> str
    """Format value for printing to console."""
    if isinstance(value, dict):
        return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:])
    elif isinstance(value, tuple):
        return '{0}{1}{2}'.format(
            sep, ' ' * 4, pformat(value, width=nl_width, **kw),
        )
    else:
        return pformat(value, width=width, **kw)


def match_case(s, other):
    # type: (str, str) -> str
    return s.upper() if other.isupper() else s.lower()


def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'):
    # type: (str, Mapping[str, str], Pattern, str) -> str
    """Format string, expanding abbreviations in keys."""
    if s:
        keys.setdefault('%', '%')

        def resolve(match):
            key = match.expand(expand)
            try:
                resolver = keys[key]
            except KeyError:
                raise ValueError(UNKNOWN_SIMPLE_FORMAT_KEY.format(key, s))
            if isinstance(resolver, Callable):
                return resolver()
            return resolver

        return pattern.sub(resolve, s)
    return s
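# Example -- an illustrative sketch only (not part of the original module):
#
#     simple_format('%h.example.com', {'h': 'worker1'})
#     # -> 'worker1.example.com'
#     # values may also be callables, resolved at expansion time.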
def remove_repeating_from_task(task_name, s):
    # type: (str, str) -> str
    """Given task name, remove repeating module names.

    Example:
        >>> remove_repeating_from_task(
        ...     'tasks.add',
        ...     'tasks.add(2, 2), tasks.mul(3), tasks.div(4)')
        'tasks.add(2, 2), mul(3), div(4)'
    """
    # This is used by e.g. repr(chain), to remove repeating module names.
    #  - extract the module part of the task name
    module = str(task_name).rpartition('.')[0] + '.'
    return remove_repeating(module, s)


def remove_repeating(substr, s):
    # type: (str, str) -> str
    """Remove repeating module names from string.

    Arguments:
        substr (str): The substring to remove repetitions of,
            e.g. a module name including the trailing dot.
        s (str): The string we want to work on.

    Example:
        >>> remove_repeating(
        ...     'x.tasks.',
        ...     'x.tasks.add(2, 2) | x.tasks.add(4) | x.tasks.mul(8)',
        ... )
        'x.tasks.add(2, 2) | add(4) | mul(8)'
    """
    # find the first occurrence of substr in the string.
    index = s.find(substr)
    if index >= 0:
        return ''.join([
            # leave the first occurrence of substr untouched.
            s[:index + len(substr)],
            # strip seen substr from the rest of the string.
            s[index + len(substr):].replace(substr, ''),
        ])
    return s
celery-4.1.0/celery/utils/collections.py0000644000175000017500000006477613130607475020262 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Custom maps, sets, sequences, and other data structures."""
from __future__ import absolute_import, unicode_literals

import sys
import time

from collections import (
    Callable, Mapping, MutableMapping, MutableSet, Sequence,
    OrderedDict as _OrderedDict, deque,
)
from heapq import heapify, heappush, heappop
from itertools import chain, count

from celery.five import Empty, items, keys, python_2_unicode_compatible, values

from .functional import first, uniq
from .text import match_case

try:
    # pypy: dicts are ordered in recent versions
    from __pypy__ import reversed_dict as _dict_is_ordered
except ImportError:
    _dict_is_ordered = None

try:
    from django.utils.functional import LazyObject, LazySettings
except ImportError:
    class LazyObject(object):  # noqa
        pass
    LazySettings = LazyObject  # noqa

__all__ = [
    'AttributeDictMixin', 'AttributeDict', 'BufferMap', 'ChainMap',
    'ConfigurationView', 'DictAttribute', 'Evictable',
    'LimitedSet', 'Messagebuffer', 'OrderedDict',
    'force_mapping', 'lpmerge',
]

PY3 = sys.version_info[0] >= 3

REPR_LIMITED_SET = """\
<{name}({size}): maxlen={0.maxlen}, expires={0.expires}, minlen={0.minlen}>\
"""


def force_mapping(m):
    # type: (Any) -> Mapping
    """Wrap object into supporting the mapping interface if necessary."""
    if isinstance(m, (LazyObject, LazySettings)):
        m = m._wrapped
    return DictAttribute(m) if not isinstance(m, Mapping) else m


def lpmerge(L, R):
    # type: (Mapping, Mapping) -> Mapping
    """In place left precedent dictionary merge.

    Keeps values from `L`, if the value in `R` is :const:`None`.
    """
    setitem = L.__setitem__
    [setitem(k, v) for k, v in items(R) if v is not None]
    return L


class OrderedDict(_OrderedDict):
    """Dict where insertion order matters."""

    if PY3:  # pragma: no cover

        def _LRUkey(self):
            # type: () -> Any
            # return value of od.keys does not support __next__,
            # but this version will also not create a copy of the list.
            return next(iter(keys(self)))
    else:
        if _dict_is_ordered:  # pragma: no cover

            def _LRUkey(self):
                # type: () -> Any
                # iterkeys is iterable.
return next(self.iterkeys()) else: def _LRUkey(self): # type: () -> Any return self._OrderedDict__root[1][2] if not hasattr(_OrderedDict, 'move_to_end'): if _dict_is_ordered: # pragma: no cover def move_to_end(self, key, last=True): # type: (Any, bool) -> None if not last: # we don't use this argument, and the only way to # implement this on PyPy seems to be O(n): creating a # copy with the order changed, so we just raise. raise NotImplementedError('no last=True on PyPy') self[key] = self.pop(key) else: def move_to_end(self, key, last=True): # type: (Any, bool) -> None link = self._OrderedDict__map[key] link_prev = link[0] link_next = link[1] link_prev[1] = link_next link_next[0] = link_prev root = self._OrderedDict__root if last: last = root[0] link[0] = last link[1] = root last[1] = root[0] = link else: first_node = root[1] link[0] = root link[1] = first_node root[1] = first_node[0] = link class AttributeDictMixin(object): """Mixin for Mapping interface that adds attribute access. I.e., `d.key -> d[key]`). """ def __getattr__(self, k): # type: (str) -> Any """`d.key -> d[key]`.""" try: return self[k] except KeyError: raise AttributeError( '{0!r} object has no attribute {1!r}'.format( type(self).__name__, k)) def __setattr__(self, key, value): # type: (str, Any) -> None """`d[key] = value -> d.key = value`.""" self[key] = value class AttributeDict(dict, AttributeDictMixin): """Dict subclass with attribute access.""" class DictAttribute(object): """Dict interface to attributes. `obj[k] -> obj.k` `obj[k] = val -> obj.k = val` """ obj = None def __init__(self, obj): # type: (Any) -> None object.__setattr__(self, 'obj', obj) def __getattr__(self, key): # type: (Any) -> Any return getattr(self.obj, key) def __setattr__(self, key, value): # type: (Any, Any) -> None return setattr(self.obj, key, value) def get(self, key, default=None): # type: (Any, Any) -> Any try: return self[key] except KeyError: return default def setdefault(self, key, default=None): # type: (Any, Any) -> None if key not in self: self[key] = default def __getitem__(self, key): # type: (Any) -> Any try: return getattr(self.obj, key) except AttributeError: raise KeyError(key) def __setitem__(self, key, value): # type: (Any, Any) -> Any setattr(self.obj, key, value) def __contains__(self, key): # type: (Any) -> bool return hasattr(self.obj, key) def _iterate_keys(self): # type: () -> Iterable return iter(dir(self.obj)) iterkeys = _iterate_keys def __iter__(self): # type: () -> Iterable return self._iterate_keys() def _iterate_items(self): # type: () -> Iterable for key in self._iterate_keys(): yield key, getattr(self.obj, key) iteritems = _iterate_items def _iterate_values(self): # type: () -> Iterable for key in self._iterate_keys(): yield getattr(self.obj, key) itervalues = _iterate_values if sys.version_info[0] == 3: # pragma: no cover items = _iterate_items keys = _iterate_keys values = _iterate_values else: def keys(self): # type: () -> List[Any] return list(self) def items(self): # type: () -> List[Tuple[Any, Any]] return list(self._iterate_items()) def values(self): # type: () -> List[Any] return list(self._iterate_values()) MutableMapping.register(DictAttribute) # noqa: E305 class ChainMap(MutableMapping): """Key lookup on a sequence of maps.""" key_t = None changes = None defaults = None maps = None def __init__(self, *maps, **kwargs): # type: (*Mapping, **Any) -> None maps = list(maps or [{}]) self.__dict__.update( key_t=kwargs.get('key_t'), maps=maps, changes=maps[0], defaults=maps[1:], ) def add_defaults(self, d): # 
type: (Mapping) -> None d = force_mapping(d) self.defaults.insert(0, d) self.maps.insert(1, d) def pop(self, key, *default): # type: (Any, *Any) -> Any try: return self.maps[0].pop(key, *default) except KeyError: raise KeyError( 'Key not found in the first mapping: {!r}'.format(key)) def __missing__(self, key): # type: (Any) -> Any raise KeyError(key) def _key(self, key): # type: (Any) -> Any return self.key_t(key) if self.key_t is not None else key def __getitem__(self, key): # type: (Any) -> Any _key = self._key(key) for mapping in self.maps: try: return mapping[_key] except KeyError: pass return self.__missing__(key) def __setitem__(self, key, value): # type: (Any, Any) -> None self.changes[self._key(key)] = value def __delitem__(self, key): # type: (Any) -> None try: del self.changes[self._key(key)] except KeyError: raise KeyError('Key not found in first mapping: {0!r}'.format(key)) def clear(self): # type: () -> None self.changes.clear() def get(self, key, default=None): # type: (Any, Any) -> Any try: return self[self._key(key)] except KeyError: return default def __len__(self): # type: () -> int return len(set().union(*self.maps)) def __iter__(self): return self._iterate_keys() def __contains__(self, key): # type: (Any) -> bool key = self._key(key) return any(key in m for m in self.maps) def __bool__(self): # type: () -> bool return any(self.maps) __nonzero__ = __bool__ # Py2 def setdefault(self, key, default=None): # type: (Any, Any) -> None key = self._key(key) if key not in self: self[key] = default def update(self, *args, **kwargs): # type: (*Any, **Any) -> Any return self.changes.update(*args, **kwargs) def __repr__(self): # type: () -> str return '{0.__class__.__name__}({1})'.format( self, ', '.join(map(repr, self.maps))) @classmethod def fromkeys(cls, iterable, *args): # type: (type, Iterable, *Any) -> 'ChainMap' """Create a ChainMap with a single dict created from the iterable.""" return cls(dict.fromkeys(iterable, *args)) def copy(self): # type: () -> 'ChainMap' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy # Py2 def _iter(self, op): # type: (Callable) -> Iterable # defaults must be first in the stream, so values in # changes take precedence. # pylint: disable=bad-reversed-sequence # Someone should teach pylint about properties. return chain(*[op(d) for d in reversed(self.maps)]) def _iterate_keys(self): # type: () -> Iterable return uniq(self._iter(lambda d: d.keys())) iterkeys = _iterate_keys def _iterate_items(self): # type: () -> Iterable return ((key, self[key]) for key in self) iteritems = _iterate_items def _iterate_values(self): # type: () -> Iterable return (self[key] for key in self) itervalues = _iterate_values if sys.version_info[0] == 3: # pragma: no cover keys = _iterate_keys items = _iterate_items values = _iterate_values else: # noqa def keys(self): # type: () -> List[Any] return list(self._iterate_keys()) def items(self): # type: () -> List[Tuple[Any, Any]] return list(self._iterate_items()) def values(self): # type: () -> List[Any] return list(self._iterate_values()) @python_2_unicode_compatible class ConfigurationView(ChainMap, AttributeDictMixin): """A view over an applications configuration dictionaries. Custom (but older) version of :class:`collections.ChainMap`. If the key does not exist in ``changes``, the ``defaults`` dictionaries are consulted. Arguments: changes (Mapping): Map of configuration changes. defaults (List[Mapping]): List of dictionaries containing the default configuration. 
""" def __init__(self, changes, defaults=None, keys=None, prefix=None): # type: (Mapping, Mapping, List[str], str) -> None defaults = [] if defaults is None else defaults super(ConfigurationView, self).__init__(changes, *defaults) self.__dict__.update( prefix=prefix.rstrip('_') + '_' if prefix else prefix, _keys=keys, ) def _to_keys(self, key): # type: (str) -> Sequence[str] prefix = self.prefix if prefix: pkey = prefix + key if not key.startswith(prefix) else key return match_case(pkey, prefix), key return key, def __getitem__(self, key): # type: (str) -> Any keys = self._to_keys(key) getitem = super(ConfigurationView, self).__getitem__ for k in keys + ( tuple(f(key) for f in self._keys) if self._keys else ()): try: return getitem(k) except KeyError: pass try: # support subclasses implementing __missing__ return self.__missing__(key) except KeyError: if len(keys) > 1: raise KeyError( 'Key not found: {0!r} (with prefix: {0!r})'.format(*keys)) raise def __setitem__(self, key, value): # type: (str, Any) -> Any self.changes[self._key(key)] = value def first(self, *keys): # type: (*str) -> Any return first(None, (self.get(key) for key in keys)) def get(self, key, default=None): # type: (str, Any) -> Any try: return self[key] except KeyError: return default def clear(self): # type: () -> None """Remove all changes, but keep defaults.""" self.changes.clear() def __contains__(self, key): # type: (str) -> bool keys = self._to_keys(key) return any(any(k in m for k in keys) for m in self.maps) def swap_with(self, other): # type: (ConfigurationView) -> None changes = other.__dict__['changes'] defaults = other.__dict__['defaults'] self.__dict__.update( changes=changes, defaults=defaults, key_t=other.__dict__['key_t'], prefix=other.__dict__['prefix'], maps=[changes] + defaults ) @python_2_unicode_compatible class LimitedSet(object): """Kind-of Set (or priority queue) with limitations. Good for when you need to test for membership (`a in set`), but the set should not grow unbounded. ``maxlen`` is enforced at all times, so if the limit is reached we'll also remove non-expired items. You can also configure ``minlen``: this is the minimal residual size of the set. All arguments are optional, and no limits are enabled by default. Arguments: maxlen (int): Optional max number of items. Adding more items than ``maxlen`` will result in immediate removal of items sorted by oldest insertion time. expires (float): TTL for all items. Expired items are purged as keys are inserted. minlen (int): Minimal residual size of this set. .. versionadded:: 4.0 Value must be less than ``maxlen`` if both are configured. Older expired items will be deleted, only after the set exceeds ``minlen`` number of items. data (Sequence): Initial data to initialize set with. Can be an iterable of ``(key, value)`` pairs, a dict (``{key: insertion_time}``), or another instance of :class:`LimitedSet`. Example: >>> s = LimitedSet(maxlen=50000, expires=3600, minlen=4000) >>> for i in range(60000): ... s.add(i) ... s.add(str(i)) ... >>> 57000 in s # last 50k inserted values are kept True >>> '10' in s # '10' did expire and was purged from set. 
        False
        >>> len(s)  # maxlen is reached
        50000
        >>> s.purge(now=time.time() + 7200)  # clock + 2 hours
        >>> len(s)  # now only minlen items are cached
        4000
        >>> 57000 in s  # even this item is gone now
        False
    """

    max_heap_percent_overload = 15

    def __init__(self, maxlen=0, expires=0, data=None, minlen=0):
        # type: (int, float, Mapping, int) -> None
        self.maxlen = 0 if maxlen is None else maxlen
        self.minlen = 0 if minlen is None else minlen
        self.expires = 0 if expires is None else expires
        self._data = {}
        self._heap = []

        if data:
            # import items from data
            self.update(data)

        if not self.maxlen >= self.minlen >= 0:
            raise ValueError(
                'minlen must be a positive number, '
                'less than or equal to maxlen.')
        if self.expires < 0:
            raise ValueError('expires cannot be negative!')

    def _refresh_heap(self):
        # type: () -> None
        """Time-consuming recreation of the heap.  Don't run this too often."""
        self._heap[:] = [entry for entry in values(self._data)]
        heapify(self._heap)

    def _maybe_refresh_heap(self):
        # type: () -> None
        if self._heap_overload >= self.max_heap_percent_overload:
            self._refresh_heap()

    def clear(self):
        # type: () -> None
        """Clear all data, start from scratch again."""
        self._data.clear()
        self._heap[:] = []

    def add(self, item, now=None):
        # type: (Any, float) -> None
        """Add a new item, or reset the expiry time of an existing item."""
        now = now or time.time()
        if item in self._data:
            self.discard(item)
        entry = (now, item)
        self._data[item] = entry
        heappush(self._heap, entry)
        if self.maxlen and len(self._data) >= self.maxlen:
            self.purge()

    def update(self, other):
        # type: (Iterable) -> None
        """Update this set from other LimitedSet, dict or iterable."""
        if not other:
            return
        if isinstance(other, LimitedSet):
            self._data.update(other._data)
            self._refresh_heap()
            self.purge()
        elif isinstance(other, dict):
            # revokes are sent as a dict
            for key, inserted in items(other):
                if isinstance(inserted, (tuple, list)):
                    # in case someone uses ._data directly for sending update
                    inserted = inserted[0]
                if not isinstance(inserted, float):
                    raise ValueError(
                        'Expecting float timestamp, got type '
                        '{0!r} with value: {1}'.format(
                            type(inserted), inserted))
                self.add(key, inserted)
        else:
            # XXX AVOID THIS, it could keep old data if more parties
            # exchange them all over and over again
            for obj in other:
                self.add(obj)

    def discard(self, item):
        # type: (Any) -> None
        # mark an existing item as removed; if it's not in the set, pass.
        self._data.pop(item, None)
        self._maybe_refresh_heap()
    pop_value = discard

    def purge(self, now=None):
        # type: (float) -> None
        """Check oldest items and remove them if needed.

        Arguments:
            now (float): Time of purging -- by default right now.
                This can be useful for unit testing.
        """
        now = now or time.time()
        now = now() if isinstance(now, Callable) else now
        if self.maxlen:
            while len(self._data) > self.maxlen:
                self.pop()
        # time based expiring:
        if self.expires:
            while len(self._data) > self.minlen >= 0:
                inserted_time, _ = self._heap[0]
                if inserted_time + self.expires > now:
                    break  # oldest item hasn't expired yet
                self.pop()

    def pop(self, default=None):
        # type: (Any) -> Any
        """Remove and return the oldest item, or :const:`None` when empty."""
        while self._heap:
            _, item = heappop(self._heap)
            try:
                self._data.pop(item)
            except KeyError:
                pass
            else:
                return item
        return default

    def as_dict(self):
        # type: () -> Dict
        """Whole set as serializable dictionary.

        Example:
            >>> s = LimitedSet(maxlen=200)
            >>> r = LimitedSet(maxlen=200)
            >>> for i in range(500):
            ...     s.add(i)
            ...
            >>> r.update(s.as_dict())
            >>> r == s
            True
        """
        return {key: inserted for inserted, key in values(self._data)}

    def __eq__(self, other):
        # type: (Any) -> bool
        return self._data == other._data

    def __ne__(self, other):
        # type: (Any) -> bool
        return not self.__eq__(other)

    def __repr__(self):
        # type: () -> str
        return REPR_LIMITED_SET.format(
            self, name=type(self).__name__, size=len(self),
        )

    def __iter__(self):
        # type: () -> Iterable
        return (i for _, i in sorted(values(self._data)))

    def __len__(self):
        # type: () -> int
        return len(self._data)

    def __contains__(self, key):
        # type: (Any) -> bool
        return key in self._data

    def __reduce__(self):
        # type: () -> Any
        return self.__class__, (
            self.maxlen, self.expires, self.as_dict(), self.minlen)

    def __bool__(self):
        # type: () -> bool
        return bool(self._data)
    __nonzero__ = __bool__  # Py2

    @property
    def _heap_overload(self):
        # type: () -> float
        """Compute how much is heap bigger than data [percents]."""
        return len(self._heap) * 100 / max(len(self._data), 1) - 100
MutableSet.register(LimitedSet)  # noqa: E305


class Evictable(object):
    """Mixin for classes supporting the ``evict`` method."""

    Empty = Empty

    def evict(self):
        # type: () -> None
        """Force evict until maxsize is enforced."""
        self._evict(range=count)

    def _evict(self, limit=100, range=range):
        # type: (int) -> None
        try:
            [self._evict1() for _ in range(limit)]
        except IndexError:
            pass

    def _evict1(self):
        # type: () -> None
        if self._evictcount <= self.maxsize:
            raise IndexError()
        try:
            self._pop_to_evict()
        except self.Empty:
            raise IndexError()


@python_2_unicode_compatible
class Messagebuffer(Evictable):
    """A buffer of pending messages."""

    Empty = Empty

    def __init__(self, maxsize, iterable=None, deque=deque):
        # type: (int, Iterable, Any) -> None
        self.maxsize = maxsize
        self.data = deque(iterable or [])
        self._append = self.data.append
        self._pop = self.data.popleft
        self._len = self.data.__len__
        self._extend = self.data.extend

    def put(self, item):
        # type: (Any) -> None
        self._append(item)
        self.maxsize and self._evict()

    def extend(self, it):
        # type: (Iterable) -> None
        self._extend(it)
        self.maxsize and self._evict()

    def take(self, *default):
        # type: (*Any) -> Any
        try:
            return self._pop()
        except IndexError:
            if default:
                return default[0]
            raise self.Empty()

    def _pop_to_evict(self):
        # type: () -> None
        return self.take()

    def __repr__(self):
        # type: () -> str
        return '<{0}: {1}/{2}>'.format(
            type(self).__name__, len(self), self.maxsize,
        )

    def __iter__(self):
        # type: () -> Iterable
        while 1:
            try:
                yield self._pop()
            except IndexError:
                break

    def __len__(self):
        # type: () -> int
        return self._len()

    def __contains__(self, item):
        # type: () -> bool
        return item in self.data

    def __reversed__(self):
        # type: () -> Iterable
        return reversed(self.data)

    def __getitem__(self, index):
        # type: (Any) -> Any
        return self.data[index]

    @property
    def _evictcount(self):
        # type: () -> int
        return len(self)
Sequence.register(Messagebuffer)  # noqa: E305


@python_2_unicode_compatible
class BufferMap(OrderedDict, Evictable):
    """Map of buffers."""

    Buffer = Messagebuffer
    Empty = Empty

    maxsize = None
    total = 0
    bufmaxsize = None

    def __init__(self, maxsize, iterable=None, bufmaxsize=1000):
        # type: (int, Iterable, int) -> None
        super(BufferMap, self).__init__()
        self.maxsize = maxsize
        self.bufmaxsize = bufmaxsize
        if iterable:
            self.update(iterable)
        self.total = sum(len(buf) for buf in values(self))

    def put(self, key, item):
        # type: (Any, Any) -> None
        self._get_or_create_buffer(key).put(item)
        self.total += 1
        self.move_to_end(key)   # least recently used.
        self.maxsize and self._evict()

    def extend(self, key, it):
        # type: (Any, Iterable) -> None
        self._get_or_create_buffer(key).extend(it)
        self.total += len(it)
        self.maxsize and self._evict()

    def take(self, key, *default):
        # type: (Any, *Any) -> Any
        item, throw = None, False
        try:
            buf = self[key]
        except KeyError:
            throw = True
        else:
            try:
                item = buf.take()
                self.total -= 1
            except self.Empty:
                throw = True
            else:
                self.move_to_end(key)  # mark as LRU

        if throw:
            if default:
                return default[0]
            raise self.Empty()
        return item

    def _get_or_create_buffer(self, key):
        # type: (Any) -> Messagebuffer
        try:
            return self[key]
        except KeyError:
            buf = self[key] = self._new_buffer()
            return buf

    def _new_buffer(self):
        # type: () -> Messagebuffer
        return self.Buffer(maxsize=self.bufmaxsize)

    def _LRUpop(self, *default):
        # type: (*Any) -> Any
        return self[self._LRUkey()].take(*default)

    def _pop_to_evict(self):
        # type: () -> None
        for _ in range(100):
            key = self._LRUkey()
            buf = self[key]
            try:
                buf.take()
            except (IndexError, self.Empty):
                # buffer empty, remove it from mapping.
                self.pop(key)
            else:
                # we removed one item
                self.total -= 1
                # if buffer is empty now, remove it from mapping.
                if not len(buf):
                    self.pop(key)
                else:
                    # move to least recently used.
                    self.move_to_end(key)
                break

    def __repr__(self):
        # type: () -> str
        return '<{0}: {1}/{2}>'.format(
            type(self).__name__, self.total, self.maxsize,
        )

    @property
    def _evictcount(self):
        # type: () -> int
        return self.total
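# Example -- an illustrative sketch only (not part of the original module):
#
#     buf = Messagebuffer(maxsize=3)
#     buf.extend([1, 2, 3, 4])    # exceeding maxsize evicts the oldest item
#     buf.take()                  # -> 2  (1 was already evicted)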
self.maxsize and self._evict() def extend(self, key, it): # type: (Any, Iterable) -> None self._get_or_create_buffer(key).extend(it) self.total += len(it) self.maxsize and self._evict() def take(self, key, *default): # type: (Any, *Any) -> Any item, throw = None, False try: buf = self[key] except KeyError: throw = True else: try: item = buf.take() self.total -= 1 except self.Empty: throw = True else: self.move_to_end(key) # mark as most recently used if throw: if default: return default[0] raise self.Empty() return item def _get_or_create_buffer(self, key): # type: (Any) -> Messagebuffer try: return self[key] except KeyError: buf = self[key] = self._new_buffer() return buf def _new_buffer(self): # type: () -> Messagebuffer return self.Buffer(maxsize=self.bufmaxsize) def _LRUpop(self, *default): # type: (*Any) -> Any return self[self._LRUkey()].take(*default) def _pop_to_evict(self): # type: () -> None for _ in range(100): key = self._LRUkey() buf = self[key] try: buf.take() except (IndexError, self.Empty): # buffer empty, remove it from mapping. self.pop(key) else: # we removed one item self.total -= 1 # if buffer is empty now, remove it from mapping. if not len(buf): self.pop(key) else: # move to the most-recently-used end. self.move_to_end(key) break def __repr__(self): # type: () -> str return '<{0}: {1}/{2}>'.format( type(self).__name__, self.total, self.maxsize, ) @property def _evictcount(self): # type: () -> int return self.total celery-4.1.0/celery/utils/iso8601.py0000644000175000017500000000541713130607475017050 0ustar omeromer00000000000000"""Parse ISO8601 dates. Originally taken from :pypi:`pyiso8601` (https://bitbucket.org/micktwomey/pyiso8601) Modified to match the behavior of ``dateutil.parser``: - raise :exc:`ValueError` instead of ``ParseError`` - return naive :class:`~datetime.datetime` by default - use :class:`pytz.FixedOffset` This is the original License: Copyright (c) 2007 Michael Twomey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sub-license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import, unicode_literals import re from datetime import datetime from pytz import FixedOffset __all__ = ['parse_iso8601'] # Adapted from http://delete.me.uk/2005/03/iso8601.html ISO8601_REGEX = re.compile( r'(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})' r'((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})' r'(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?' r'(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?' )
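# A rough usage sketch of the parser defined below (the timestamps are arbitrary examples, not from the original source): # # >>> parse_iso8601('2017-07-14T12:00:00+02:00') # datetime.datetime(2017, 7, 14, 12, 0, tzinfo=pytz.FixedOffset(120)) # >>> parse_iso8601('2017-07-14') # the time part is optional # datetime.datetime(2017, 7, 14, 0, 0)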
TIMEZONE_REGEX = re.compile( r'(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})' ) def parse_iso8601(datestring): """Parse and convert ISO-8601 string to datetime.""" m = ISO8601_REGEX.match(datestring) if not m: raise ValueError('unable to parse date string %r' % datestring) groups = m.groupdict() tz = groups['timezone'] if tz == 'Z': tz = FixedOffset(0) elif tz: m = TIMEZONE_REGEX.match(tz) prefix, hours, minutes = m.groups() hours, minutes = int(hours), int(minutes) if prefix == '-': hours = -hours minutes = -minutes tz = FixedOffset(minutes + hours * 60) return datetime( int(groups['year']), int(groups['month']), int(groups['day']), int(groups['hour'] or 0), int(groups['minute'] or 0), int(groups['second'] or 0), int(groups['fraction'] or 0), tz ) celery-4.1.0/celery/utils/threads.py0000644000175000017500000002316313130607475017367 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Threading primitives and utilities.""" from __future__ import absolute_import, print_function, unicode_literals import os import socket import sys import threading import traceback from contextlib import contextmanager from celery.local import Proxy from celery.five import THREAD_TIMEOUT_MAX, items, python_2_unicode_compatible try: from greenlet import getcurrent as get_ident except ImportError: # pragma: no cover try: from _thread import get_ident # noqa except ImportError: try: from thread import get_ident # noqa except ImportError: # pragma: no cover try: from _dummy_thread import get_ident # noqa except ImportError: from dummy_thread import get_ident # noqa __all__ = [ 'bgThread', 'Local', 'LocalStack', 'LocalManager', 'get_ident', 'default_socket_timeout', ] USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') PY3 = sys.version_info[0] == 3 @contextmanager def default_socket_timeout(timeout): """Context temporarily setting the default socket timeout.""" prev = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) yield socket.setdefaulttimeout(prev) class bgThread(threading.Thread): """Background service thread.""" def __init__(self, name=None, **kwargs): super(bgThread, self).__init__() self._is_shutdown = threading.Event() self._is_stopped = threading.Event() self.daemon = True self.name = name or self.__class__.__name__ def body(self): raise NotImplementedError() def on_crash(self, msg, *fmt, **kwargs): print(msg.format(*fmt), file=sys.stderr) traceback.print_exc(None, sys.stderr) def run(self): body = self.body shutdown_set = self._is_shutdown.is_set try: while not shutdown_set(): try: body() except Exception as exc: # pylint: disable=broad-except try: self.on_crash('{0!r} crashed: {1!r}', self.name, exc) self._set_stopped() finally: os._exit(1) # exiting by normal means won't work finally: self._set_stopped() def _set_stopped(self): try: self._is_stopped.set() except TypeError: # pragma: no cover # we lost the race at interpreter shutdown, # so gc collected built-in modules. pass def stop(self): """Graceful shutdown.""" self._is_shutdown.set() self._is_stopped.wait() if self.is_alive(): self.join(THREAD_TIMEOUT_MAX) def release_local(local): """Release the contents of the local for the current context. This makes it possible to use locals without a manager. With this function one can release :class:`Local` objects as well as :class:`LocalStack` objects. However, it's not possible to release data held by proxies that way; one always has to retain a reference to the underlying local object in order to be able to release it.
Example: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False """ local.__release_local__() class Local(object): """Local object.""" __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(items(self.__storage__)) def __call__(self, proxy): """Create a proxy for a name.""" return Proxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class _LocalStack(object): """Local stack. This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it will return a proxy that resolves to the topmost item on the stack. """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return Proxy(_lookup) def push(self, obj): """Push a new item to the stack.""" rv = getattr(self._local, 'stack', None) if rv is None: # pylint: disable=assigning-non-slot # This attribute is defined now. self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Remove the topmost item from the stack. Note: Will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() def __len__(self): stack = getattr(self._local, 'stack', None) return len(stack) if stack else 0 @property def stack(self): # get_current_worker_task uses this to find # the original task that was executed by the worker. stack = getattr(self._local, 'stack', None) if stack is not None: return stack return [] @property def top(self): """The topmost item on the stack. Note: If the stack is empty, :const:`None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None @python_2_unicode_compatible class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to ``manager.locals``. Every time the manager cleans up, it will clean up all the data left in the locals for this context. 
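Example (a minimal sketch; ``loc`` is a hypothetical :class:`Local`): >>> loc = Local() >>> manager = LocalManager([loc]) >>> loc.user = 'admin' >>> manager.cleanup() # releases ``loc`` for this context >>> hasattr(loc, 'user') False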
The ``ident_func`` parameter can be added to override the default ident function for the wrapped locals. """ def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return context identifier. This is the identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use ``make_middleware()``. """ for local in self.locals: release_local(local) def __repr__(self): return '<{0} storages: {1}>'.format( self.__class__.__name__, len(self.locals)) class _FastLocalStack(threading.local): def __init__(self): self.stack = [] self.push = self.stack.append self.pop = self.stack.pop super(_FastLocalStack, self).__init__() @property def top(self): try: return self.stack[-1] except (AttributeError, IndexError): return None def __len__(self): return len(self.stack) if USE_FAST_LOCALS: # pragma: no cover LocalStack = _FastLocalStack else: # - See #706 # since each thread has its own greenlet we can just use those as # identifiers for the context. If greenlets aren't available we # fall back to the current thread ident. LocalStack = _LocalStack # noqa celery-4.1.0/celery/utils/serialization.py0000644000175000017500000002052113130607475020605 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Utilities for safely pickling exceptions.""" from __future__ import absolute_import, unicode_literals import datetime import numbers import sys from base64 import b64encode as base64encode, b64decode as base64decode from functools import partial from inspect import getmro from itertools import takewhile from kombu.utils.encoding import bytes_to_str, str_to_bytes from celery.five import ( bytes_if_py2, python_2_unicode_compatible, items, reraise, string_t, ) from .encoding import safe_repr try: import cPickle as pickle except ImportError: import pickle # noqa PY33 = sys.version_info >= (3, 3) __all__ = [ 'UnpickleableExceptionWrapper', 'subclass_exception', 'find_pickleable_exception', 'create_exception_cls', 'get_pickleable_exception', 'get_pickleable_etype', 'get_pickled_exception', 'strtobool', ] #: List of base classes we probably don't want to reduce to. try: unwanted_base_classes = (StandardError, Exception, BaseException, object) except NameError: # pragma: no cover unwanted_base_classes = (Exception, BaseException, object) # py3k def subclass_exception(name, parent, module): # noqa """Create new exception class.""" return type(bytes_if_py2(name), (parent,), {'__module__': module}) def find_pickleable_exception(exc, loads=pickle.loads, dumps=pickle.dumps): """Find first pickleable exception base class. With an exception instance, iterate over its super classes (by MRO) and find the first super exception that's pickleable. It does not go below :exc:`Exception` (i.e., it skips :exc:`Exception`, :class:`BaseException` and :class:`object`). If that happens you should use :exc:`UnpickleableExceptionWrapper` instead. Arguments: exc (BaseException): An exception instance.
Returns: Exception: Nearest pickleable parent exception class (except :exc:`Exception` and parents), or if the exception is pickleable it will return :const:`None`. """ exc_args = getattr(exc, 'args', []) for supercls in itermro(exc.__class__, unwanted_base_classes): try: superexc = supercls(*exc_args) loads(dumps(superexc)) except Exception: # pylint: disable=broad-except pass else: return superexc def itermro(cls, stop): return takewhile(lambda sup: sup not in stop, getmro(cls)) def create_exception_cls(name, module, parent=None): """Dynamically create an exception class.""" if not parent: parent = Exception return subclass_exception(name, parent, module) @python_2_unicode_compatible class UnpickleableExceptionWrapper(Exception): """Wraps unpickleable exceptions. Arguments: exc_module (str): See :attr:`exc_module`. exc_cls_name (str): See :attr:`exc_cls_name`. exc_args (Tuple[Any, ...]): See :attr:`exc_args`. Example: >>> def pickle_it(raising_function): ... try: ... raising_function() ... except Exception as e: ... exc = UnpickleableExceptionWrapper( ... e.__class__.__module__, ... e.__class__.__name__, ... e.args, ... ) ... pickle.dumps(exc) # Works fine. """ #: The module of the original exception. exc_module = None #: The name of the original exception class. exc_cls_name = None #: The arguments for the original exception. exc_args = None def __init__(self, exc_module, exc_cls_name, exc_args, text=None): safe_exc_args = [] for arg in exc_args: try: pickle.dumps(arg) safe_exc_args.append(arg) except Exception: # pylint: disable=broad-except safe_exc_args.append(safe_repr(arg)) self.exc_module = exc_module self.exc_cls_name = exc_cls_name self.exc_args = safe_exc_args self.text = text Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text) def restore(self): return create_exception_cls(self.exc_cls_name, self.exc_module)(*self.exc_args) def __str__(self): return self.text @classmethod def from_exception(cls, exc): return cls(exc.__class__.__module__, exc.__class__.__name__, getattr(exc, 'args', []), safe_repr(exc)) def get_pickleable_exception(exc): """Make sure exception is pickleable.""" try: pickle.loads(pickle.dumps(exc)) except Exception: # pylint: disable=broad-except pass else: return exc nearest = find_pickleable_exception(exc) if nearest: return nearest return UnpickleableExceptionWrapper.from_exception(exc) def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps): """Get pickleable exception type.""" try: loads(dumps(cls)) except Exception: # pylint: disable=broad-except return Exception else: return cls def get_pickled_exception(exc): """Reverse of :meth:`get_pickleable_exception`.""" if isinstance(exc, UnpickleableExceptionWrapper): return exc.restore() return exc def b64encode(s): return bytes_to_str(base64encode(str_to_bytes(s))) def b64decode(s): return base64decode(str_to_bytes(s)) def strtobool(term, table={'false': False, 'no': False, '0': False, 'true': True, 'yes': True, '1': True, 'on': True, 'off': False}): """Convert common terms for true/false to bool. Examples (true/false/yes/no/on/off/1/0). """ if isinstance(term, string_t): try: return table[term.lower()] except KeyError: raise TypeError('Cannot coerce {0!r} to type bool'.format(term)) return term def _datetime_to_json(dt): # See "Date Time String Format" in the ECMA-262 specification. 
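# Illustrative values: datetime(2017, 7, 14, 12, 0, 0, 123456).isoformat() # gives '2017-07-14T12:00:00.123456'; the slicing below truncates that to # millisecond precision, '2017-07-14T12:00:00.123', and a trailing '+00:00' # UTC offset is rewritten as the shorter 'Z' suffix.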
if isinstance(dt, datetime.datetime): r = dt.isoformat() if dt.microsecond: r = r[:23] + r[26:] if r.endswith('+00:00'): r = r[:-6] + 'Z' return r elif isinstance(dt, datetime.time): r = dt.isoformat() if dt.microsecond: r = r[:12] return r else: return dt.isoformat() def jsonify(obj, builtin_types=(numbers.Real, string_t), key=None, keyfilter=None, unknown_type_filter=None): """Transform object making it suitable for json serialization.""" from kombu.abstract import Object as KombuDictType _jsonify = partial(jsonify, builtin_types=builtin_types, key=key, keyfilter=keyfilter, unknown_type_filter=unknown_type_filter) if isinstance(obj, KombuDictType): obj = obj.as_dict(recurse=True) if obj is None or isinstance(obj, builtin_types): return obj elif isinstance(obj, (tuple, list)): return [_jsonify(v) for v in obj] elif isinstance(obj, dict): return { k: _jsonify(v, key=k) for k, v in items(obj) if (keyfilter(k) if keyfilter else 1) } elif isinstance(obj, (datetime.date, datetime.time)): return _datetime_to_json(obj) elif isinstance(obj, datetime.timedelta): return str(obj) else: if unknown_type_filter is None: raise ValueError( 'Unsupported type: {0!r} {1!r} (parent: {2})'.format( type(obj), obj, key)) return unknown_type_filter(obj) # Since PyPy 3 targets Python 3.2, 'raise exc from None' will # raise a TypeError so we need to look for Python 3.3 or newer if PY33: # pragma: no cover from vine.five import exec_ _raise_with_context = None # for flake8 exec_("""def _raise_with_context(exc, ctx): raise exc from ctx""") def raise_with_context(exc): exc_info = sys.exc_info() if not exc_info: raise exc elif exc_info[1] is exc: raise _raise_with_context(exc, exc_info[1]) else: def raise_with_context(exc): exc_info = sys.exc_info() if not exc_info: raise exc if exc_info[1] is exc: raise elif exc_info[2]: reraise(type(exc), exc, exc_info[2]) raise exc celery-4.1.0/celery/utils/log.py0000644000175000017500000002131713130607475016515 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Logging utilities.""" from __future__ import absolute_import, print_function, unicode_literals import logging import numbers import os import sys import threading import traceback from contextlib import contextmanager from kombu.five import values from kombu.log import get_logger as _get_logger, LOG_LEVELS from kombu.utils.encoding import safe_str from celery.five import string_t, text_t from .term import colored __all__ = [ 'ColorFormatter', 'LoggingProxy', 'base_logger', 'set_in_sighandler', 'in_sighandler', 'get_logger', 'get_task_logger', 'mlevel', 'get_multiprocessing_logger', 'reset_multiprocessing_logger', ] _process_aware = False _in_sighandler = False PY3 = sys.version_info[0] == 3 MP_LOG = os.environ.get('MP_LOG', False) RESERVED_LOGGER_NAMES = {'celery', 'celery.task'} # Sets up our logging hierarchy. # # Every logger in the celery package inherits from the "celery" # logger, and every task logger inherits from the "celery.task" # logger. 
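# # For example (an illustrative sketch; the logger names are hypothetical): # # get_logger('celery.worker.job') # ends up under the 'celery' logger # get_task_logger('proj.tasks') # ends up under the 'celery.task' logger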
base_logger = logger = _get_logger('celery') def set_in_sighandler(value): """Set flag signifying that we're inside a signal handler.""" global _in_sighandler _in_sighandler = value def iter_open_logger_fds(): seen = set() loggers = (list(values(logging.Logger.manager.loggerDict)) + [logging.getLogger(None)]) for l in loggers: try: for handler in l.handlers: try: if handler not in seen: # pragma: no cover yield handler.stream seen.add(handler) except AttributeError: pass except AttributeError: # PlaceHolder does not have handlers pass @contextmanager def in_sighandler(): """Context that records that we are in a signal handler.""" set_in_sighandler(True) try: yield finally: set_in_sighandler(False) def logger_isa(l, p, max=1000): this, seen = l, set() for _ in range(max): if this == p: return True else: if this in seen: raise RuntimeError( 'Logger {0!r} has a recursive chain of parents'.format(l.name), ) seen.add(this) this = this.parent if not this: break else: # pragma: no cover raise RuntimeError('Logger hierarchy exceeds {0}'.format(max)) return False def _using_logger_parent(parent_logger, logger_): if not logger_isa(logger_, parent_logger): logger_.parent = parent_logger return logger_ def get_logger(name): """Get logger by name.""" l = _get_logger(name) if logging.root not in (l, l.parent) and l is not base_logger: l = _using_logger_parent(base_logger, l) return l task_logger = get_logger('celery.task') worker_logger = get_logger('celery.worker') def get_task_logger(name): """Get logger for task module by name.""" if name in RESERVED_LOGGER_NAMES: raise RuntimeError('Logger name {0!r} is reserved!'.format(name)) return _using_logger_parent(task_logger, get_logger(name)) def mlevel(level): """Convert level name/int to log level.""" if level and not isinstance(level, numbers.Integral): return LOG_LEVELS[level.upper()] return level class ColorFormatter(logging.Formatter): """Logging formatter that adds colors based on severity.""" #: Loglevel -> Color mapping. COLORS = colored().names colors = { 'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'], 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta'], } def __init__(self, fmt=None, use_color=True): logging.Formatter.__init__(self, fmt) self.use_color = use_color def formatException(self, ei): if ei and not isinstance(ei, tuple): ei = sys.exc_info() r = logging.Formatter.formatException(self, ei) if isinstance(r, str) and not PY3: return safe_str(r) return r def format(self, record): msg = logging.Formatter.format(self, record) color = self.colors.get(record.levelname) # reset exception info later for other handlers... einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info if color and self.use_color: try: # safe_str will repr the color object # and color will break on non-string objects # so need to reorder calls based on type. # Issue #427 try: if isinstance(msg, string_t): return text_t(color(safe_str(msg))) return safe_str(color(msg)) except UnicodeDecodeError: # pragma: no cover return safe_str(msg) # skip colors except Exception as exc: # pylint: disable=broad-except prev_msg, record.exc_info, record.msg = ( record.msg, 1, '<Unrepresentable {0!r}: {1!r}>'.format( type(msg), exc ), ) try: return logging.Formatter.format(self, record) finally: record.msg, record.exc_info = prev_msg, einfo else: return safe_str(msg) class LoggingProxy(object): """Forward file object to :class:`logging.Logger` instance. Arguments: logger (~logging.Logger): Logger instance to forward to. loglevel (int, str): Log level to use when logging messages.
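Example (a minimal sketch, assuming an already-configured logger): >>> logger = get_logger('celery.worker') >>> proxy = LoggingProxy(logger, loglevel='INFO') >>> proxy.write('this line is forwarded to the logger\n')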
""" mode = 'w' name = None closed = False loglevel = logging.ERROR _thread = threading.local() def __init__(self, logger, loglevel=None): # pylint: disable=redefined-outer-name # Note that the logger global is redefined here, be careful changing. self.logger = logger self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel) self._safewrap_handlers() def _safewrap_handlers(self): # Make the logger handlers dump internal errors to # :data:`sys.__stderr__` instead of :data:`sys.stderr` to circumvent # infinite loops. def wrap_handler(handler): # pragma: no cover class WithSafeHandleError(logging.Handler): def handleError(self, record): try: traceback.print_exc(None, sys.__stderr__) except IOError: pass # see python issue 5971 handler.handleError = WithSafeHandleError().handleError return [wrap_handler(h) for h in self.logger.handlers] def write(self, data): """Write message to logging object.""" if _in_sighandler: return print(safe_str(data), file=sys.__stderr__) if getattr(self._thread, 'recurse_protection', False): # Logger is logging back to this file, so stop recursing. return data = data.strip() if data and not self.closed: self._thread.recurse_protection = True try: self.logger.log(self.loglevel, safe_str(data)) finally: self._thread.recurse_protection = False def writelines(self, sequence): # type: (Sequence[str]) -> None """Write list of strings to file. The sequence can be any iterable object producing strings. This is equivalent to calling :meth:`write` for each string. """ for part in sequence: self.write(part) def flush(self): # This object is not buffered so any :meth:`flush` # requests are ignored. pass def close(self): # when the object is closed, no write requests are # forwarded to the logging object anymore. self.closed = True def isatty(self): """Here for file support.""" return False def get_multiprocessing_logger(): """Return the multiprocessing logger.""" try: from billiard import util except ImportError: # pragma: no cover pass else: return util.get_logger() def reset_multiprocessing_logger(): """Reset multiprocessing logging setup.""" try: from billiard import util except ImportError: # pragma: no cover pass else: if hasattr(util, '_logger'): # pragma: no cover util._logger = None def current_process(): try: from billiard import process except ImportError: # pragma: no cover pass else: return process.current_process() def current_process_index(base=1): index = getattr(current_process(), 'index', None) return index + base if index is not None else index celery-4.1.0/celery/utils/functional.py0000644000175000017500000002462113130607475020077 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Functional-style utilties.""" from __future__ import absolute_import, print_function, unicode_literals import inspect import sys from functools import partial from itertools import chain, islice from kombu.utils.functional import ( LRUCache, dictfilter, lazy, maybe_evaluate, memoize, is_list, maybe_list, ) from vine import promise from celery.five import UserList, getfullargspec, range __all__ = [ 'LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop', 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq', 'regen', 'dictfilter', 'lazy', 'maybe_evaluate', 'head_from_fun', 'maybe', 'fun_accepts_kwargs', ] IS_PY3 = sys.version_info[0] == 3 FUNHEAD_TEMPLATE = """ def {fun_name}({fun_args}): return {fun_value} """ class DummyContext(object): def __enter__(self): return self def __exit__(self, *exc_info): pass class mlazy(lazy): """Memoized lazy 
evaluation. The function is only evaluated once; every subsequent access will return the same value. """ #: Set to :const:`True` after the object has been evaluated. evaluated = False _value = None def evaluate(self): if not self.evaluated: self._value = super(mlazy, self).evaluate() self.evaluated = True return self._value def noop(*args, **kwargs): """No operation. Takes any arguments/keyword arguments and does nothing. """ pass def pass1(arg, *args, **kwargs): """Return the first positional argument.""" return arg def evaluate_promises(it): for value in it: if isinstance(value, promise): value = value() yield value def first(predicate, it): """Return the first element in ``it`` that ``predicate`` accepts. If ``predicate`` is None it will return the first item that's not :const:`None`. """ return next( (v for v in evaluate_promises(it) if ( predicate(v) if predicate is not None else v is not None)), None, ) def firstmethod(method, on_call=None): """Multiple dispatch. Return a function that, given a list of instances, finds the first instance that gives a value for the given method. The list can also contain lazy instances (:class:`~kombu.utils.functional.lazy`.) """ def _matcher(it, *args, **kwargs): for obj in it: try: meth = getattr(maybe_evaluate(obj), method) reply = (on_call(meth, *args, **kwargs) if on_call else meth(*args, **kwargs)) except AttributeError: pass else: if reply is not None: return reply return _matcher def chunks(it, n): """Split an iterator into chunks with `n` elements each. Warning: ``it`` must be an actual iterator: passing a concrete sequence will get you repeating elements. So ``chunks(iter(range(1000)), 10)`` is fine, but ``chunks(range(1000), 10)`` is not. Example: # n == 2 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) >>> list(x) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] # n == 3 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) >>> list(x) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] """ for item in it: yield [item] + list(islice(it, n - 1)) def padlist(container, size, default=None): """Pad list with default elements. Example: >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3) ('George', 'Costanza', 'NYC') >>> first, last, city = padlist(['George', 'Costanza'], 3) ('George', 'Costanza', None) >>> first, last, city, planet = padlist( ... ['George', 'Costanza', 'NYC'], 4, default='Earth', ... ) ('George', 'Costanza', 'NYC', 'Earth') """ return list(container)[:size] + [default] * (size - len(container)) def mattrgetter(*attrs): """Get attributes, ignoring attribute errors. Like :func:`operator.itemgetter` but return :const:`None` on missing attributes instead of raising :exc:`AttributeError`. """ return lambda obj: {attr: getattr(obj, attr, None) for attr in attrs} def uniq(it): """Return all unique elements in ``it``, preserving order.""" seen = set() return (seen.add(obj) or obj for obj in it if obj not in seen) def regen(it): """Convert iterator to an object that can be consumed multiple times. ``regen`` takes any iterable, and if the object is a generator it will cache the evaluated list on first access, so that the generator can be "consumed" multiple times. """ if isinstance(it, (list, tuple)): return it return _regen(it) class _regen(UserList, list): # must be subclass of list so that json can encode. def __init__(self, it): # pylint: disable=super-init-not-called # UserList creates a new list and sets .data, so we don't # want to call init here.
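# The wrapped iterator is consumed lazily: __getitem__ below advances # self.__it only as far as the requested index, caching everything seen # in self.__consumed, so a _regen can be iterated multiple times without # re-running the underlying generator.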
self.__it = it self.__index = 0 self.__consumed = [] def __reduce__(self): return list, (self.data,) def __length_hint__(self): return self.__it.__length_hint__() def __iter__(self): return chain(self.__consumed, self.__it) def __getitem__(self, index): if index < 0: return self.data[index] try: return self.__consumed[index] except IndexError: try: for _ in range(self.__index, index + 1): self.__consumed.append(next(self.__it)) except StopIteration: raise IndexError(index) else: return self.__consumed[index] @property def data(self): try: self.__consumed.extend(list(self.__it)) except StopIteration: pass return self.__consumed def _argsfromspec(spec, replace_defaults=True): if spec.defaults: split = len(spec.defaults) defaults = (list(range(len(spec.defaults))) if replace_defaults else spec.defaults) positional = spec.args[:-split] optional = list(zip(spec.args[-split:], defaults)) else: positional, optional = spec.args, [] varargs = spec.varargs varkw = spec.varkw if spec.kwonlydefaults: split = len(spec.kwonlydefaults) kwonlyargs = spec.kwonlyargs[:-split] if replace_defaults: kwonlyargs_optional = [ (kw, i) for i, kw in enumerate(spec.kwonlyargs[-split:])] else: kwonlyargs_optional = list(spec.kwonlydefaults.items()) else: kwonlyargs, kwonlyargs_optional = spec.kwonlyargs, [] return ', '.join(filter(None, [ ', '.join(positional), ', '.join('{0}={1}'.format(k, v) for k, v in optional), '*{0}'.format(varargs) if varargs else None, '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None, ', '.join(kwonlyargs) if kwonlyargs else None, ', '.join('{0}="{1}"'.format(k, v) for k, v in kwonlyargs_optional), '**{0}'.format(varkw) if varkw else None, ])) def head_from_fun(fun, bound=False, debug=False): """Generate signature function from actual function.""" # we could use inspect.Signature here, but that implementation # is very slow since it implements the argument checking # in pure-Python. Instead we use exec to create a new function # with an empty body, meaning it has the same performance as # as just calling a function. is_function = inspect.isfunction(fun) is_callable = hasattr(fun, '__call__') is_method = inspect.ismethod(fun) if not is_function and is_callable and not is_method: name, fun = fun.__class__.__name__, fun.__call__ else: name = fun.__name__ definition = FUNHEAD_TEMPLATE.format( fun_name=name, fun_args=_argsfromspec(getfullargspec(fun)), fun_value=1, ) if debug: # pragma: no cover print(definition, file=sys.stderr) namespace = {'__name__': fun.__module__} # pylint: disable=exec-used # Tasks are rarely, if ever, created at runtime - exec here is fine. 
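# As an illustration (hypothetical input, not from the original source): # for ``def add(x, y=0): ...`` the generated definition is # # def add(x, y=0): # return 1 # # i.e. a function with the same signature but a trivial body, kept only # for its argument-checking behavior.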
exec(definition, namespace) result = namespace[name] result._source = definition if bound: return partial(result, object()) return result def arity_greater(fun, n): argspec = getfullargspec(fun) return argspec.varargs or len(argspec.args) > n def fun_takes_argument(name, fun, position=None): spec = getfullargspec(fun) return ( spec.varkw or spec.varargs or (len(spec.args) >= position if position else name in spec.args) ) if hasattr(inspect, 'signature'): def fun_accepts_kwargs(fun): """Return true if function accepts arbitrary keyword arguments.""" return any( p for p in inspect.signature(fun).parameters.values() if p.kind == p.VAR_KEYWORD ) else: def fun_accepts_kwargs(fun): # noqa """Return true if function accepts arbitrary keyword arguments.""" try: argspec = inspect.getargspec(fun) except TypeError: try: argspec = inspect.getargspec(fun.__call__) except (TypeError, AttributeError): return return not argspec or argspec[2] is not None def maybe(typ, val): """Call typ on value if val is defined.""" return typ(val) if val is not None else val def seq_concat_item(seq, item): """Return copy of sequence seq with item added. Returns: Sequence: if seq is a tuple, the result will be a tuple, otherwise it depends on the implementation of ``__add__``. """ return seq + (item,) if isinstance(seq, tuple) else seq + [item] def seq_concat_seq(a, b): """Concatenate two sequences: ``a + b``. Returns: Sequence: The return value will depend on the largest sequence - if b is larger and is a tuple, the return value will be a tuple. - if a is larger and is a list, the return value will be a list, """ # find the type of the largest sequence prefer = type(max([a, b], key=len)) # convert the smallest list to the type of the largest sequence. if not isinstance(a, prefer): a = prefer(a) if not isinstance(b, prefer): b = prefer(b) return a + b celery-4.1.0/celery/utils/nodenames.py0000644000175000017500000000560513130607475017707 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Worker name utilities.""" from __future__ import absolute_import, unicode_literals import os import socket from functools import partial from kombu.entity import Exchange, Queue from .functional import memoize from .text import simple_format #: Exchange for worker direct queues. WORKER_DIRECT_EXCHANGE = Exchange('C.dq2') #: Format for worker direct queue names. WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq2' #: Separator for worker node name and hostname. NODENAME_SEP = '@' NODENAME_DEFAULT = 'celery' gethostname = memoize(1, Cache=dict)(socket.gethostname) __all__ = [ 'worker_direct', 'gethostname', 'nodename', 'anon_nodename', 'nodesplit', 'default_nodename', 'node_format', 'host_format', ] def worker_direct(hostname): """Return the :class:`kombu.Queue` being a direct route to a worker. Arguments: hostname (str, ~kombu.Queue): The fully qualified node name of a worker (e.g., ``w1@example.com``). If passed a :class:`kombu.Queue` instance it will simply return that instead. """ if isinstance(hostname, Queue): return hostname return Queue( WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname), WORKER_DIRECT_EXCHANGE, hostname, ) def nodename(name, hostname): """Create node name from name/hostname pair.""" return NODENAME_SEP.join((name, hostname)) def anon_nodename(hostname=None, prefix='gen'): """Return the nodename for this process (not a worker). This is used for e.g. the origin task message field. 
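Example (illustrative output; the pid and hostname shown are hypothetical): >>> anon_nodename() 'gen12345@myhost.example.com'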
""" return nodename(''.join([prefix, str(os.getpid())]), hostname or gethostname()) def nodesplit(name): """Split node name into tuple of name/hostname.""" parts = name.split(NODENAME_SEP, 1) if len(parts) == 1: return None, parts[0] return parts def default_nodename(hostname): """Return the default nodename for this process.""" name, host = nodesplit(hostname or '') return nodename(name or NODENAME_DEFAULT, host or gethostname()) def node_format(s, name, **extra): """Format worker node name (name@host.com).""" shortname, host = nodesplit(name) return host_format( s, host, shortname or NODENAME_DEFAULT, p=name, **extra) def _fmt_process_index(prefix='', default='0'): from .log import current_process_index index = current_process_index() return '{0}{1}'.format(prefix, index) if index else default _fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '') def host_format(s, host=None, name=None, **extra): """Format host %x abbreviations.""" host = host or gethostname() hname, _, domain = host.partition('.') name = name or hname keys = dict({ 'h': host, 'n': name, 'd': domain, 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix, }, **extra) return simple_format(s, keys) celery-4.1.0/celery/utils/debug.py0000644000175000017500000001143513130607475017022 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Utilities for debugging memory usage, blocking calls, etc.""" from __future__ import absolute_import, print_function, unicode_literals import os import sys import traceback from contextlib import contextmanager from functools import partial from pprint import pprint from celery.five import WhateverIO, items, range from celery.platforms import signals try: from psutil import Process except ImportError: Process = None # noqa __all__ = [ 'blockdetection', 'sample_mem', 'memdump', 'sample', 'humanbytes', 'mem_rss', 'ps', 'cry', ] UNITS = ( (2 ** 40.0, 'TB'), (2 ** 30.0, 'GB'), (2 ** 20.0, 'MB'), (2 ** 10.0, 'KB'), (0.0, 'b'), ) _process = None _mem_sample = [] def _on_blocking(signum, frame): import inspect raise RuntimeError( 'Blocking detection timed-out at: {0}'.format( inspect.getframeinfo(frame) ) ) @contextmanager def blockdetection(timeout): """Context that raises an exception if process is blocking. Uses ``SIGALRM`` to detect blocking functions. """ if not timeout: yield else: old_handler = signals['ALRM'] old_handler = None if old_handler == _on_blocking else old_handler signals['ALRM'] = _on_blocking try: yield signals.arm_alarm(timeout) finally: if old_handler: signals['ALRM'] = old_handler signals.reset_alarm() def sample_mem(): """Sample RSS memory usage. Statistics can then be output by calling :func:`memdump`. """ current_rss = mem_rss() _mem_sample.append(current_rss) return current_rss def _memdump(samples=10): # pragma: no cover S = _mem_sample prev = list(S) if len(S) <= samples else sample(S, samples) _mem_sample[:] = [] import gc gc.collect() after_collect = mem_rss() return prev, after_collect def memdump(samples=10, file=None): # pragma: no cover """Dump memory statistics. Will print a sample of all RSS memory samples added by calling :func:`sample_mem`, and in addition print used RSS memory after :func:`gc.collect`. 
""" say = partial(print, file=file) if ps() is None: say('- rss: (psutil not installed).') return prev, after_collect = _memdump(samples) if prev: say('- rss (sample):') for mem in prev: say('- > {0},'.format(mem)) say('- rss (end): {0}.'.format(after_collect)) def sample(x, n, k=0): """Given a list `x` a sample of length ``n`` of that list is returned. For example, if `n` is 10, and `x` has 100 items, a list of every tenth. item is returned. ``k`` can be used as offset. """ j = len(x) // n for _ in range(n): try: yield x[k] except IndexError: break k += j def hfloat(f, p=5): """Convert float to value suitable for humans. Arguments: f (float): The floating point number. p (int): Floating point precision (default is 5). """ i = int(f) return i if i == f else '{0:.{p}}'.format(f, p=p) def humanbytes(s): """Convert bytes to human-readable form (e.g., KB, MB).""" return next( '{0}{1}'.format(hfloat(s / div if div else s), unit) for div, unit in UNITS if s >= div ) def mem_rss(): """Return RSS memory usage as a humanized string.""" p = ps() if p is not None: return humanbytes(_process_memory_info(p).rss) def ps(): # pragma: no cover """Return the global :class:`psutil.Process` instance. Note: Returns :const:`None` if :pypi:`psutil` is not installed. """ global _process if _process is None and Process is not None: _process = Process(os.getpid()) return _process def _process_memory_info(process): try: return process.memory_info() except AttributeError: return process.get_memory_info() def cry(out=None, sepchr='=', seplen=49): # pragma: no cover """Return stack-trace of all active threads. See Also: Taken from https://gist.github.com/737056. """ import threading out = WhateverIO() if out is None else out P = partial(print, file=out) # get a map of threads by their ID so we can print their names # during the traceback dump tmap = {t.ident: t for t in threading.enumerate()} sep = sepchr * seplen for tid, frame in items(sys._current_frames()): thread = tmap.get(tid) if not thread: # skip old junk (left-overs from a fork) continue P('{0.name}'.format(thread)) P(sep) traceback.print_stack(frame, file=out) P(sep) P('LOCAL VARIABLES') P(sep) pprint(frame.f_locals, stream=out) P('\n') return out.getvalue() celery-4.1.0/celery/utils/sysinfo.py0000644000175000017500000000223513130607475017424 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """System information utilities.""" from __future__ import absolute_import, unicode_literals import os from math import ceil from kombu.utils.objects import cached_property __all__ = ['load_average', 'df'] if hasattr(os, 'getloadavg'): def _load_average(): return tuple(ceil(l * 1e2) / 1e2 for l in os.getloadavg()) else: # pragma: no cover # Windows doesn't have getloadavg def _load_average(): # noqa return (0.0, 0.0, 0.0) def load_average(): """Return system load average as a triple.""" return _load_average() class df(object): """Disk information.""" def __init__(self, path): self.path = path @property def total_blocks(self): return self.stat.f_blocks * self.stat.f_frsize / 1024 @property def available(self): return self.stat.f_bavail * self.stat.f_frsize / 1024 @property def capacity(self): avail = self.stat.f_bavail used = self.stat.f_blocks - self.stat.f_bfree return int(ceil(used * 100.0 / (used + avail) + 0.5)) @cached_property def stat(self): return os.statvfs(os.path.abspath(self.path)) celery-4.1.0/celery/utils/abstract.py0000644000175000017500000000545113130607475017540 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Abstract classes.""" from 
__future__ import absolute_import, unicode_literals from abc import ABCMeta, abstractmethod, abstractproperty from collections import Callable from celery.five import with_metaclass __all__ = ['CallableTask', 'CallableSignature'] def _hasattr(C, attr): return any(attr in B.__dict__ for B in C.__mro__) @with_metaclass(ABCMeta) class _AbstractClass(object): __required_attributes__ = frozenset() @classmethod def _subclasshook_using(cls, parent, C): return ( cls is parent and all(_hasattr(C, attr) for attr in cls.__required_attributes__) ) or NotImplemented @classmethod def register(cls, other): # we override `register` to return other for use as a decorator. type(cls).register(cls, other) return other class CallableTask(_AbstractClass, Callable): # pragma: no cover """Task interface.""" __required_attributes__ = frozenset({ 'delay', 'apply_async', 'apply', }) @abstractmethod def delay(self, *args, **kwargs): pass @abstractmethod def apply_async(self, *args, **kwargs): pass @abstractmethod def apply(self, *args, **kwargs): pass @classmethod def __subclasshook__(cls, C): return cls._subclasshook_using(CallableTask, C) class CallableSignature(CallableTask): # pragma: no cover """Celery Signature interface.""" __required_attributes__ = frozenset({ 'clone', 'freeze', 'set', 'link', 'link_error', '__or__', }) @abstractproperty def name(self): pass @abstractproperty def type(self): pass @abstractproperty def app(self): pass @abstractproperty def id(self): pass @abstractproperty def task(self): pass @abstractproperty def args(self): pass @abstractproperty def kwargs(self): pass @abstractproperty def options(self): pass @abstractproperty def subtask_type(self): pass @abstractproperty def chord_size(self): pass @abstractproperty def immutable(self): pass @abstractmethod def clone(self, args=None, kwargs=None): pass @abstractmethod def freeze(self, id=None, group_id=None, chord=None, root_id=None): pass @abstractmethod def set(self, immutable=None, **options): pass @abstractmethod def link(self, callback): pass @abstractmethod def link_error(self, errback): pass @abstractmethod def __or__(self, other): pass @abstractmethod def __invert__(self): pass @classmethod def __subclasshook__(cls, C): return cls._subclasshook_using(CallableSignature, C) celery-4.1.0/celery/utils/term.py0000644000175000017500000001125713130607475016705 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Terminals and colors.""" from __future__ import absolute_import, unicode_literals import base64 import codecs import os import sys import platform from functools import reduce from celery.five import python_2_unicode_compatible, string from celery.platforms import isatty __all__ = ['colored'] BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) OP_SEQ = '\033[%dm' RESET_SEQ = '\033[0m' COLOR_SEQ = '\033[1;%dm' IS_WINDOWS = platform.system() == 'Windows' ITERM_PROFILE = os.environ.get('ITERM_PROFILE') TERM = os.environ.get('TERM') TERM_IS_SCREEN = TERM and TERM.startswith('screen') # tmux requires unrecognized OSC sequences to be wrapped with DCS tmux; # ST, and for all ESCs in to be replaced with ESC ESC. # It only accepts ESC backslash for ST. _IMG_PRE = '\033Ptmux;\033\033]' if TERM_IS_SCREEN else '\033]' _IMG_POST = '\a\033\\' if TERM_IS_SCREEN else '\a' def fg(s): return COLOR_SEQ % s @python_2_unicode_compatible class colored(object): """Terminal colored text. Example: >>> c = colored(enabled=True) >>> print(str(c.red('the quick '), c.blue('brown ', c.bold('fox ')), ... c.magenta(c.underline('jumps over')), ... 
c.yellow(' the lazy '), ... c.green('dog '))) """ def __init__(self, *s, **kwargs): self.s = s self.enabled = not IS_WINDOWS and kwargs.get('enabled', True) self.op = kwargs.get('op', '') self.names = { 'black': self.black, 'red': self.red, 'green': self.green, 'yellow': self.yellow, 'blue': self.blue, 'magenta': self.magenta, 'cyan': self.cyan, 'white': self.white, } def _add(self, a, b): return string(a) + string(b) def _fold_no_color(self, a, b): try: A = a.no_color() except AttributeError: A = string(a) try: B = b.no_color() except AttributeError: B = string(b) return ''.join((string(A), string(B))) def no_color(self): if self.s: return string(reduce(self._fold_no_color, self.s)) return '' def embed(self): prefix = '' if self.enabled: prefix = self.op return ''.join((string(prefix), string(reduce(self._add, self.s)))) def __str__(self): suffix = '' if self.enabled: suffix = RESET_SEQ return string(''.join((self.embed(), string(suffix)))) def node(self, s, op): return self.__class__(enabled=self.enabled, op=op, *s) def black(self, *s): return self.node(s, fg(30 + BLACK)) def red(self, *s): return self.node(s, fg(30 + RED)) def green(self, *s): return self.node(s, fg(30 + GREEN)) def yellow(self, *s): return self.node(s, fg(30 + YELLOW)) def blue(self, *s): return self.node(s, fg(30 + BLUE)) def magenta(self, *s): return self.node(s, fg(30 + MAGENTA)) def cyan(self, *s): return self.node(s, fg(30 + CYAN)) def white(self, *s): return self.node(s, fg(30 + WHITE)) def __repr__(self): return repr(self.no_color()) def bold(self, *s): return self.node(s, OP_SEQ % 1) def underline(self, *s): return self.node(s, OP_SEQ % 4) def blink(self, *s): return self.node(s, OP_SEQ % 5) def reverse(self, *s): return self.node(s, OP_SEQ % 7) def bright(self, *s): return self.node(s, OP_SEQ % 8) def ired(self, *s): return self.node(s, fg(40 + RED)) def igreen(self, *s): return self.node(s, fg(40 + GREEN)) def iyellow(self, *s): return self.node(s, fg(40 + YELLOW)) def iblue(self, *s): return self.node(s, fg(40 + BLUE)) def imagenta(self, *s): return self.node(s, fg(40 + MAGENTA)) def icyan(self, *s): return self.node(s, fg(40 + CYAN)) def iwhite(self, *s): return self.node(s, fg(40 + WHITE)) def reset(self, *s): return self.node(s or [''], RESET_SEQ) def __add__(self, other): return string(self) + string(other) def supports_images(): return isatty(sys.stdin) and ITERM_PROFILE def _read_as_base64(path): with codecs.open(path, mode='rb') as fh: encoded = base64.b64encode(fh.read()) return encoded if type(encoded) == 'str' else encoded.decode('ascii') def imgcat(path, inline=1, preserve_aspect_ratio=0, **kwargs): return '\n%s1337;File=inline=%d;preserveAspectRatio=%d:%s%s' % ( _IMG_PRE, inline, preserve_aspect_ratio, _read_as_base64(path), _IMG_POST) celery-4.1.0/celery/utils/deprecated.py0000644000175000017500000000721313130607475020033 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Deprecation utilities.""" from __future__ import absolute_import, print_function, unicode_literals import warnings from vine.utils import wraps from celery.exceptions import CPendingDeprecationWarning, CDeprecationWarning __all__ = ['Callable', 'Property', 'warn'] PENDING_DEPRECATION_FMT = """ {description} is scheduled for deprecation in \ version {deprecation} and removal in version v{removal}. \ {alternative} """ DEPRECATION_FMT = """ {description} is deprecated and scheduled for removal in version {removal}. 
{alternative} """ def warn(description=None, deprecation=None, removal=None, alternative=None, stacklevel=2): """Warn of (pending) deprecation.""" ctx = {'description': description, 'deprecation': deprecation, 'removal': removal, 'alternative': alternative} if deprecation is not None: w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT.format(**ctx)) else: w = CDeprecationWarning(DEPRECATION_FMT.format(**ctx)) warnings.warn(w, stacklevel=stacklevel) def Callable(deprecation=None, removal=None, alternative=None, description=None): """Decorator for deprecated functions. A deprecation warning will be emitted when the function is called. Arguments: deprecation (str): Version that marks first deprecation, if this argument isn't set a ``PendingDeprecationWarning`` will be emitted instead. removal (str): Future version when this feature will be removed. alternative (str): Instructions for an alternative solution (if any). description (str): Description of what's being deprecated. """ def _inner(fun): @wraps(fun) def __inner(*args, **kwargs): from .imports import qualname warn(description=description or qualname(fun), deprecation=deprecation, removal=removal, alternative=alternative, stacklevel=3) return fun(*args, **kwargs) return __inner return _inner def Property(deprecation=None, removal=None, alternative=None, description=None): """Decorator for deprecated properties.""" def _inner(fun): return _deprecated_property( fun, deprecation=deprecation, removal=removal, alternative=alternative, description=description or fun.__name__) return _inner class _deprecated_property(object): def __init__(self, fget=None, fset=None, fdel=None, doc=None, **depreinfo): self.__get = fget self.__set = fset self.__del = fdel self.__name__, self.__module__, self.__doc__ = ( fget.__name__, fget.__module__, fget.__doc__, ) self.depreinfo = depreinfo self.depreinfo.setdefault('stacklevel', 3) def __get__(self, obj, type=None): if obj is None: return self warn(**self.depreinfo) return self.__get(obj) def __set__(self, obj, value): if obj is None: return self if self.__set is None: raise AttributeError('cannot set attribute') warn(**self.depreinfo) self.__set(obj, value) def __delete__(self, obj): if obj is None: return self if self.__del is None: raise AttributeError('cannot delete attribute') warn(**self.depreinfo) self.__del(obj) def setter(self, fset): return self.__class__(self.__get, fset, self.__del, **self.depreinfo) def deleter(self, fdel): return self.__class__(self.__get, self.__set, fdel, **self.depreinfo) celery-4.1.0/celery/utils/imports.py0000644000175000017500000001145713130607475017435 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Utilities related to importing modules and symbols by name.""" from __future__ import absolute_import, unicode_literals import imp as _imp import importlib import os import sys import warnings from contextlib import contextmanager from kombu.utils.imports import symbol_by_name from celery.five import reload #: Billiard sets this when execv is enabled. #: We use it to find out the name of the original ``__main__`` #: module, so that we can properly rewrite the name of the #: task to be that of ``App.main``. 
MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') __all__ = [ 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name', 'cwd_in_path', 'find_module', 'import_from_cwd', 'reload_from_cwd', 'module_file', 'gen_task_name', ] class NotAPackage(Exception): """Raised when importing a package, but it's not a package.""" if sys.version_info > (3, 3): # pragma: no cover def qualname(obj): """Return object name.""" if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): obj = obj.__class__ q = getattr(obj, '__qualname__', None) if '.' not in q: q = '.'.join((obj.__module__, q)) return q else: def qualname(obj): # noqa """Return object name.""" if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): obj = obj.__class__ return '.'.join((obj.__module__, obj.__name__)) def instantiate(name, *args, **kwargs): """Instantiate class by name. See Also: :func:`symbol_by_name`. """ return symbol_by_name(name)(*args, **kwargs) @contextmanager def cwd_in_path(): """Context adding the current working directory to sys.path.""" cwd = os.getcwd() if cwd in sys.path: yield else: sys.path.insert(0, cwd) try: yield cwd finally: try: sys.path.remove(cwd) except ValueError: # pragma: no cover pass def find_module(module, path=None, imp=None): """Version of :func:`imp.find_module` supporting dots.""" if imp is None: imp = importlib.import_module with cwd_in_path(): if '.' in module: last = None parts = module.split('.') for i, part in enumerate(parts[:-1]): mpart = imp('.'.join(parts[:i + 1])) try: path = mpart.__path__ except AttributeError: raise NotAPackage(module) last = _imp.find_module(parts[i + 1], path) return last return _imp.find_module(module) def import_from_cwd(module, imp=None, package=None): """Import module, temporarily including modules in the current directory. Modules located in the current directory has precedence over modules located in `sys.path`. """ if imp is None: imp = importlib.import_module with cwd_in_path(): return imp(module, package=package) def reload_from_cwd(module, reloader=None): """Reload module (ensuring that CWD is in sys.path).""" if reloader is None: reloader = reload with cwd_in_path(): return reloader(module) def module_file(module): """Return the correct original file name of a module.""" name = module.__file__ return name[:-1] if name.endswith('.pyc') else name def gen_task_name(app, name, module_name): """Generate task name from name/module pair.""" module_name = module_name or '__main__' try: module = sys.modules[module_name] except KeyError: # Fix for manage.py shell_plus (Issue #366) module = None if module is not None: module_name = module.__name__ # - If the task module is used as the __main__ script # - we need to rewrite the module part of the task name # - to match App.main. if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE: # - see comment about :envvar:`MP_MAIN_FILE` above. 
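# Illustration (hypothetical names): a task ``add`` defined in a script # executed directly would otherwise be named '__main__.add'; with # ``app.main == 'proj'`` the branch below renames it to 'proj.add'.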
module_name = '__main__' if module_name == '__main__' and app.main: return '.'.join([app.main, name]) return '.'.join(p for p in (module_name, name) if p) def load_extension_class_names(namespace): try: from pkg_resources import iter_entry_points except ImportError: # pragma: no cover return for ep in iter_entry_points(namespace): yield ep.name, ':'.join([ep.module_name, ep.attrs[0]]) def load_extension_classes(namespace): for name, class_name in load_extension_class_names(namespace): try: cls = symbol_by_name(class_name) except (ImportError, SyntaxError) as exc: warnings.warn( 'Cannot load {0} extension {1!r}: {2!r}'.format( namespace, class_name, exc)) else: yield name, cls celery-4.1.0/celery/security/0000755000175000017500000000000013135426347016067 5ustar omeromer00000000000000celery-4.1.0/celery/security/__init__.py0000644000175000017500000000343113130607475020177 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Message Signing Serializer.""" from __future__ import absolute_import, unicode_literals from kombu.serialization import ( registry, disable_insecure_serializers as _disable_insecure_serializers, ) from celery.exceptions import ImproperlyConfigured from .serialization import register_auth SSL_NOT_INSTALLED = """\ You need to install the pyOpenSSL library to use the auth serializer. Please install by: $ pip install pyOpenSSL """ SETTING_MISSING = """\ Sorry, but you have to configure the * security_key * security_certificate, and the * security_cert_storE configuration settings to use the auth serializer. Please see the configuration reference for more information. """ __all__ = ['setup_security'] def setup_security(allowed_serializers=None, key=None, cert=None, store=None, digest='sha1', serializer='json', app=None): """See :meth:`@Celery.setup_security`.""" if app is None: from celery import current_app app = current_app._get_current_object() _disable_insecure_serializers(allowed_serializers) conf = app.conf if conf.task_serializer != 'auth': return try: from OpenSSL import crypto # noqa except ImportError: raise ImproperlyConfigured(SSL_NOT_INSTALLED) key = key or conf.security_key cert = cert or conf.security_certificate store = store or conf.security_cert_store if not (key and cert and store): raise ImproperlyConfigured(SETTING_MISSING) with open(key) as kf: with open(cert) as cf: register_auth(kf.read(), cf.read(), store, digest, serializer) registry._set_default_serializer('auth') def disable_untrusted_serializers(whitelist=None): _disable_insecure_serializers(allowed=whitelist) celery-4.1.0/celery/security/key.py0000644000175000017500000000124413130607475017230 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Private keys for the security serializer.""" from __future__ import absolute_import, unicode_literals from kombu.utils.encoding import ensure_bytes from .utils import crypto, reraise_errors __all__ = ['PrivateKey'] class PrivateKey(object): """Represents a private key.""" def __init__(self, key): with reraise_errors('Invalid private key: {0!r}'): self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key) def sign(self, data, digest): """Sign string containing data.""" with reraise_errors('Unable to sign data: {0!r}'): return crypto.sign(self._key, ensure_bytes(data), digest) celery-4.1.0/celery/security/serialization.py0000644000175000017500000000754013130607475021322 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Secure serializer.""" from __future__ import absolute_import, unicode_literals from kombu.serialization import registry, dumps, 
loads from kombu.utils.encoding import bytes_to_str, str_to_bytes, ensure_bytes from celery.five import bytes_if_py2 from celery.utils.serialization import b64encode, b64decode from .certificate import Certificate, FSCertStore from .key import PrivateKey from .utils import reraise_errors __all__ = ['SecureSerializer', 'register_auth'] class SecureSerializer(object): """Signed serializer.""" def __init__(self, key=None, cert=None, cert_store=None, digest='sha1', serializer='json'): self._key = key self._cert = cert self._cert_store = cert_store self._digest = bytes_if_py2(digest) self._serializer = serializer def serialize(self, data): """Serialize data structure into string.""" assert self._key is not None assert self._cert is not None with reraise_errors('Unable to serialize: {0!r}', (Exception,)): content_type, content_encoding, body = dumps( bytes_to_str(data), serializer=self._serializer) # What we sign is the serialized body, not the body itself. # this way the receiver doesn't have to decode the contents # to verify the signature (and thus avoiding potential flaws # in the decoding step). body = ensure_bytes(body) return self._pack(body, content_type, content_encoding, signature=self._key.sign(body, self._digest), signer=self._cert.get_id()) def deserialize(self, data): """Deserialize data structure from string.""" assert self._cert_store is not None with reraise_errors('Unable to deserialize: {0!r}', (Exception,)): payload = self._unpack(data) signature, signer, body = (payload['signature'], payload['signer'], payload['body']) self._cert_store[signer].verify(body, signature, self._digest) return loads(bytes_to_str(body), payload['content_type'], payload['content_encoding'], force=True) def _pack(self, body, content_type, content_encoding, signer, signature, sep=str_to_bytes('\x00\x01')): fields = sep.join( ensure_bytes(s) for s in [signer, signature, content_type, content_encoding, body] ) return b64encode(fields) def _unpack(self, payload, sep=str_to_bytes('\x00\x01')): raw_payload = b64decode(ensure_bytes(payload)) first_sep = raw_payload.find(sep) signer = raw_payload[:first_sep] signer_cert = self._cert_store[signer] sig_len = signer_cert._cert.get_pubkey().bits() >> 3 signature = raw_payload[ first_sep + len(sep):first_sep + len(sep) + sig_len ] end_of_sig = first_sep + len(sep) + sig_len + len(sep) v = raw_payload[end_of_sig:].split(sep) return { 'signer': signer, 'signature': signature, 'content_type': bytes_to_str(v[0]), 'content_encoding': bytes_to_str(v[1]), 'body': bytes_to_str(v[2]), } def register_auth(key=None, cert=None, store=None, digest='sha1', serializer='json'): """Register security serializer.""" s = SecureSerializer(key and PrivateKey(key), cert and Certificate(cert), store and FSCertStore(store), digest=digest, serializer=serializer) registry.register('auth', s.serialize, s.deserialize, content_type='application/data', content_encoding='utf-8') celery-4.1.0/celery/security/certificate.py0000644000175000017500000000517213130607475020726 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """X.509 certificates.""" from __future__ import absolute_import, unicode_literals import glob import os from kombu.utils.encoding import bytes_to_str from celery.exceptions import SecurityError from celery.five import values from .utils import crypto, reraise_errors __all__ = ['Certificate', 'CertStore', 'FSCertStore'] class Certificate(object): """X.509 certificate.""" def __init__(self, cert): assert crypto is not None with reraise_errors('Invalid certificate: {0!r}'): 
            self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

    def has_expired(self):
        """Check if the certificate has expired."""
        return self._cert.has_expired()

    def get_serial_number(self):
        """Return the serial number in the certificate."""
        return bytes_to_str(self._cert.get_serial_number())

    def get_issuer(self):
        """Return issuer (CA) as a string."""
        return ' '.join(bytes_to_str(x[1]) for x in
                        self._cert.get_issuer().get_components())

    def get_id(self):
        """Serial number/issuer pair uniquely identifies a certificate."""
        return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())

    def verify(self, data, signature, digest):
        """Verify signature for string containing data."""
        with reraise_errors('Bad signature: {0!r}'):
            crypto.verify(self._cert, signature, data, digest)


class CertStore(object):
    """Base class for certificate stores."""

    def __init__(self):
        self._certs = {}

    def itercerts(self):
        """Return certificate iterator."""
        for c in values(self._certs):
            yield c

    def __getitem__(self, id):
        """Get certificate by id."""
        try:
            return self._certs[bytes_to_str(id)]
        except KeyError:
            raise SecurityError('Unknown certificate: {0!r}'.format(id))

    def add_cert(self, cert):
        cert_id = bytes_to_str(cert.get_id())
        if cert_id in self._certs:
            # report the computed cert_id, not the ``id`` builtin
            raise SecurityError(
                'Duplicate certificate: {0!r}'.format(cert_id))
        self._certs[cert_id] = cert


class FSCertStore(CertStore):
    """File system certificate store."""

    def __init__(self, path):
        CertStore.__init__(self)
        if os.path.isdir(path):
            path = os.path.join(path, '*')
        for p in glob.glob(path):
            with open(p) as f:
                cert = Certificate(f.read())
                if cert.has_expired():
                    raise SecurityError(
                        'Expired certificate: {0!r}'.format(cert.get_id()))
                self.add_cert(cert)
celery-4.1.0/celery/security/utils.py0000644000175000017500000000141313130607475017576 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Utilities used by the message signing serializer."""
from __future__ import absolute_import, unicode_literals
import sys
from contextlib import contextmanager
from celery.exceptions import SecurityError
from celery.five import reraise

try:
    from OpenSSL import crypto
except ImportError:  # pragma: no cover
    crypto = None  # noqa

__all__ = ['reraise_errors']


@contextmanager
def reraise_errors(msg='{0!r}', errors=None):
    """Context reraising crypto errors as :exc:`SecurityError`."""
    assert crypto is not None
    errors = (crypto.Error,) if errors is None else errors
    try:
        yield
    except errors as exc:
        reraise(SecurityError,
                SecurityError(msg.format(exc)),
                sys.exc_info()[2])
celery-4.1.0/celery/__main__.py0000644000175000017500000000071513130607475016313 0ustar omeromer00000000000000"""Entry-point for the :program:`celery` umbrella command."""
from __future__ import absolute_import, print_function, unicode_literals
import sys
from .
import maybe_patch_concurrency __all__ = ['main'] def main(): """Entrypoint to the ``celery`` umbrella command.""" if 'multi' not in sys.argv: maybe_patch_concurrency() from celery.bin.celery import main as _main _main() if __name__ == '__main__': # pragma: no cover main() celery-4.1.0/celery/beat.py0000644000175000017500000005305713130607475015515 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The periodic task scheduler.""" from __future__ import absolute_import, unicode_literals import copy import errno import heapq import os import time import shelve import sys import traceback from collections import namedtuple from functools import total_ordering from threading import Event, Thread from billiard import ensure_multiprocessing from billiard.context import Process from billiard.common import reset_signals from kombu.utils.functional import maybe_evaluate, reprcall from kombu.utils.objects import cached_property from . import __version__ from . import platforms from . import signals from .five import ( items, monotonic, python_2_unicode_compatible, reraise, values, ) from .schedules import maybe_schedule, crontab from .utils.imports import load_extension_class_names, symbol_by_name from .utils.time import humanize_seconds from .utils.log import get_logger, iter_open_logger_fds __all__ = [ 'SchedulingError', 'ScheduleEntry', 'Scheduler', 'PersistentScheduler', 'Service', 'EmbeddedService', ] event_t = namedtuple('event_t', ('time', 'priority', 'entry')) logger = get_logger(__name__) debug, info, error, warning = (logger.debug, logger.info, logger.error, logger.warning) DEFAULT_MAX_INTERVAL = 300 # 5 minutes class SchedulingError(Exception): """An error occurred while scheduling a task.""" @total_ordering @python_2_unicode_compatible class ScheduleEntry(object): """An entry in the scheduler. Arguments: name (str): see :attr:`name`. schedule (~celery.schedules.schedule): see :attr:`schedule`. args (Tuple): see :attr:`args`. kwargs (Dict): see :attr:`kwargs`. options (Dict): see :attr:`options`. last_run_at (~datetime.datetime): see :attr:`last_run_at`. total_run_count (int): see :attr:`total_run_count`. relative (bool): Is the time relative to when the server starts? """ #: The task name name = None #: The schedule (:class:`~celery.schedules.schedule`) schedule = None #: Positional arguments to apply. args = None #: Keyword arguments to apply. kwargs = None #: Task execution options. options = None #: The time and date of when this task was last scheduled. last_run_at = None #: Total number of times this task has been scheduled. 
    total_run_count = 0

    def __init__(self, name=None, task=None, last_run_at=None,
                 total_run_count=None, schedule=None, args=(), kwargs={},
                 options={}, relative=False, app=None):
        self.app = app
        self.name = name
        self.task = task
        self.args = args
        self.kwargs = kwargs
        self.options = options
        self.schedule = maybe_schedule(schedule, relative, app=self.app)
        self.last_run_at = last_run_at or self._default_now()
        self.total_run_count = total_run_count or 0

    def _default_now(self):
        return self.schedule.now() if self.schedule else self.app.now()

    def _next_instance(self, last_run_at=None):
        """Return new instance, with date and count fields updated."""
        return self.__class__(**dict(
            self,
            last_run_at=last_run_at or self._default_now(),
            total_run_count=self.total_run_count + 1,
        ))
    __next__ = next = _next_instance  # for 2to3

    def __reduce__(self):
        return self.__class__, (
            self.name, self.task, self.last_run_at, self.total_run_count,
            self.schedule, self.args, self.kwargs, self.options,
        )

    def update(self, other):
        """Update values from another entry.

        Will only update "editable" fields:
            ``task``, ``schedule``, ``args``, ``kwargs``, ``options``.
        """
        self.__dict__.update({
            'task': other.task, 'schedule': other.schedule,
            'args': other.args, 'kwargs': other.kwargs,
            'options': other.options,
        })

    def is_due(self):
        """See :meth:`~celery.schedules.schedule.is_due`."""
        return self.schedule.is_due(self.last_run_at)

    def __iter__(self):
        return iter(items(vars(self)))

    def __repr__(self):
        # note: closing ``>`` added to balance the opening bracket
        return '<{name}: {0.name} {call} {0.schedule}>'.format(
            self,
            call=reprcall(self.task, self.args or (), self.kwargs or {}),
            name=type(self).__name__,
        )

    def __lt__(self, other):
        if isinstance(other, ScheduleEntry):
            # How the object is ordered doesn't really matter, as
            # in the scheduler heap, the order is decided by the
            # preceding members of the tuple ``(time, priority, entry)``.
            #
            # If all that's left to order on is the entry then it can
            # just as well be random.
            return id(self) < id(other)
        return NotImplemented


class Scheduler(object):
    """Scheduler for periodic tasks.

    The :program:`celery beat` program may instantiate this class
    multiple times for introspection purposes, but then with the
    ``lazy`` argument set.  It's important for subclasses to
    be idempotent when this argument is set.

    Arguments:
        schedule (~celery.schedules.schedule): see :attr:`schedule`.
        max_interval (int): see :attr:`max_interval`.
        lazy (bool): Don't set up the schedule.
    """

    Entry = ScheduleEntry

    #: The schedule dict/shelve.
    schedule = None

    #: Maximum time to sleep between re-checking the schedule.
    max_interval = DEFAULT_MAX_INTERVAL

    #: How often to sync the schedule (3 minutes by default)
    sync_every = 3 * 60

    #: How many tasks can be called before a sync is forced.
sync_every_tasks = None _last_sync = None _tasks_since_sync = 0 logger = logger # compat def __init__(self, app, schedule=None, max_interval=None, Producer=None, lazy=False, sync_every_tasks=None, **kwargs): self.app = app self.data = maybe_evaluate({} if schedule is None else schedule) self.max_interval = (max_interval or app.conf.beat_max_loop_interval or self.max_interval) self.Producer = Producer or app.amqp.Producer self._heap = None self.old_schedulers = None self.sync_every_tasks = ( app.conf.beat_sync_every if sync_every_tasks is None else sync_every_tasks) if not lazy: self.setup_schedule() def install_default_entries(self, data): entries = {} if self.app.conf.result_expires and \ not self.app.backend.supports_autoexpire: if 'celery.backend_cleanup' not in data: entries['celery.backend_cleanup'] = { 'task': 'celery.backend_cleanup', 'schedule': crontab('0', '4', '*'), 'options': {'expires': 12 * 3600}} self.update_from_dict(entries) def apply_entry(self, entry, producer=None): info('Scheduler: Sending due task %s (%s)', entry.name, entry.task) try: result = self.apply_async(entry, producer=producer, advance=False) except Exception as exc: # pylint: disable=broad-except error('Message Error: %s\n%s', exc, traceback.format_stack(), exc_info=True) else: debug('%s sent. id->%s', entry.task, result.id) def adjust(self, n, drift=-0.010): if n and n > 0: return n + drift return n def is_due(self, entry): return entry.is_due() def _when(self, entry, next_time_to_run, mktime=time.mktime): adjust = self.adjust return (mktime(entry.schedule.now().timetuple()) + (adjust(next_time_to_run) or 0)) def populate_heap(self, event_t=event_t, heapify=heapq.heapify): """Populate the heap with the data contained in the schedule.""" self._heap = [event_t(self._when(e, e.is_due()[1]) or 0, 5, e) for e in values(self.schedule)] heapify(self._heap) # pylint disable=redefined-outer-name def tick(self, event_t=event_t, min=min, heappop=heapq.heappop, heappush=heapq.heappush): """Run a tick - one iteration of the scheduler. Executes one due task per call. Returns: float: preferred delay in seconds for next call. 
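        Example:
            A minimal driver loop (a sketch only; the real loop in
            :meth:`Service.start` also handles shutdown and syncing):

                while True:
                    interval = scheduler.tick()
                    if interval and interval > 0.0:
                        time.sleep(interval)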
""" adjust = self.adjust max_interval = self.max_interval if (self._heap is None or not self.schedules_equal(self.old_schedulers, self.schedule)): self.old_schedulers = copy.copy(self.schedule) self.populate_heap() H = self._heap if not H: return max_interval event = H[0] entry = event[2] is_due, next_time_to_run = self.is_due(entry) if is_due: verify = heappop(H) if verify is event: next_entry = self.reserve(entry) self.apply_entry(entry, producer=self.producer) heappush(H, event_t(self._when(next_entry, next_time_to_run), event[1], next_entry)) return 0 else: heappush(H, verify) return min(verify[0], max_interval) return min(adjust(next_time_to_run) or max_interval, max_interval) def schedules_equal(self, old_schedules, new_schedules): if set(old_schedules.keys()) != set(new_schedules.keys()): return False for name, old_entry in old_schedules.items(): new_entry = new_schedules.get(name) if not new_entry or old_entry.schedule != new_entry.schedule: return False return True def should_sync(self): return ( (not self._last_sync or (monotonic() - self._last_sync) > self.sync_every) or (self.sync_every_tasks and self._tasks_since_sync >= self.sync_every_tasks) ) def reserve(self, entry): new_entry = self.schedule[entry.name] = next(entry) return new_entry def apply_async(self, entry, producer=None, advance=True, **kwargs): # Update time-stamps and run counts before we actually execute, # so we have that done if an exception is raised (doesn't schedule # forever.) entry = self.reserve(entry) if advance else entry task = self.app.tasks.get(entry.task) try: if task: return task.apply_async(entry.args, entry.kwargs, producer=producer, **entry.options) else: return self.send_task(entry.task, entry.args, entry.kwargs, producer=producer, **entry.options) except Exception as exc: # pylint: disable=broad-except reraise(SchedulingError, SchedulingError( "Couldn't apply scheduled task {0.name}: {exc}".format( entry, exc=exc)), sys.exc_info()[2]) finally: self._tasks_since_sync += 1 if self.should_sync(): self._do_sync() def send_task(self, *args, **kwargs): return self.app.send_task(*args, **kwargs) def setup_schedule(self): self.install_default_entries(self.data) def _do_sync(self): try: debug('beat: Synchronizing schedule...') self.sync() finally: self._last_sync = monotonic() self._tasks_since_sync = 0 def sync(self): pass def close(self): self.sync() def add(self, **kwargs): entry = self.Entry(app=self.app, **kwargs) self.schedule[entry.name] = entry return entry def _maybe_entry(self, name, entry): if isinstance(entry, self.Entry): entry.app = self.app return entry return self.Entry(**dict(entry, name=name, app=self.app)) def update_from_dict(self, dict_): self.schedule.update({ name: self._maybe_entry(name, entry) for name, entry in items(dict_) }) def merge_inplace(self, b): schedule = self.schedule A, B = set(schedule), set(b) # Remove items from disk not in the schedule anymore. for key in A ^ B: schedule.pop(key, None) # Update and add new items in the schedule for key in B: entry = self.Entry(**dict(b[key], name=key, app=self.app)) if schedule.get(key): schedule[key].update(entry) else: schedule[key] = entry def _ensure_connected(self): # callback called for each retry while the connection # can't be established. def _error_handler(exc, interval): error('beat: Connection error: %s. 
' 'Trying again in %s seconds...', exc, interval) return self.connection.ensure_connection( _error_handler, self.app.conf.broker_connection_max_retries ) def get_schedule(self): return self.data def set_schedule(self, schedule): self.data = schedule schedule = property(get_schedule, set_schedule) @cached_property def connection(self): return self.app.connection_for_write() @cached_property def producer(self): return self.Producer(self._ensure_connected(), auto_declare=False) @property def info(self): return '' class PersistentScheduler(Scheduler): """Scheduler backed by :mod:`shelve` database.""" persistence = shelve known_suffixes = ('', '.db', '.dat', '.bak', '.dir') _store = None def __init__(self, *args, **kwargs): self.schedule_filename = kwargs.get('schedule_filename') Scheduler.__init__(self, *args, **kwargs) def _remove_db(self): for suffix in self.known_suffixes: with platforms.ignore_errno(errno.ENOENT): os.remove(self.schedule_filename + suffix) def _open_schedule(self): return self.persistence.open(self.schedule_filename, writeback=True) def _destroy_open_corrupted_schedule(self, exc): error('Removing corrupted schedule file %r: %r', self.schedule_filename, exc, exc_info=True) self._remove_db() return self._open_schedule() def setup_schedule(self): try: self._store = self._open_schedule() # In some cases there may be different errors from a storage # backend for corrupted files. Example - DBPageNotFoundError # exception from bsddb. In such case the file will be # successfully opened but the error will be raised on first key # retrieving. self._store.keys() except Exception as exc: # pylint: disable=broad-except self._store = self._destroy_open_corrupted_schedule(exc) self._create_schedule() tz = self.app.conf.timezone stored_tz = self._store.get(str('tz')) if stored_tz is not None and stored_tz != tz: warning('Reset: Timezone changed from %r to %r', stored_tz, tz) self._store.clear() # Timezone changed, reset db! utc = self.app.conf.enable_utc stored_utc = self._store.get(str('utc_enabled')) if stored_utc is not None and stored_utc != utc: choices = {True: 'enabled', False: 'disabled'} warning('Reset: UTC changed from %s to %s', choices[stored_utc], choices[utc]) self._store.clear() # UTC setting changed, reset db! entries = self._store.setdefault(str('entries'), {}) self.merge_inplace(self.app.conf.beat_schedule) self.install_default_entries(self.schedule) self._store.update({ str('__version__'): __version__, str('tz'): tz, str('utc_enabled'): utc, }) self.sync() debug('Current schedule:\n' + '\n'.join( repr(entry) for entry in values(entries))) def _create_schedule(self): for _ in (1, 2): try: self._store[str('entries')] except KeyError: # new schedule db try: self._store[str('entries')] = {} except KeyError as exc: self._store = self._destroy_open_corrupted_schedule(exc) continue else: if str('__version__') not in self._store: warning('DB Reset: Account for new __version__ field') self._store.clear() # remove schedule at 2.2.2 upgrade. 
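                # The checks below repeat the same pattern: a schema field
                # missing from the shelve file means it was written by an
                # older version, and the safe recovery is a full reset.
                # A hypothetical future field would be handled the same way
                # (``some_new_field`` is illustrative only):
                #
                #     elif str('some_new_field') not in self._store:
                #         self._store.clear()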
                elif str('tz') not in self._store:
                    warning('DB Reset: Account for new tz field')
                    self._store.clear()   # remove schedule at 3.0.8 upgrade
                elif str('utc_enabled') not in self._store:
                    warning('DB Reset: Account for new utc_enabled field')
                    self._store.clear()   # remove schedule at 3.0.9 upgrade
            break

    def get_schedule(self):
        return self._store[str('entries')]

    def set_schedule(self, schedule):
        self._store[str('entries')] = schedule
    schedule = property(get_schedule, set_schedule)

    def sync(self):
        if self._store is not None:
            self._store.sync()

    def close(self):
        self.sync()
        self._store.close()

    @property
    def info(self):
        return ' . db -> {self.schedule_filename}'.format(self=self)


class Service(object):
    """Celery periodic task service."""

    scheduler_cls = PersistentScheduler

    def __init__(self, app, max_interval=None, schedule_filename=None,
                 scheduler_cls=None):
        self.app = app
        self.max_interval = (max_interval or
                             app.conf.beat_max_loop_interval)
        self.scheduler_cls = scheduler_cls or self.scheduler_cls
        self.schedule_filename = (
            schedule_filename or app.conf.beat_schedule_filename)

        self._is_shutdown = Event()
        self._is_stopped = Event()

    def __reduce__(self):
        # the tuple must follow the positional order of __init__
        # (app first), otherwise unpickling produces a broken Service.
        return self.__class__, (self.app, self.max_interval,
                                self.schedule_filename, self.scheduler_cls)

    def start(self, embedded_process=False):
        info('beat: Starting...')
        debug('beat: Ticking with max interval->%s',
              humanize_seconds(self.scheduler.max_interval))

        signals.beat_init.send(sender=self)
        if embedded_process:
            signals.beat_embedded_init.send(sender=self)
            platforms.set_process_title('celery beat')

        try:
            while not self._is_shutdown.is_set():
                interval = self.scheduler.tick()
                if interval and interval > 0.0:
                    debug('beat: Waking up %s.',
                          humanize_seconds(interval, prefix='in '))
                    time.sleep(interval)
                    if self.scheduler.should_sync():
                        self.scheduler._do_sync()
        except (KeyboardInterrupt, SystemExit):
            self._is_shutdown.set()
        finally:
            self.sync()

    def sync(self):
        self.scheduler.close()
        self._is_stopped.set()

    def stop(self, wait=False):
        info('beat: Shutting down...')
        self._is_shutdown.set()
        wait and self._is_stopped.wait()  # block until shutdown done.

    def get_scheduler(self, lazy=False,
                      extension_namespace='celery.beat_schedulers'):
        filename = self.schedule_filename
        aliases = dict(
            load_extension_class_names(extension_namespace) or {})
        return symbol_by_name(self.scheduler_cls, aliases=aliases)(
            app=self.app,
            schedule_filename=filename,
            max_interval=self.max_interval,
            lazy=lazy,
        )

    @cached_property
    def scheduler(self):
        return self.get_scheduler()


class _Threaded(Thread):
    """Embedded task scheduler using threading."""

    def __init__(self, app, **kwargs):
        super(_Threaded, self).__init__()
        self.app = app
        self.service = Service(app, **kwargs)
        self.daemon = True
        self.name = 'Beat'

    def run(self):
        self.app.set_current()
        self.service.start()

    def stop(self):
        self.service.stop(wait=True)


try:
    ensure_multiprocessing()
except NotImplementedError:  # pragma: no cover
    _Process = None
else:
    class _Process(Process):  # noqa

        def __init__(self, app, **kwargs):
            super(_Process, self).__init__()
            self.app = app
            self.service = Service(app, **kwargs)
            self.name = 'Beat'

        def run(self):
            reset_signals(full=False)
            platforms.close_open_fds([
                sys.__stdin__, sys.__stdout__, sys.__stderr__,
            ] + list(iter_open_logger_fds()))
            self.app.set_default()
            self.app.set_current()
            self.service.start(embedded_process=True)

        def stop(self):
            self.service.stop()
            self.terminate()


def EmbeddedService(app, max_interval=None, **kwargs):
    """Return embedded clock service.
    Arguments:
        thread (bool): Run threaded instead of as a separate process.
            Uses :mod:`multiprocessing` by default, if available.
    """
    if kwargs.pop('thread', False) or _Process is None:
        # Need short max interval to be able to stop thread
        # in reasonable time.
        return _Threaded(app, max_interval=1, **kwargs)
    return _Process(app, max_interval=max_interval, **kwargs)
celery-4.1.0/celery/local.py0000644000175000017500000004253413130607475015672 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Proxy/PromiseProxy implementation.

This module contains critical utilities that need to be loaded as
soon as possible, and that shall not load any third party modules.

Parts of this module are copyright by the Werkzeug Team.
"""
from __future__ import absolute_import, unicode_literals
import operator
import sys
from functools import reduce
from importlib import import_module
from types import ModuleType
from .five import bytes_if_py2, items, string, string_t

__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate']

__module__ = __name__  # used by Proxy class body

PY3 = sys.version_info[0] == 3


def _default_cls_attr(name, type_, cls_value):
    # Proxy uses properties to forward the standard
    # class attributes __module__, __name__ and __doc__ to the real
    # object, but these need to be a string when accessed from
    # the Proxy class directly.  This is a hack to make that work.
    # -- See Issue #1087.

    def __new__(cls, getter):
        instance = type_.__new__(cls, cls_value)
        instance.__getter = getter
        return instance

    def __get__(self, obj, cls=None):
        return self.__getter(obj) if obj is not None else self

    return type(bytes_if_py2(name), (type_,), {
        '__new__': __new__, '__get__': __get__,
    })


def try_import(module, default=None):
    """Try to import and return module.

    Returns None if the module does not exist.
    """
    try:
        return import_module(module)
    except ImportError:
        return default


class Proxy(object):
    """Proxy to another object."""

    # Code stolen from werkzeug.local.Proxy.
    __slots__ = ('__local', '__args', '__kwargs', '__dict__')

    def __init__(self, local,
                 args=None, kwargs=None, name=None, __doc__=None):
        object.__setattr__(self, '_Proxy__local', local)
        object.__setattr__(self, '_Proxy__args', args or ())
        object.__setattr__(self, '_Proxy__kwargs', kwargs or {})
        if name is not None:
            object.__setattr__(self, '__custom_name__', name)
        if __doc__ is not None:
            object.__setattr__(self, '__doc__', __doc__)

    @_default_cls_attr('name', str, __name__)
    def __name__(self):
        try:
            return self.__custom_name__
        except AttributeError:
            return self._get_current_object().__name__

    @_default_cls_attr('qualname', str, __name__)
    def __qualname__(self):
        try:
            return self.__custom_name__
        except AttributeError:
            return self._get_current_object().__qualname__

    @_default_cls_attr('module', str, __module__)
    def __module__(self):
        return self._get_current_object().__module__

    @_default_cls_attr('doc', str, __doc__)
    def __doc__(self):
        return self._get_current_object().__doc__

    def _get_class(self):
        return self._get_current_object().__class__

    @property
    def __class__(self):
        return self._get_class()

    def _get_current_object(self):
        """Get current object.

        This is useful if you want the real object behind the proxy
        at a given time for performance reasons, or because you want
        to pass the object into a different context.
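        Example:
            A sketch of the difference between the proxy and the
            unwrapped object (the callable here is illustrative):

                p = Proxy(lambda: 42)
                p + 1                          # forwarded, gives 43
                obj = p._get_current_object()  # the real int 42
                type(obj)                      # int, not Proxy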
""" loc = object.__getattribute__(self, '_Proxy__local') if not hasattr(loc, '__release_local__'): return loc(*self.__args, **self.__kwargs) try: # pragma: no cover # not sure what this is about return getattr(loc, self.__name__) except AttributeError: # pragma: no cover raise RuntimeError('no object bound to {0.__name__}'.format(self)) @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: # pragma: no cover raise AttributeError('__dict__') def __repr__(self): try: obj = self._get_current_object() except RuntimeError: # pragma: no cover return '<{0} unbound>'.format(self.__class__.__name__) return repr(obj) def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: # pragma: no cover return False __nonzero__ = __bool__ # Py2 def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: # pragma: no cover return [] def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] def __setattr__(self, name, value): setattr(self._get_current_object(), name, value) def __delattr__(self, name): delattr(self._get_current_object(), name) def __str__(self): return str(self._get_current_object()) def __lt__(self, other): return self._get_current_object() < other def __le__(self, other): return self._get_current_object() <= other def __eq__(self, other): return self._get_current_object() == other def __ne__(self, other): return self._get_current_object() != other def __gt__(self, other): return self._get_current_object() > other def __ge__(self, other): return self._get_current_object() >= other def __hash__(self): return hash(self._get_current_object()) def __call__(self, *a, **kw): return self._get_current_object()(*a, **kw) def __len__(self): return len(self._get_current_object()) def __getitem__(self, i): return self._get_current_object()[i] def __iter__(self): return iter(self._get_current_object()) def __contains__(self, i): return i in self._get_current_object() def __getslice__(self, i, j): return self._get_current_object()[i:j] def __add__(self, other): return self._get_current_object() + other def __sub__(self, other): return self._get_current_object() - other def __mul__(self, other): return self._get_current_object() * other def __floordiv__(self, other): return self._get_current_object() // other def __mod__(self, other): return self._get_current_object() % other def __divmod__(self, other): return self._get_current_object().__divmod__(other) def __pow__(self, other): return self._get_current_object() ** other def __lshift__(self, other): return self._get_current_object() << other def __rshift__(self, other): return self._get_current_object() >> other def __and__(self, other): return self._get_current_object() & other def __xor__(self, other): return self._get_current_object() ^ other def __or__(self, other): return self._get_current_object() | other def __div__(self, other): return self._get_current_object().__div__(other) def __truediv__(self, other): return self._get_current_object().__truediv__(other) def __neg__(self): return -(self._get_current_object()) def __pos__(self): return +(self._get_current_object()) def __abs__(self): return 
abs(self._get_current_object()) def __invert__(self): return ~(self._get_current_object()) def __complex__(self): return complex(self._get_current_object()) def __int__(self): return int(self._get_current_object()) def __float__(self): return float(self._get_current_object()) def __oct__(self): return oct(self._get_current_object()) def __hex__(self): return hex(self._get_current_object()) def __index__(self): return self._get_current_object().__index__() def __coerce__(self, other): return self._get_current_object().__coerce__(other) def __enter__(self): return self._get_current_object().__enter__() def __exit__(self, *a, **kw): return self._get_current_object().__exit__(*a, **kw) def __reduce__(self): return self._get_current_object().__reduce__() if not PY3: # pragma: no cover def __cmp__(self, other): return cmp(self._get_current_object(), other) # noqa def __long__(self): return long(self._get_current_object()) # noqa def __unicode__(self): try: return string(self._get_current_object()) except RuntimeError: # pragma: no cover return repr(self) class PromiseProxy(Proxy): """Proxy that evaluates object once. :class:`Proxy` will evaluate the object each time, while the promise will only evaluate it once. """ __slots__ = ('__pending__', '__weakref__') def _get_current_object(self): try: return object.__getattribute__(self, '__thing') except AttributeError: return self.__evaluate__() def __then__(self, fun, *args, **kwargs): if self.__evaluated__(): return fun(*args, **kwargs) from collections import deque try: pending = object.__getattribute__(self, '__pending__') except AttributeError: pending = None if pending is None: pending = deque() object.__setattr__(self, '__pending__', pending) pending.append((fun, args, kwargs)) def __evaluated__(self): try: object.__getattribute__(self, '__thing') except AttributeError: return False return True def __maybe_evaluate__(self): return self._get_current_object() def __evaluate__(self, _clean=('_Proxy__local', '_Proxy__args', '_Proxy__kwargs')): try: thing = Proxy._get_current_object(self) except Exception: raise else: object.__setattr__(self, '__thing', thing) for attr in _clean: try: object.__delattr__(self, attr) except AttributeError: # pragma: no cover # May mask errors so ignore pass try: pending = object.__getattribute__(self, '__pending__') except AttributeError: pass else: try: while pending: fun, args, kwargs = pending.popleft() fun(*args, **kwargs) finally: try: object.__delattr__(self, '__pending__') except AttributeError: # pragma: no cover pass return thing def maybe_evaluate(obj): """Attempt to evaluate promise, even if obj is not a promise.""" try: return obj.__maybe_evaluate__() except AttributeError: return obj # ############# Module Generation ########################## # Utilities to dynamically # recreate modules, either for lazy loading or # to create old modules at runtime instead of # having them litter the source tree. # import fails in python 2.5. fallback to reduce in stdlib MODULE_DEPRECATED = """ The module %s is deprecated and will be removed in a future version. """ DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'} # im_func is no longer available in Py3. # instead the unbound method itself can be used. if sys.version_info[0] == 3: # pragma: no cover def fun_of_method(method): return method else: def fun_of_method(method): # noqa return method.im_func def getappattr(path): """Get attribute from current_app recursively. Example: ``getappattr('amqp.get_task_consumer')``. 
""" from celery import current_app return current_app._rgetattr(path) def _compat_periodic_task_decorator(*args, **kwargs): from celery.task import periodic_task return periodic_task(*args, **kwargs) COMPAT_MODULES = { 'celery': { 'execute': { 'send_task': 'send_task', }, 'decorators': { 'task': 'task', 'periodic_task': _compat_periodic_task_decorator, }, 'log': { 'get_default_logger': 'log.get_default_logger', 'setup_logger': 'log.setup_logger', 'setup_logging_subsystem': 'log.setup_logging_subsystem', 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', }, 'messaging': { 'TaskConsumer': 'amqp.TaskConsumer', 'establish_connection': 'connection', 'get_consumer_set': 'amqp.TaskConsumer', }, 'registry': { 'tasks': 'tasks', }, }, 'celery.task': { 'control': { 'broadcast': 'control.broadcast', 'rate_limit': 'control.rate_limit', 'time_limit': 'control.time_limit', 'ping': 'control.ping', 'revoke': 'control.revoke', 'discard_all': 'control.purge', 'inspect': 'control.inspect', }, 'schedules': 'celery.schedules', 'chords': 'celery.canvas', } } #: We exclude these from dir(celery) DEPRECATED_ATTRS = set(COMPAT_MODULES['celery'].keys()) | {'subtask'} class class_property(object): def __init__(self, getter=None, setter=None): if getter is not None and not isinstance(getter, classmethod): getter = classmethod(getter) if setter is not None and not isinstance(setter, classmethod): setter = classmethod(setter) self.__get = getter self.__set = setter info = getter.__get__(object) # just need the info attrs. self.__doc__ = info.__doc__ self.__name__ = info.__name__ self.__module__ = info.__module__ def __get__(self, obj, type=None): if obj and type is None: type = obj.__class__ return self.__get.__get__(obj, type)() def __set__(self, obj, value): if obj is None: return self return self.__set.__get__(obj)(value) def setter(self, setter): return self.__class__(self.__get, setter) def reclassmethod(method): return classmethod(fun_of_method(method)) class LazyModule(ModuleType): _compat_modules = () _all_by_module = {} _direct = {} _object_origins = {} def __getattr__(self, name): if name in self._object_origins: module = __import__(self._object_origins[name], None, None, [name]) for item in self._all_by_module[module.__name__]: setattr(self, item, getattr(module, item)) return getattr(module, name) elif name in self._direct: # pragma: no cover module = __import__(self._direct[name], None, None, [name]) setattr(self, name, module) return module return ModuleType.__getattribute__(self, name) def __dir__(self): return [ attr for attr in set(self.__all__) | DEFAULT_ATTRS if attr not in DEPRECATED_ATTRS ] def __reduce__(self): return import_module, (self.__name__,) def create_module(name, attrs, cls_attrs=None, pkg=None, base=LazyModule, prepare_attr=None): fqdn = '.'.join([pkg.__name__, name]) if pkg else name cls_attrs = {} if cls_attrs is None else cls_attrs pkg, _, modname = name.rpartition('.') cls_attrs['__module__'] = pkg attrs = { attr_name: (prepare_attr(attr) if prepare_attr else attr) for attr_name, attr in items(attrs) } module = sys.modules[fqdn] = type( bytes_if_py2(modname), (base,), cls_attrs)(bytes_if_py2(name)) module.__dict__.update(attrs) return module def recreate_module(name, compat_modules=(), by_module={}, direct={}, base=LazyModule, **attrs): old_module = sys.modules[name] origins = get_origins(by_module) compat_modules = COMPAT_MODULES.get(name, ()) _all = tuple(set(reduce( operator.add, [tuple(v) for v in [compat_modules, origins, direct, attrs]], ))) if sys.version_info[0] 
< 3:
        _all = [s.encode() for s in _all]
    cattrs = dict(
        _compat_modules=compat_modules,
        _all_by_module=by_module, _direct=direct,
        _object_origins=origins,
        __all__=_all,
    )
    new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
    new_module.__dict__.update({
        mod: get_compat_module(new_module, mod)
        for mod in compat_modules
    })
    return old_module, new_module


def get_compat_module(pkg, name):
    def prepare(attr):
        if isinstance(attr, string_t):
            return Proxy(getappattr, (attr,))
        return attr

    attrs = COMPAT_MODULES[pkg.__name__][name]
    if isinstance(attrs, string_t):
        fqdn = '.'.join([pkg.__name__, name])
        module = sys.modules[fqdn] = import_module(attrs)
        return module
    attrs[bytes_if_py2('__all__')] = list(attrs)
    return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)


def get_origins(defs):
    origins = {}
    for module, attrs in items(defs):
        origins.update({attr: module for attr in attrs})
    return origins
celery-4.1.0/celery/backends/0000755000175000017500000000000013135426347015772 5ustar omeromer00000000000000celery-4.1.0/celery/backends/base.py0000644000175000017500000006623213130607475017265 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Result backend base classes.

- :class:`BaseBackend` defines the interface.

- :class:`KeyValueStoreBackend` is a common base class
  using K/V semantics like ``get`` and ``set``.
"""
from __future__ import absolute_import, unicode_literals
import sys
import time
from collections import namedtuple
from datetime import timedelta
from weakref import WeakValueDictionary
from billiard.einfo import ExceptionInfo
from kombu.serialization import (
    dumps, loads, prepare_accept_content,
    registry as serializer_registry,
)
from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
from kombu.utils.url import maybe_sanitize_url
from celery import states
from celery import current_app, group, maybe_signature
from celery._state import get_current_task
from celery.exceptions import (
    ChordError, TimeoutError, TaskRevokedError, ImproperlyConfigured,
)
from celery.five import items, string
from celery.result import (
    GroupResult, ResultBase, allow_join_result, result_from_tuple,
)
from celery.utils.collections import BufferMap
from celery.utils.functional import LRUCache, arity_greater
from celery.utils.log import get_logger
from celery.utils.serialization import (
    get_pickled_exception,
    get_pickleable_exception,
    create_exception_cls,
)

__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend']

EXCEPTION_ABLE_CODECS = frozenset({'pickle'})
PY3 = sys.version_info >= (3, 0)

logger = get_logger(__name__)

MESSAGE_BUFFER_MAX = 8192

pending_results_t = namedtuple('pending_results_t', (
    'concrete', 'weak',
))

E_NO_BACKEND = """
No result backend is configured.
Please see the documentation for more information.
"""

E_CHORD_NO_BACKEND = """
Starting chords requires a result backend to be configured.

Note that a group chained with a task is also upgraded to be a chord,
as this pattern requires synchronization.

Result backends that support chords: Redis, Database, Memcached, and more.
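
Example configuration (the Redis URL below is illustrative):

    app.conf.result_backend = 'redis://localhost:6379/0'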
""" def unpickle_backend(cls, args, kwargs): """Return an unpickled backend.""" return cls(*args, app=current_app._get_current_object(), **kwargs) class _nulldict(dict): def ignore(self, *a, **kw): pass __setitem__ = update = setdefault = ignore class Backend(object): READY_STATES = states.READY_STATES UNREADY_STATES = states.UNREADY_STATES EXCEPTION_STATES = states.EXCEPTION_STATES TimeoutError = TimeoutError #: Time to sleep between polling each individual item #: in `ResultSet.iterate`. as opposed to the `interval` #: argument which is for each pass. subpolling_interval = None #: If true the backend must implement :meth:`get_many`. supports_native_join = False #: If true the backend must automatically expire results. #: The daily backend_cleanup periodic task won't be triggered #: in this case. supports_autoexpire = False #: Set to true if the backend is peristent by default. persistent = True retry_policy = { 'max_retries': 20, 'interval_start': 0, 'interval_step': 1, 'interval_max': 1, } def __init__(self, app, serializer=None, max_cached_results=None, accept=None, expires=None, expires_type=None, url=None, **kwargs): self.app = app conf = self.app.conf self.serializer = serializer or conf.result_serializer (self.content_type, self.content_encoding, self.encoder) = serializer_registry._encoders[self.serializer] cmax = max_cached_results or conf.result_cache_max self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax) self.expires = self.prepare_expires(expires, expires_type) self.accept = prepare_accept_content( conf.accept_content if accept is None else accept) self._pending_results = pending_results_t({}, WeakValueDictionary()) self._pending_messages = BufferMap(MESSAGE_BUFFER_MAX) self.url = url def as_uri(self, include_password=False): """Return the backend as an URI, sanitizing the password or not.""" # when using maybe_sanitize_url(), "/" is added # we're stripping it for consistency if include_password: return self.url url = maybe_sanitize_url(self.url or '') return url[:-1] if url.endswith(':///') else url def mark_as_started(self, task_id, **meta): """Mark a task as started.""" return self.store_result(task_id, meta, states.STARTED) def mark_as_done(self, task_id, result, request=None, store_result=True, state=states.SUCCESS): """Mark task as successfully executed.""" if store_result: self.store_result(task_id, result, state, request=request) if request and request.chord: self.on_chord_part_return(request, state, result) def mark_as_failure(self, task_id, exc, traceback=None, request=None, store_result=True, call_errbacks=True, state=states.FAILURE): """Mark task as executed with failure.""" if store_result: self.store_result(task_id, exc, state, traceback=traceback, request=request) if request: if request.chord: self.on_chord_part_return(request, state, exc) if call_errbacks and request.errbacks: self._call_task_errbacks(request, exc, traceback) def _call_task_errbacks(self, request, exc, traceback): old_signature = [] for errback in request.errbacks: errback = self.app.signature(errback) if arity_greater(errback.type.__header__, 1): errback(request, exc, traceback) else: old_signature.append(errback) if old_signature: # Previously errback was called as a task so we still # need to do so if the errback only takes a single task_id arg. 
            task_id = request.id
            root_id = request.root_id or task_id
            group(old_signature, app=self.app).apply_async(
                (task_id,), parent_id=task_id, root_id=root_id
            )

    def mark_as_revoked(self, task_id, reason='',
                        request=None, store_result=True,
                        state=states.REVOKED):
        exc = TaskRevokedError(reason)
        if store_result:
            self.store_result(task_id, exc, state,
                              traceback=None, request=request)
        if request and request.chord:
            self.on_chord_part_return(request, state, exc)

    def mark_as_retry(self, task_id, exc, traceback=None,
                      request=None, store_result=True, state=states.RETRY):
        """Mark task as being retried.

        Note:
            Stores the current exception (if any).
        """
        return self.store_result(task_id, exc, state,
                                 traceback=traceback, request=request)

    def chord_error_from_stack(self, callback, exc=None):
        # need below import for test for some crazy reason
        from celery import group  # pylint: disable
        app = self.app
        try:
            backend = app._tasks[callback.task].backend
        except KeyError:
            backend = self
        try:
            group(
                [app.signature(errback)
                 for errback in callback.options.get('link_error') or []],
                app=app,
            ).apply_async((callback.id,))
        except Exception as eb_exc:  # pylint: disable=broad-except
            return backend.fail_from_current_stack(callback.id, exc=eb_exc)
        else:
            return backend.fail_from_current_stack(callback.id, exc=exc)

    def fail_from_current_stack(self, task_id, exc=None):
        type_, real_exc, tb = sys.exc_info()
        try:
            exc = real_exc if exc is None else exc
            ei = ExceptionInfo((type_, exc, tb))
            self.mark_as_failure(task_id, exc, ei.traceback)
            return ei
        finally:
            del tb

    def prepare_exception(self, exc, serializer=None):
        """Prepare exception for serialization."""
        serializer = self.serializer if serializer is None else serializer
        if serializer in EXCEPTION_ABLE_CODECS:
            return get_pickleable_exception(exc)
        return {'exc_type': type(exc).__name__, 'exc_message': string(exc)}

    def exception_to_python(self, exc):
        """Convert serialized exception to Python exception."""
        if exc:
            if not isinstance(exc, BaseException):
                exc = create_exception_cls(
                    from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
            if self.serializer in EXCEPTION_ABLE_CODECS:
                exc = get_pickled_exception(exc)
        return exc

    def prepare_value(self, result):
        """Prepare value for storage."""
        if self.serializer != 'pickle' and isinstance(result, ResultBase):
            return result.as_tuple()
        return result

    def encode(self, data):
        _, _, payload = self._encode(data)
        return payload

    def _encode(self, data):
        return dumps(data, serializer=self.serializer)

    def meta_from_decoded(self, meta):
        if meta['status'] in self.EXCEPTION_STATES:
            meta['result'] = self.exception_to_python(meta['result'])
        return meta

    def decode_result(self, payload):
        return self.meta_from_decoded(self.decode(payload))

    def decode(self, payload):
        payload = PY3 and payload or str(payload)
        return loads(payload,
                     content_type=self.content_type,
                     content_encoding=self.content_encoding,
                     accept=self.accept)

    def prepare_expires(self, value, type=None):
        if value is None:
            value = self.app.conf.result_expires
        if isinstance(value, timedelta):
            value = value.total_seconds()
        if value is not None and type:
            return type(value)
        return value

    def prepare_persistent(self, enabled=None):
        if enabled is not None:
            return enabled
        p = self.app.conf.result_persistent
        return self.persistent if p is None else p

    def encode_result(self, result, state):
        if state in self.EXCEPTION_STATES and isinstance(result, Exception):
            return self.prepare_exception(result)
        else:
            return self.prepare_value(result)

    def is_cached(self, task_id):
        return task_id in self._cache

    def store_result(self,
                     task_id, result, state,
                     traceback=None, request=None, **kwargs):
        """Update task state and result."""
        result = self.encode_result(result, state)
        self._store_result(task_id, result, state, traceback,
                           request=request, **kwargs)
        return result

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError('backend does not implement forget.')

    def get_state(self, task_id):
        """Get the state of a task."""
        return self.get_task_meta(task_id)['status']
    get_status = get_state  # XXX compat

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get('traceback')

    def get_result(self, task_id):
        """Get the result of a task."""
        return self.get_task_meta(task_id).get('result')

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)['children']
        except KeyError:
            pass

    def _ensure_not_eager(self):
        if self.app.conf.task_always_eager:
            raise RuntimeError(
                "Cannot retrieve result with task_always_eager enabled")

    def get_task_meta(self, task_id, cache=True):
        self._ensure_not_eager()
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get('status') == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        """Reload task result, even if it has been previously fetched."""
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        """Reload group result, even if it has been previously fetched."""
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        self._ensure_not_eager()
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass

        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta['result']

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)

    def cleanup(self):
        """Backend cleanup.

        Note:
            This is run by the ``celery.backend_cleanup`` periodic task.
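        Example:
            Running the same logic by hand (a sketch; beat normally
            schedules this daily, so calling it directly is rarely
            needed):

                app.backend.cleanup()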
""" pass def process_cleanup(self): """Cleanup actions to do at the end of a task worker process.""" pass def on_task_call(self, producer, task_id): return {} def add_to_chord(self, chord_id, result): raise NotImplementedError('Backend does not support add_to_chord') def on_chord_part_return(self, request, state, result, **kwargs): pass def fallback_chord_unlock(self, group_id, body, result=None, countdown=1, **kwargs): kwargs['result'] = [r.as_tuple() for r in result] self.app.tasks['celery.chord_unlock'].apply_async( (group_id, body,), kwargs, countdown=countdown, ) def ensure_chords_allowed(self): pass def apply_chord(self, header, partial_args, group_id, body, options={}, **kwargs): self.ensure_chords_allowed() fixed_options = {k: v for k, v in items(options) if k != 'task_id'} result = header(*partial_args, task_id=group_id, **fixed_options or {}) self.fallback_chord_unlock(group_id, body, **kwargs) return result def current_task_children(self, request=None): request = request or getattr(get_current_task(), 'request', None) if request: return [r.as_tuple() for r in getattr(request, 'children', [])] def __reduce__(self, args=(), kwargs={}): return (unpickle_backend, (self.__class__, args, kwargs)) class SyncBackendMixin(object): def iter_native(self, result, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None): self._ensure_not_eager() results = result.results if not results: return iter([]) return self.get_many( {r.id for r in results}, timeout=timeout, interval=interval, no_ack=no_ack, on_message=on_message, on_interval=on_interval, ) def wait_for_pending(self, result, timeout=None, interval=0.5, no_ack=True, on_message=None, on_interval=None, callback=None, propagate=True): self._ensure_not_eager() if on_message is not None: raise ImproperlyConfigured( 'Backend does not support on_message callback') meta = self.wait_for( result.id, timeout=timeout, interval=interval, on_interval=on_interval, no_ack=no_ack, ) if meta: result._maybe_set_cache(meta) return result.maybe_throw(propagate=propagate, callback=callback) def wait_for(self, task_id, timeout=None, interval=0.5, no_ack=True, on_interval=None): """Wait for task and return its result. If the task raises an exception, this exception will be re-raised by :func:`wait_for`. Raises: celery.exceptions.TimeoutError: If `timeout` is not :const:`None`, and the operation takes longer than `timeout` seconds. """ self._ensure_not_eager() time_elapsed = 0.0 while 1: meta = self.get_task_meta(task_id) if meta['status'] in states.READY_STATES: return meta if on_interval: on_interval() # avoid hammering the CPU checking status. 
time.sleep(interval) time_elapsed += interval if timeout and time_elapsed >= timeout: raise TimeoutError('The operation timed out.') def add_pending_result(self, result, weak=False): return result def remove_pending_result(self, result): return result @property def is_async(self): return False class BaseBackend(Backend, SyncBackendMixin): """Base (synchronous) result backend.""" BaseDictBackend = BaseBackend # noqa: E305 XXX compat class BaseKeyValueStoreBackend(Backend): key_t = ensure_bytes task_keyprefix = 'celery-task-meta-' group_keyprefix = 'celery-taskset-meta-' chord_keyprefix = 'chord-unlock-' implements_incr = False def __init__(self, *args, **kwargs): if hasattr(self.key_t, '__func__'): # pragma: no cover self.key_t = self.key_t.__func__ # remove binding self._encode_prefixes() super(BaseKeyValueStoreBackend, self).__init__(*args, **kwargs) if self.implements_incr: self.apply_chord = self._apply_chord_incr def _encode_prefixes(self): self.task_keyprefix = self.key_t(self.task_keyprefix) self.group_keyprefix = self.key_t(self.group_keyprefix) self.chord_keyprefix = self.key_t(self.chord_keyprefix) def get(self, key): raise NotImplementedError('Must implement the get method.') def mget(self, keys): raise NotImplementedError('Does not support get_many') def set(self, key, value): raise NotImplementedError('Must implement the set method.') def delete(self, key): raise NotImplementedError('Must implement the delete method') def incr(self, key): raise NotImplementedError('Does not implement incr') def expire(self, key, value): pass def get_key_for_task(self, task_id, key=''): """Get the cache key for a task by id.""" key_t = self.key_t return key_t('').join([ self.task_keyprefix, key_t(task_id), key_t(key), ]) def get_key_for_group(self, group_id, key=''): """Get the cache key for a group by id.""" key_t = self.key_t return key_t('').join([ self.group_keyprefix, key_t(group_id), key_t(key), ]) def get_key_for_chord(self, group_id, key=''): """Get the cache key for the chord waiting on group with given id.""" key_t = self.key_t return key_t('').join([ self.chord_keyprefix, key_t(group_id), key_t(key), ]) def _strip_prefix(self, key): """Take bytes: emit string.""" key = self.key_t(key) for prefix in self.task_keyprefix, self.group_keyprefix: if key.startswith(prefix): return bytes_to_str(key[len(prefix):]) return bytes_to_str(key) def _filter_ready(self, values, READY_STATES=states.READY_STATES): for k, v in values: if v is not None: v = self.decode_result(v) if v['status'] in READY_STATES: yield k, v def _mget_to_results(self, values, keys): if hasattr(values, 'items'): # client returns dict so mapping preserved. return { self._strip_prefix(k): v for k, v in self._filter_ready(items(values)) } else: # client returns list so need to recreate mapping. 
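            # For illustration (names here are hypothetical): given
            # keys ['A', 'B'] and a client reply [payload_a, None],
            # _filter_ready() drops the unready entry and this branch
            # returns {'A': decoded_payload_a}.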
            return {
                bytes_to_str(keys[i]): v
                for i, v in self._filter_ready(enumerate(values))
            }

    def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
                 on_message=None, on_interval=None, max_iterations=None,
                 READY_STATES=states.READY_STATES):
        interval = 0.5 if interval is None else interval
        ids = task_ids if isinstance(task_ids, set) else set(task_ids)
        cached_ids = set()
        cache = self._cache
        for task_id in ids:
            try:
                cached = cache[task_id]
            except KeyError:
                pass
            else:
                if cached['status'] in READY_STATES:
                    yield bytes_to_str(task_id), cached
                    cached_ids.add(task_id)

        ids.difference_update(cached_ids)
        iterations = 0
        while ids:
            keys = list(ids)
            r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                                 for k in keys]), keys)
            cache.update(r)
            ids.difference_update({bytes_to_str(v) for v in r})
            for key, value in items(r):
                if on_message is not None:
                    on_message(value)
                yield bytes_to_str(key), value
            if timeout and iterations * interval >= timeout:
                raise TimeoutError('Operation timed out ({0})'.format(timeout))
            if on_interval:
                on_interval()
            time.sleep(interval)  # don't busy loop.
            iterations += 1
            if max_iterations and iterations >= max_iterations:
                break

    def _forget(self, task_id):
        self.delete(self.get_key_for_task(task_id))

    def _store_result(self, task_id, result, state,
                      traceback=None, request=None, **kwargs):
        meta = {
            'status': state, 'result': result, 'traceback': traceback,
            'children': self.current_task_children(request),
            'task_id': bytes_to_str(task_id),
        }
        self.set(self.get_key_for_task(task_id), self.encode(meta))
        return result

    def _save_group(self, group_id, result):
        self.set(self.get_key_for_group(group_id),
                 self.encode({'result': result.as_tuple()}))
        return result

    def _delete_group(self, group_id):
        self.delete(self.get_key_for_group(group_id))

    def _get_task_meta_for(self, task_id):
        """Get task meta-data for a task by id."""
        meta = self.get(self.get_key_for_task(task_id))
        if not meta:
            return {'status': states.PENDING, 'result': None}
        return self.decode_result(meta)

    def _restore_group(self, group_id):
        """Get group meta-data for a group by id."""
        meta = self.get(self.get_key_for_group(group_id))
        # previously this was always pickled, but later this
        # was extended to support other serializers, so the
        # structure is kind of weird.
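        # For illustration, a decoded group meta is roughly of the form
        #     {'result': <GroupResult.as_tuple() structure>}
        # and result_from_tuple() below rebuilds live result objects
        # from that tuple form.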
if meta: meta = self.decode(meta) result = meta['result'] meta['result'] = result_from_tuple(result, self.app) return meta def _apply_chord_incr(self, header, partial_args, group_id, body, result=None, options={}, **kwargs): self.ensure_chords_allowed() self.save_group(group_id, self.app.GroupResult(group_id, result)) fixed_options = {k: v for k, v in items(options) if k != 'task_id'} return header(*partial_args, task_id=group_id, **fixed_options or {}) def on_chord_part_return(self, request, state, result, **kwargs): if not self.implements_incr: return app = self.app gid = request.group if not gid: return key = self.get_key_for_chord(gid) try: deps = GroupResult.restore(gid, backend=self) except Exception as exc: # pylint: disable=broad-except callback = maybe_signature(request.chord, app=app) logger.exception('Chord %r raised: %r', gid, exc) return self.chord_error_from_stack( callback, ChordError('Cannot restore group: {0!r}'.format(exc)), ) if deps is None: try: raise ValueError(gid) except ValueError as exc: callback = maybe_signature(request.chord, app=app) logger.exception('Chord callback %r raised: %r', gid, exc) return self.chord_error_from_stack( callback, ChordError('GroupResult {0} no longer exists'.format(gid)), ) val = self.incr(key) size = len(deps) if val > size: # pragma: no cover logger.warning('Chord counter incremented too many times for %r', gid) elif val == size: callback = maybe_signature(request.chord, app=app) j = deps.join_native if deps.supports_native_join else deps.join try: with allow_join_result(): ret = j(timeout=3.0, propagate=True) except Exception as exc: # pylint: disable=broad-except try: culprit = next(deps._failed_join_report()) reason = 'Dependency {0.id} raised {1!r}'.format( culprit, exc, ) except StopIteration: reason = repr(exc) logger.exception('Chord %r raised: %r', gid, reason) self.chord_error_from_stack(callback, ChordError(reason)) else: try: callback.delay(ret) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', gid, exc) self.chord_error_from_stack( callback, ChordError('Callback error: {0!r}'.format(exc)), ) finally: deps.delete() self.client.delete(key) else: self.expire(key, self.expires) class KeyValueStoreBackend(BaseKeyValueStoreBackend, SyncBackendMixin): """Result backend base class for key/value stores.""" class DisabledBackend(BaseBackend): """Dummy result backend.""" _cache = {} # need this attribute to reset cache in tests. 
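
on_chord_part_return above implements chords by counting: each finished header task increments one counter key, and the call that brings the counter up to the group size joins the results and fires the callback. A stripped-down sketch of that accounting, with error handling, expiry, and the real backend omitted:

class ChordCounter(object):
    """Toy version of the incr-based chord bookkeeping used above."""

    def __init__(self, group_size, callback):
        self.group_size = group_size
        self.callback = callback
        self.count = 0
        self.results = []

    def on_part_return(self, result):
        self.results.append(result)
        self.count += 1                     # self.incr(key) on the real store
        if self.count == self.group_size:   # the val == size branch above
            self.callback(self.results)

fired = []
counter = ChordCounter(2, fired.append)
counter.on_part_return('a')
counter.on_part_return('b')
assert fired == [['a', 'b']]
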
def store_result(self, *args, **kwargs): pass def ensure_chords_allowed(self): raise NotImplementedError(E_CHORD_NO_BACKEND.strip()) def _is_disabled(self, *args, **kwargs): raise NotImplementedError(E_NO_BACKEND.strip()) def as_uri(self, *args, **kwargs): return 'disabled://' get_state = get_status = get_result = get_traceback = _is_disabled get_task_meta_for = wait_for = get_many = _is_disabled celery-4.1.0/celery/backends/riak.py0000644000175000017500000000763413130607475017302 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Riak result store backend.""" from __future__ import absolute_import, unicode_literals import sys from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: import riak from riak import RiakClient from riak.resolver import last_written_resolver except ImportError: # pragma: no cover riak = RiakClient = last_written_resolver = None # noqa __all__ = ['RiakBackend'] E_BUCKET_NAME = """\ Riak bucket names must be composed of ASCII characters only, not: {0!r}\ """ if sys.version_info[0] == 3: def to_bytes(s): return s.encode() if isinstance(s, str) else s def str_decode(s, encoding): return to_bytes(s).decode(encoding) else: def str_decode(s, encoding): return s.decode('ascii') def is_ascii(s): try: str_decode(s, 'ascii') except UnicodeDecodeError: return False return True class RiakBackend(KeyValueStoreBackend): """Riak result backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`riak` is not available. """ # TODO: allow using other protocols than protobuf ? #: default protocol used to connect to Riak, might be `http` or `pbc` protocol = 'pbc' #: default Riak bucket name (`default`) bucket_name = 'celery' #: default Riak server hostname (`localhost`) host = 'localhost' #: default Riak server port (8087) port = 8087 _bucket = None def __init__(self, host=None, port=None, bucket_name=None, protocol=None, url=None, *args, **kwargs): super(RiakBackend, self).__init__(*args, **kwargs) self.url = url if not riak: raise ImproperlyConfigured( 'You need to install the riak library to use the ' 'Riak backend.') uhost = uport = upass = ubucket = None if url: _, uhost, uport, _, upass, ubucket, _ = _parse_url(url) if ubucket: ubucket = ubucket.strip('/') config = self.app.conf.get('riak_backend_settings', None) if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'Riak backend settings should be grouped in a dict') else: config = {} self.host = uhost or config.get('host', self.host) self.port = int(uport or config.get('port', self.port)) self.bucket_name = ubucket or config.get('bucket', self.bucket_name) self.protocol = protocol or config.get('protocol', self.protocol) # riak bucket must be ascii letters or numbers only if not is_ascii(self.bucket_name): raise ValueError(E_BUCKET_NAME.format(self.bucket_name)) self._client = None def _get_client(self): """Get client connection.""" if self._client is None or not self._client.is_alive(): self._client = RiakClient(protocol=self.protocol, host=self.host, pb_port=self.port) self._client.resolver = last_written_resolver return self._client def _get_bucket(self): """Connect to our bucket.""" if ( self._client is None or not self._client.is_alive() or not self._bucket ): self._bucket = self.client.bucket(self.bucket_name) return self._bucket @property def client(self): return self._get_client() @property def bucket(self): return self._get_bucket() def get(self, key): return self.bucket.get(key).data def 
set(self, key, value): _key = self.bucket.new(key, data=value) _key.store() def mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): self.bucket.delete(key) celery-4.1.0/celery/backends/dynamodb.py0000644000175000017500000002161713130607475020146 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """AWS DynamoDB result store backend.""" from __future__ import absolute_import, unicode_literals from collections import namedtuple from time import time, sleep from kombu.utils.url import _parse_url as parse_url from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger from celery.five import string from .base import KeyValueStoreBackend try: import boto3 from botocore.exceptions import ClientError except ImportError: # pragma: no cover boto3 = ClientError = None # noqa __all__ = ['DynamoDBBackend'] # Helper class that describes a DynamoDB attribute DynamoDBAttribute = namedtuple('DynamoDBAttribute', ('name', 'data_type')) logger = get_logger(__name__) class DynamoDBBackend(KeyValueStoreBackend): """AWS DynamoDB result backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`boto3` is not available. """ #: default DynamoDB table name (`default`) table_name = 'celery' #: Read Provisioned Throughput (`default`) read_capacity_units = 1 #: Write Provisioned Throughput (`default`) write_capacity_units = 1 #: AWS region (`default`) aws_region = None #: The endpoint URL that is passed to boto3 (local DynamoDB) (`default`) endpoint_url = None _key_field = DynamoDBAttribute(name='id', data_type='S') _value_field = DynamoDBAttribute(name='result', data_type='B') _timestamp_field = DynamoDBAttribute(name='timestamp', data_type='N') _available_fields = None def __init__(self, url=None, table_name=None, *args, **kwargs): super(DynamoDBBackend, self).__init__(*args, **kwargs) self.url = url self.table_name = table_name or self.table_name if not boto3: raise ImproperlyConfigured( 'You need to install the boto3 library to use the ' 'DynamoDB backend.') aws_credentials_given = False aws_access_key_id = None aws_secret_access_key = None if url is not None: scheme, region, port, username, password, table, query = \ parse_url(url) aws_access_key_id = username aws_secret_access_key = password access_key_given = aws_access_key_id is not None secret_key_given = aws_secret_access_key is not None if access_key_given != secret_key_given: raise ImproperlyConfigured( 'You need to specify both the Access Key ID ' 'and Secret.') aws_credentials_given = access_key_given if region == 'localhost': # We are using the downloadable, local version of DynamoDB self.endpoint_url = 'http://localhost:{}'.format(port) self.aws_region = 'us-east-1' logger.warning( 'Using local-only DynamoDB endpoint URL: {}'.format( self.endpoint_url ) ) else: self.aws_region = region self.read_capacity_units = int( query.get( 'read', self.read_capacity_units ) ) self.write_capacity_units = int( query.get( 'write', self.write_capacity_units ) ) self.table_name = table or self.table_name self._available_fields = ( self._key_field, self._value_field, self._timestamp_field ) self._client = None if aws_credentials_given: self._get_client( access_key_id=aws_access_key_id, secret_access_key=aws_secret_access_key ) def _get_client(self, access_key_id=None, secret_access_key=None): """Get client connection.""" if self._client is None: client_parameters = dict( region_name=self.aws_region ) if access_key_id is not None: client_parameters.update(dict(
aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key )) if self.endpoint_url is not None: client_parameters['endpoint_url'] = self.endpoint_url self._client = boto3.client( 'dynamodb', **client_parameters ) self._get_or_create_table() return self._client def _get_table_schema(self): """Get the boto3 structure describing the DynamoDB table schema.""" return dict( AttributeDefinitions=[ { 'AttributeName': self._key_field.name, 'AttributeType': self._key_field.data_type } ], TableName=self.table_name, KeySchema=[ { 'AttributeName': self._key_field.name, 'KeyType': 'HASH' } ], ProvisionedThroughput={ 'ReadCapacityUnits': self.read_capacity_units, 'WriteCapacityUnits': self.write_capacity_units } ) def _get_or_create_table(self): """Create table if not exists, otherwise return the description.""" table_schema = self._get_table_schema() try: table_description = self._client.create_table(**table_schema) logger.info( 'DynamoDB Table {} did not exist, creating.'.format( self.table_name ) ) # In case we created the table, wait until it becomes available. self._wait_for_table_status('ACTIVE') logger.info( 'DynamoDB Table {} is now available.'.format( self.table_name ) ) return table_description except ClientError as e: error_code = e.response['Error'].get('Code', 'Unknown') # If table exists, do not fail, just return the description. if error_code == 'ResourceInUseException': return self._client.describe_table( TableName=self.table_name ) else: raise e def _wait_for_table_status(self, expected='ACTIVE'): """Poll for the expected table status.""" achieved_state = False while not achieved_state: table_description = self.client.describe_table( TableName=self.table_name ) logger.debug( 'Waiting for DynamoDB table {} to become {}.'.format( self.table_name, expected ) ) current_status = table_description['Table']['TableStatus'] achieved_state = current_status == expected sleep(1) def _prepare_get_request(self, key): """Construct the item retrieval request parameters.""" return dict( TableName=self.table_name, Key={ self._key_field.name: { self._key_field.data_type: key } } ) def _prepare_put_request(self, key, value): """Construct the item creation request parameters.""" return dict( TableName=self.table_name, Item={ self._key_field.name: { self._key_field.data_type: key }, self._value_field.name: { self._value_field.data_type: value }, self._timestamp_field.name: { self._timestamp_field.data_type: str(time()) } } ) def _item_to_dict(self, raw_response): """Convert get_item() response to field-value pairs.""" if 'Item' not in raw_response: return {} return { field.name: raw_response['Item'][field.name][field.data_type] for field in self._available_fields } @property def client(self): return self._get_client() def get(self, key): key = string(key) request_parameters = self._prepare_get_request(key) item_response = self.client.get_item(**request_parameters) item = self._item_to_dict(item_response) return item.get(self._value_field.name) def set(self, key, value): key = string(key) request_parameters = self._prepare_put_request(key, value) self.client.put_item(**request_parameters) def mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): key = string(key) request_parameters = self._prepare_get_request(key) self.client.delete_item(**request_parameters) celery-4.1.0/celery/backends/filesystem.py0000644000175000017500000000536513130607475020537 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """File-system result store backend.""" from __future__ import 
absolute_import, unicode_literals import os import locale from kombu.utils.encoding import ensure_bytes from celery import uuid from celery.exceptions import ImproperlyConfigured from celery.backends.base import KeyValueStoreBackend # Python 2 does not have FileNotFoundError and IsADirectoryError try: FileNotFoundError except NameError: FileNotFoundError = IOError IsADirectoryError = IOError default_encoding = locale.getpreferredencoding(False) E_PATH_INVALID = """\ The configured path for the file-system backend does not work correctly, please make sure that it exists and has the correct permissions.\ """ class FilesystemBackend(KeyValueStoreBackend): """File-system result backend. Arguments: url (str): URL to the directory we should use open (Callable): open function to use when opening files unlink (Callable): unlink function to use when deleting files sep (str): directory separator (to join the directory with the key) encoding (str): encoding used on the file-system """ def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep, encoding=default_encoding, *args, **kwargs): super(FilesystemBackend, self).__init__(*args, **kwargs) self.url = url path = self._find_path(url) # We need the path and separator as bytes objects self.path = path.encode(encoding) self.sep = sep.encode(encoding) self.open = open self.unlink = unlink # Lets verify that we've everything setup right self._do_directory_test(b'.fs-backend-' + uuid().encode(encoding)) def _find_path(self, url): if not url: raise ImproperlyConfigured( 'You need to configure a path for the File-system backend') if url is not None and url.startswith('file:///'): return url[7:] def _do_directory_test(self, key): try: self.set(key, b'test value') assert self.get(key) == b'test value' self.delete(key) except IOError: raise ImproperlyConfigured(E_PATH_INVALID) def _filename(self, key): return self.sep.join((self.path, key)) def get(self, key): try: with self.open(self._filename(key), 'rb') as infile: return infile.read() except FileNotFoundError: pass def set(self, key, value): with self.open(self._filename(key), 'wb') as outfile: outfile.write(ensure_bytes(value)) def mget(self, keys): for key in keys: yield self.get(key) def delete(self, key): self.unlink(self._filename(key)) celery-4.1.0/celery/backends/database/0000755000175000017500000000000013135426347017536 5ustar omeromer00000000000000celery-4.1.0/celery/backends/database/session.py0000644000175000017500000000357013130607475021576 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """SQLAlchemy session.""" from __future__ import absolute_import, unicode_literals from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from sqlalchemy.pool import NullPool from kombu.utils.compat import register_after_fork ResultModelBase = declarative_base() __all__ = ['SessionManager'] def _after_fork_cleanup_session(session): session._after_fork() class SessionManager(object): """Manage SQLAlchemy sessions.""" def __init__(self): self._engines = {} self._sessions = {} self.forked = False self.prepared = False if register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_session) def _after_fork(self): self.forked = True def get_engine(self, dburi, **kwargs): if self.forked: try: return self._engines[dburi] except KeyError: engine = self._engines[dburi] = create_engine(dburi, **kwargs) return engine else: return create_engine(dburi, poolclass=NullPool) def create_session(self, dburi, 
short_lived_sessions=False, **kwargs): engine = self.get_engine(dburi, **kwargs) if self.forked: if short_lived_sessions or dburi not in self._sessions: self._sessions[dburi] = sessionmaker(bind=engine) return engine, self._sessions[dburi] else: return engine, sessionmaker(bind=engine) def prepare_models(self, engine): if not self.prepared: ResultModelBase.metadata.create_all(engine) self.prepared = True def session_factory(self, dburi, **kwargs): engine, session = self.create_session(dburi, **kwargs) self.prepare_models(engine) return session() celery-4.1.0/celery/backends/database/__init__.py0000644000175000017500000001422713130607475021653 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """SQLAlchemy result store backend.""" from __future__ import absolute_import, unicode_literals import logging from contextlib import contextmanager from vine.utils import wraps from celery import states from celery.backends.base import BaseBackend from celery.exceptions import ImproperlyConfigured from celery.five import range from celery.utils.time import maybe_timedelta from .models import Task from .models import TaskSet from .session import SessionManager try: from sqlalchemy.exc import DatabaseError, InvalidRequestError from sqlalchemy.orm.exc import StaleDataError except ImportError: # pragma: no cover raise ImproperlyConfigured( 'The database result backend requires SQLAlchemy to be installed.' 'See https://pypi.python.org/pypi/SQLAlchemy') logger = logging.getLogger(__name__) __all__ = ['DatabaseBackend'] @contextmanager def session_cleanup(session): try: yield except Exception: session.rollback() raise finally: session.close() def retry(fun): @wraps(fun) def _inner(*args, **kwargs): max_retries = kwargs.pop('max_retries', 3) for retries in range(max_retries): try: return fun(*args, **kwargs) except (DatabaseError, InvalidRequestError, StaleDataError): logger.warning( 'Failed operation %s. Retrying %s more times.', fun.__name__, max_retries - retries - 1, exc_info=True) if retries + 1 >= max_retries: raise return _inner class DatabaseBackend(BaseBackend): """The database result backend.""" # ResultSet.iterate should sleep this much between each pool, # to not bombard the database with queries. subpolling_interval = 0.5 def __init__(self, dburi=None, engine_options=None, url=None, **kwargs): # The `url` argument was added later and is used by # the app to set backend by url (celery.app.backends.by_url) super(DatabaseBackend, self).__init__( expires_type=maybe_timedelta, url=url, **kwargs) conf = self.app.conf self.url = url or dburi or conf.database_url self.engine_options = dict( engine_options or {}, **conf.database_engine_options or {}) self.short_lived_sessions = kwargs.get( 'short_lived_sessions', conf.database_short_lived_sessions) tablenames = conf.database_table_names or {} Task.__table__.name = tablenames.get('task', 'celery_taskmeta') TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta') if not self.url: raise ImproperlyConfigured( 'Missing connection string! 
Do you have the' ' database_url setting set to a real value?') def ResultSession(self, session_manager=SessionManager()): return session_manager.session_factory( dburi=self.url, short_lived_sessions=self.short_lived_sessions, **self.engine_options) @retry def _store_result(self, task_id, result, state, traceback=None, max_retries=3, **kwargs): """Store return value and state of an executed task.""" session = self.ResultSession() with session_cleanup(session): task = list(session.query(Task).filter(Task.task_id == task_id)) task = task and task[0] if not task: task = Task(task_id) session.add(task) session.flush() task.result = result task.status = state task.traceback = traceback session.commit() return result @retry def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" session = self.ResultSession() with session_cleanup(session): task = list(session.query(Task).filter(Task.task_id == task_id)) task = task and task[0] if not task: task = Task(task_id) task.status = states.PENDING task.result = None return self.meta_from_decoded(task.to_dict()) @retry def _save_group(self, group_id, result): """Store the result of an executed group.""" session = self.ResultSession() with session_cleanup(session): group = TaskSet(group_id, result) session.add(group) session.flush() session.commit() return result @retry def _restore_group(self, group_id): """Get meta-data for group by id.""" session = self.ResultSession() with session_cleanup(session): group = session.query(TaskSet).filter( TaskSet.taskset_id == group_id).first() if group: return group.to_dict() @retry def _delete_group(self, group_id): """Delete meta-data for group by id.""" session = self.ResultSession() with session_cleanup(session): session.query(TaskSet).filter( TaskSet.taskset_id == group_id).delete() session.flush() session.commit() @retry def _forget(self, task_id): """Forget about result.""" session = self.ResultSession() with session_cleanup(session): session.query(Task).filter(Task.task_id == task_id).delete() session.commit() def cleanup(self): """Delete expired meta-data.""" session = self.ResultSession() expires = self.expires now = self.app.now() with session_cleanup(session): session.query(Task).filter( Task.date_done < (now - expires)).delete() session.query(TaskSet).filter( TaskSet.date_done < (now - expires)).delete() session.commit() def __reduce__(self, args=(), kwargs={}): kwargs.update( dict(dburi=self.url, expires=self.expires, engine_options=self.engine_options)) return super(DatabaseBackend, self).__reduce__(args, kwargs) celery-4.1.0/celery/backends/database/models.py0000644000175000017500000000441313130607475021373 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Database models used by the SQLAlchemy result store backend.""" from __future__ import absolute_import, unicode_literals import sqlalchemy as sa from datetime import datetime from sqlalchemy.types import PickleType from celery import states from celery.five import python_2_unicode_compatible from .session import ResultModelBase __all__ = ['Task', 'TaskSet'] @python_2_unicode_compatible class Task(ResultModelBase): """Task result/status.""" __tablename__ = 'celery_taskmeta' __table_args__ = {'sqlite_autoincrement': True} id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'), primary_key=True, autoincrement=True) task_id = sa.Column(sa.String(155), unique=True) status = sa.Column(sa.String(50), default=states.PENDING) result = sa.Column(PickleType, nullable=True) date_done = sa.Column(sa.DateTime, 
default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True) traceback = sa.Column(sa.Text, nullable=True) def __init__(self, task_id): self.task_id = task_id def to_dict(self): return { 'task_id': self.task_id, 'status': self.status, 'result': self.result, 'traceback': self.traceback, 'date_done': self.date_done, } def __repr__(self): return '<Task {0.task_id} state: {0.status}>'.format(self) @python_2_unicode_compatible class TaskSet(ResultModelBase): """TaskSet result.""" __tablename__ = 'celery_tasksetmeta' __table_args__ = {'sqlite_autoincrement': True} id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'), autoincrement=True, primary_key=True) taskset_id = sa.Column(sa.String(155), unique=True) result = sa.Column(PickleType, nullable=True) date_done = sa.Column(sa.DateTime, default=datetime.utcnow, nullable=True) def __init__(self, taskset_id, result): self.taskset_id = taskset_id self.result = result def to_dict(self): return { 'taskset_id': self.taskset_id, 'result': self.result, 'date_done': self.date_done, } def __repr__(self): return '<TaskSet: {0.taskset_id}>'.format(self) celery-4.1.0/celery/backends/__init__.py0000644000175000017500000000141413130607475020101 0ustar omeromer00000000000000"""Result Backends.""" from __future__ import absolute_import, unicode_literals from celery.app import backends as _backends from celery.utils import deprecated @deprecated.Callable( deprecation='4.0', removal='5.0', alternative='Please use celery.app.backends.by_url') def get_backend_cls(backend=None, loader=None, **kwargs): """Deprecated alias to :func:`celery.app.backends.by_name`.""" return _backends.by_name(backend=backend, loader=loader, **kwargs) @deprecated.Callable( deprecation='4.0', removal='5.0', alternative='Please use celery.app.backends.by_url') def get_backend_by_url(backend=None, loader=None): """Deprecated alias to :func:`celery.app.backends.by_url`.""" return _backends.by_url(backend=backend, loader=loader) celery-4.1.0/celery/backends/elasticsearch.py0000644000175000017500000000771113130607475021162 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Elasticsearch result store backend.""" from __future__ import absolute_import, unicode_literals from datetime import datetime from kombu.utils.url import _parse_url from kombu.utils.encoding import bytes_to_str from celery.exceptions import ImproperlyConfigured from celery.five import items from .base import KeyValueStoreBackend try: import elasticsearch except ImportError: elasticsearch = None # noqa __all__ = ['ElasticsearchBackend'] E_LIB_MISSING = """\ You need to install the elasticsearch library to use the Elasticsearch \ result backend.\ """ class ElasticsearchBackend(KeyValueStoreBackend): """Elasticsearch Backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`elasticsearch` is not available.
""" index = 'celery' doc_type = 'backend' scheme = 'http' host = 'localhost' port = 9200 es_retry_on_timeout = False es_timeout = 10 es_max_retries = 3 def __init__(self, url=None, *args, **kwargs): super(ElasticsearchBackend, self).__init__(*args, **kwargs) self.url = url _get = self.app.conf.get if elasticsearch is None: raise ImproperlyConfigured(E_LIB_MISSING) index = doc_type = scheme = host = port = None if url: scheme, host, port, _, _, path, _ = _parse_url(url) # noqa if path: path = path.strip('/') index, _, doc_type = path.partition('/') self.index = index or self.index self.doc_type = doc_type or self.doc_type self.scheme = scheme or self.scheme self.host = host or self.host self.port = port or self.port self.es_retry_on_timeout = ( _get('elasticsearch_retry_on_timeout') or self.es_retry_on_timeout ) es_timeout = _get('elasticsearch_timeout') if es_timeout is not None: self.es_timeout = es_timeout es_max_retries = _get('elasticsearch_max_retries') if es_max_retries is not None: self.es_max_retries = es_max_retries self._server = None def get(self, key): try: res = self.server.get( index=self.index, doc_type=self.doc_type, id=key, ) try: if res['found']: return res['_source']['result'] except (TypeError, KeyError): pass except elasticsearch.exceptions.NotFoundError: pass def set(self, key, value): try: self._index( id=key, body={ 'result': value, '@timestamp': '{0}Z'.format( datetime.utcnow().isoformat()[:-3] ), }, ) except elasticsearch.exceptions.ConflictError: # document already exists, update it data = self.get(key) data[key] = value self._index(key, data, refresh=True) def _index(self, id, body, **kwargs): body = {bytes_to_str(k): v for k, v in items(body)} return self.server.index( id=bytes_to_str(id), index=self.index, doc_type=self.doc_type, body=body, **kwargs ) def mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): self.server.delete(index=self.index, doc_type=self.doc_type, id=key) def _get_server(self): """Connect to the Elasticsearch server.""" return elasticsearch.Elasticsearch( '%s:%s' % (self.host, self.port), retry_on_timeout=self.es_retry_on_timeout, max_retries=self.es_max_retries, timeout=self.es_timeout ) @property def server(self): if self._server is None: self._server = self._get_server() return self._server celery-4.1.0/celery/backends/rpc.py0000644000175000017500000002763313130607475017141 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The ``RPC`` result backend for AMQP brokers. RPC-style result backend, using reply-to and one queue per client. """ from __future__ import absolute_import, unicode_literals import kombu import time from kombu.common import maybe_declare from kombu.utils.compat import register_after_fork from kombu.utils.objects import cached_property from celery import states from celery._state import current_task, task_join_will_block from celery.five import items, range from . import base from .async import AsyncBackendMixin, BaseResultConsumer __all__ = ['BacklogLimitExceeded', 'RPCBackend'] E_NO_CHORD_SUPPORT = """ The "rpc" result backend does not support chords! Note that a group chained with a task is also upgraded to be a chord, as this pattern requires synchronization. Result backends that supports chords: Redis, Database, Memcached, and more. 
""" class BacklogLimitExceeded(Exception): """Too much state history to fast-forward.""" def _on_after_fork_cleanup_backend(backend): backend._after_fork() class ResultConsumer(BaseResultConsumer): Consumer = kombu.Consumer _connection = None _consumer = None def __init__(self, *args, **kwargs): super(ResultConsumer, self).__init__(*args, **kwargs) self._create_binding = self.backend._create_binding def start(self, initial_task_id, no_ack=True, **kwargs): self._connection = self.app.connection() initial_queue = self._create_binding(initial_task_id) self._consumer = self.Consumer( self._connection.default_channel, [initial_queue], callbacks=[self.on_state_change], no_ack=no_ack, accept=self.accept) self._consumer.consume() def drain_events(self, timeout=None): if self._connection: return self._connection.drain_events(timeout=timeout) elif timeout: time.sleep(timeout) def stop(self): try: self._consumer.cancel() finally: self._connection.close() def on_after_fork(self): self._consumer = None if self._connection is not None: self._connection.collect() self._connection = None def consume_from(self, task_id): if self._consumer is None: return self.start(task_id) queue = self._create_binding(task_id) if not self._consumer.consuming_from(queue): self._consumer.add_queue(queue) self._consumer.consume() def cancel_for(self, task_id): if self._consumer: self._consumer.cancel_by_queue(self._create_binding(task_id).name) class RPCBackend(base.Backend, AsyncBackendMixin): """Base class for the RPC result backend.""" Exchange = kombu.Exchange Producer = kombu.Producer ResultConsumer = ResultConsumer #: Exception raised when there are too many messages for a task id. BacklogLimitExceeded = BacklogLimitExceeded persistent = False supports_autoexpire = True supports_native_join = True retry_policy = { 'max_retries': 20, 'interval_start': 0, 'interval_step': 1, 'interval_max': 1, } class Consumer(kombu.Consumer): """Consumer that requires manual declaration of queues.""" auto_declare = False class Queue(kombu.Queue): """Queue that never caches declaration.""" can_cache_declaration = False def __init__(self, app, connection=None, exchange=None, exchange_type=None, persistent=None, serializer=None, auto_delete=True, **kwargs): super(RPCBackend, self).__init__(app, **kwargs) conf = self.app.conf self._connection = connection self._out_of_band = {} self.persistent = self.prepare_persistent(persistent) self.delivery_mode = 2 if self.persistent else 1 exchange = exchange or conf.result_exchange exchange_type = exchange_type or conf.result_exchange_type self.exchange = self._create_exchange( exchange, exchange_type, self.delivery_mode, ) self.serializer = serializer or conf.result_serializer self.auto_delete = auto_delete self.result_consumer = self.ResultConsumer( self, self.app, self.accept, self._pending_results, self._pending_messages, ) if register_after_fork is not None: register_after_fork(self, _on_after_fork_cleanup_backend) def _after_fork(self): # clear state for child processes. self._pending_results.clear() self.result_consumer._after_fork() def _create_exchange(self, name, type='direct', delivery_mode=2): # uses direct to queue routing (anon exchange). return self.Exchange(None) def _create_binding(self, task_id): """Create new binding for task with id.""" # RPC backend caches the binding, as one queue is used for all tasks. 
return self.binding def ensure_chords_allowed(self): raise NotImplementedError(E_NO_CHORD_SUPPORT.strip()) def on_task_call(self, producer, task_id): # Called every time a task is sent when using this backend. # We declare the queue we receive replies on in advance of sending # the message, but we skip this if running in the prefork pool # (task_join_will_block), as we know the queue is already declared. if not task_join_will_block(): maybe_declare(self.binding(producer.channel), retry=True) def destination_for(self, task_id, request): """Get the destination for result by task id. Returns: Tuple[str, str]: tuple of ``(reply_to, correlation_id)``. """ # Backends didn't always receive the `request`, so we must still # support old code that relies on current_task. try: request = request or current_task.request except AttributeError: raise RuntimeError( 'RPC backend missing task request for {0!r}'.format(task_id)) return request.reply_to, request.correlation_id or task_id def on_reply_declare(self, task_id): # Return value here is used as the `declare=` argument # for Producer.publish. # By default we don't have to declare anything when sending a result. pass def on_result_fulfilled(self, result): # This usually cancels the queue after the result is received, # but we don't have to cancel since we have one queue per process. pass def as_uri(self, include_password=True): return 'rpc://' def store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Send task return value and state.""" routing_key, correlation_id = self.destination_for(task_id, request) if not routing_key: return with self.app.amqp.producer_pool.acquire(block=True) as producer: producer.publish( self._to_result(task_id, state, result, traceback, request), exchange=self.exchange, routing_key=routing_key, correlation_id=correlation_id, serializer=self.serializer, retry=True, retry_policy=self.retry_policy, declare=self.on_reply_declare(task_id), delivery_mode=self.delivery_mode, ) return result def _to_result(self, task_id, state, result, traceback, request): return { 'task_id': task_id, 'status': state, 'result': self.encode_result(result, state), 'traceback': traceback, 'children': self.current_task_children(request), } def on_out_of_band_result(self, task_id, message): # Callback called when a reply for a task is received, # but we have no idea what do do with it. # Since the result is not pending, we put it in a separate # buffer: probably it will become pending later. if self.result_consumer: self.result_consumer.on_out_of_band_result(message) self._out_of_band[task_id] = message def get_task_meta(self, task_id, backlog_limit=1000): buffered = self._out_of_band.pop(task_id, None) if buffered: return self._set_cache_by_message(task_id, buffered) # Polling and using basic_get latest_by_id = {} prev = None for acc in self._slurp_from_queue(task_id, self.accept, backlog_limit): tid = self._get_message_task_id(acc) prev, latest_by_id[tid] = latest_by_id.get(tid), acc if prev: # backends aren't expected to keep history, # so we delete everything except the most recent state. prev.ack() prev = None latest = latest_by_id.pop(task_id, None) for tid, msg in items(latest_by_id): self.on_out_of_band_result(tid, msg) if latest: latest.requeue() return self._set_cache_by_message(task_id, latest) else: # no new state, use previous try: return self._cache[task_id] except KeyError: # result probably pending. 
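
store_result above publishes straight back to the caller's reply queue, using the request's reply_to as routing key and its correlation_id to tag the message. A sketch of the payload assembled by _to_result, and of the id lookup mirrored from _get_message_task_id (all values hypothetical):

reply = {
    'task_id': '5e0f7c6c-3b5d-4f6e-9b1a-3c1f00000000',  # hypothetical id
    'status': 'SUCCESS',
    'result': 4,
    'traceback': None,
    'children': [],
}

def task_id_from_message(properties, payload):
    # prefer the AMQP property so the body need not be deserialized
    try:
        return properties['correlation_id']
    except KeyError:
        return payload['task_id']

assert task_id_from_message({}, reply) == reply['task_id']
assert task_id_from_message({'correlation_id': 'xyz'}, reply) == 'xyz'
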
return {'status': states.PENDING, 'result': None} poll = get_task_meta # XXX compat def _set_cache_by_message(self, task_id, message): payload = self._cache[task_id] = self.meta_from_decoded( message.payload) return payload def _slurp_from_queue(self, task_id, accept, limit=1000, no_ack=False): with self.app.pool.acquire_channel(block=True) as (_, channel): binding = self._create_binding(task_id)(channel) binding.declare() for _ in range(limit): msg = binding.get(accept=accept, no_ack=no_ack) if not msg: break yield msg else: raise self.BacklogLimitExceeded(task_id) def _get_message_task_id(self, message): try: # try property first so we don't have to deserialize # the payload. return message.properties['correlation_id'] except (AttributeError, KeyError): # message sent by old Celery version, need to deserialize. return message.payload['task_id'] def revive(self, channel): pass def reload_task_result(self, task_id): raise NotImplementedError( 'reload_task_result is not supported by this backend.') def reload_group_result(self, task_id): """Reload group result, even if it has been previously fetched.""" raise NotImplementedError( 'reload_group_result is not supported by this backend.') def save_group(self, group_id, result): raise NotImplementedError( 'save_group is not supported by this backend.') def restore_group(self, group_id, cache=True): raise NotImplementedError( 'restore_group is not supported by this backend.') def delete_group(self, group_id): raise NotImplementedError( 'delete_group is not supported by this backend.') def __reduce__(self, args=(), kwargs={}): return super(RPCBackend, self).__reduce__(args, dict( kwargs, connection=self._connection, exchange=self.exchange.name, exchange_type=self.exchange.type, persistent=self.persistent, serializer=self.serializer, auto_delete=self.auto_delete, expires=self.expires, )) @property def binding(self): return self.Queue( self.oid, self.exchange, self.oid, durable=False, auto_delete=True, expires=self.expires, ) @cached_property def oid(self): # cached here is the app OID: name of queue we receive results on. return self.app.oid celery-4.1.0/celery/backends/couchbase.py0000644000175000017500000000617413130607475020306 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Couchbase result store backend.""" from __future__ import absolute_import, unicode_literals import logging from kombu.utils.encoding import str_t from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: from couchbase import Couchbase from couchbase.connection import Connection from couchbase.exceptions import NotFoundError except ImportError: Couchbase = Connection = NotFoundError = None # noqa __all__ = ['CouchbaseBackend'] class CouchbaseBackend(KeyValueStoreBackend): """Couchbase backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`couchbase` is not available. 
""" bucket = 'default' host = 'localhost' port = 8091 username = None password = None quiet = False timeout = 2.5 # Use str as couchbase key not bytes key_t = str_t def __init__(self, url=None, *args, **kwargs): super(CouchbaseBackend, self).__init__(*args, **kwargs) self.url = url if Couchbase is None: raise ImproperlyConfigured( 'You need to install the couchbase library to use the ' 'Couchbase backend.', ) uhost = uport = uname = upass = ubucket = None if url: _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url) ubucket = ubucket.strip('/') if ubucket else None config = self.app.conf.get('couchbase_backend_settings', None) if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'Couchbase backend settings should be grouped in a dict', ) else: config = {} self.host = uhost or config.get('host', self.host) self.port = int(uport or config.get('port', self.port)) self.bucket = ubucket or config.get('bucket', self.bucket) self.username = uname or config.get('username', self.username) self.password = upass or config.get('password', self.password) self._connection = None def _get_connection(self): """Connect to the Couchbase server.""" if self._connection is None: kwargs = {'bucket': self.bucket, 'host': self.host} if self.port: kwargs.update({'port': self.port}) if self.username: kwargs.update({'username': self.username}) if self.password: kwargs.update({'password': self.password}) logging.debug('couchbase settings %r', kwargs) self._connection = Connection(**kwargs) return self._connection @property def connection(self): return self._get_connection() def get(self, key): try: return self.connection.get(key).value except NotFoundError: return None def set(self, key, value): self.connection.set(key, value) def mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): self.connection.delete(key) celery-4.1.0/celery/backends/cache.py0000644000175000017500000001137013130607475017407 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Memcached and in-memory cache result backend.""" from __future__ import absolute_import, unicode_literals import sys from kombu.utils.encoding import bytes_to_str, ensure_bytes from kombu.utils.objects import cached_property from celery.exceptions import ImproperlyConfigured from celery.utils.functional import LRUCache from .base import KeyValueStoreBackend __all__ = ['CacheBackend'] _imp = [None] PY3 = sys.version_info[0] == 3 REQUIRES_BACKEND = """\ The Memcached backend requires either pylibmc or python-memcached.\ """ UNKNOWN_BACKEND = """\ The cache backend {0!r} is unknown, Please use one of the following backends instead: {1}\ """ def import_best_memcache(): if _imp[0] is None: is_pylibmc, memcache_key_t = False, ensure_bytes try: import pylibmc as memcache is_pylibmc = True except ImportError: try: import memcache # noqa except ImportError: raise ImproperlyConfigured(REQUIRES_BACKEND) if PY3: # pragma: no cover memcache_key_t = bytes_to_str _imp[0] = (is_pylibmc, memcache, memcache_key_t) return _imp[0] def get_best_memcache(*args, **kwargs): # pylint: disable=unpacking-non-sequence # This is most definitely a sequence, but pylint thinks it's not. 
is_pylibmc, memcache, key_t = import_best_memcache() Client = _Client = memcache.Client if not is_pylibmc: def Client(*args, **kwargs): # noqa kwargs.pop('behaviors', None) return _Client(*args, **kwargs) return Client, key_t class DummyClient(object): def __init__(self, *args, **kwargs): self.cache = LRUCache(limit=5000) def get(self, key, *args, **kwargs): return self.cache.get(key) def get_multi(self, keys): cache = self.cache return {k: cache[k] for k in keys if k in cache} def set(self, key, value, *args, **kwargs): self.cache[key] = value def delete(self, key, *args, **kwargs): self.cache.pop(key, None) def incr(self, key, delta=1): return self.cache.incr(key, delta) def touch(self, key, expire): pass backends = { 'memcache': get_best_memcache, 'memcached': get_best_memcache, 'pylibmc': get_best_memcache, 'memory': lambda: (DummyClient, ensure_bytes), } class CacheBackend(KeyValueStoreBackend): """Cache result backend.""" servers = None supports_autoexpire = True supports_native_join = True implements_incr = True def __init__(self, app, expires=None, backend=None, options={}, url=None, **kwargs): super(CacheBackend, self).__init__(app, **kwargs) self.url = url self.options = dict(self.app.conf.cache_backend_options, **options) self.backend = url or backend or self.app.conf.cache_backend if self.backend: self.backend, _, servers = self.backend.partition('://') self.servers = servers.rstrip('/').split(';') self.expires = self.prepare_expires(expires, type=int) try: self.Client, self.key_t = backends[self.backend]() except KeyError: raise ImproperlyConfigured(UNKNOWN_BACKEND.format( self.backend, ', '.join(backends))) self._encode_prefixes() # rencode the keyprefixes def get(self, key): return self.client.get(key) def mget(self, keys): return self.client.get_multi(keys) def set(self, key, value): return self.client.set(key, value, self.expires) def delete(self, key): return self.client.delete(key) def _apply_chord_incr(self, header, partial_args, group_id, body, **opts): self.client.set(self.get_key_for_chord(group_id), 0, time=self.expires) return super(CacheBackend, self)._apply_chord_incr( header, partial_args, group_id, body, **opts) def incr(self, key): return self.client.incr(key) def expire(self, key, value): return self.client.touch(key, value) @cached_property def client(self): return self.Client(self.servers, **self.options) def __reduce__(self, args=(), kwargs={}): servers = ';'.join(self.servers) backend = '{0}://{1}/'.format(self.backend, servers) kwargs.update( dict(backend=backend, expires=self.expires, options=self.options)) return super(CacheBackend, self).__reduce__(args, kwargs) def as_uri(self, *args, **kwargs): """Return the backend as an URI. This properly handles the case of multiple servers. 
""" servers = ';'.join(self.servers) return '{0}://{1}/'.format(self.backend, servers) celery-4.1.0/celery/backends/redis.py0000644000175000017500000002763413130607475017464 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Redis result store backend.""" from __future__ import absolute_import, unicode_literals from functools import partial from kombu.utils.functional import retry_over_time from kombu.utils.objects import cached_property from kombu.utils.url import _parse_url from celery import states from celery._state import task_join_will_block from celery.canvas import maybe_signature from celery.exceptions import ChordError, ImproperlyConfigured from celery.five import string_t from celery.utils import deprecated from celery.utils.functional import dictfilter from celery.utils.log import get_logger from celery.utils.time import humanize_seconds from . import async from . import base try: import redis from kombu.transport.redis import get_redis_error_classes except ImportError: # pragma: no cover redis = None # noqa get_redis_error_classes = None # noqa __all__ = ['RedisBackend'] E_REDIS_MISSING = """ You need to install the redis library in order to use \ the Redis result store backend. """ E_LOST = 'Connection to Redis lost: Retry (%s/%s) %s.' logger = get_logger(__name__) class ResultConsumer(async.BaseResultConsumer): _pubsub = None def __init__(self, *args, **kwargs): super(ResultConsumer, self).__init__(*args, **kwargs) self._get_key_for_task = self.backend.get_key_for_task self._decode_result = self.backend.decode_result self.subscribed_to = set() def start(self, initial_task_id, **kwargs): self._pubsub = self.backend.client.pubsub( ignore_subscribe_messages=True, ) self._consume_from(initial_task_id) def on_wait_for_pending(self, result, **kwargs): for meta in result._iter_meta(): if meta is not None: self.on_state_change(meta, None) def stop(self): if self._pubsub is not None: self._pubsub.close() def drain_events(self, timeout=None): m = self._pubsub.get_message(timeout=timeout) if m and m['type'] == 'message': self.on_state_change(self._decode_result(m['data']), m) def consume_from(self, task_id): if self._pubsub is None: return self.start(task_id) self._consume_from(task_id) def _consume_from(self, task_id): key = self._get_key_for_task(task_id) if key not in self.subscribed_to: self.subscribed_to.add(key) self._pubsub.subscribe(key) def cancel_for(self, task_id): if self._pubsub: key = self._get_key_for_task(task_id) self.subscribed_to.discard(key) self._pubsub.unsubscribe(key) class RedisBackend(base.BaseKeyValueStoreBackend, async.AsyncBackendMixin): """Redis task result store.""" ResultConsumer = ResultConsumer #: :pypi:`redis` client module. redis = redis #: Maximum number of connections in the pool. 
max_connections = None supports_autoexpire = True supports_native_join = True def __init__(self, host=None, port=None, db=None, password=None, max_connections=None, url=None, connection_pool=None, **kwargs): super(RedisBackend, self).__init__(expires_type=int, **kwargs) _get = self.app.conf.get if self.redis is None: raise ImproperlyConfigured(E_REDIS_MISSING.strip()) if host and '://' in host: url, host = host, None self.max_connections = ( max_connections or _get('redis_max_connections') or self.max_connections) self._ConnectionPool = connection_pool socket_timeout = _get('redis_socket_timeout') socket_connect_timeout = _get('redis_socket_connect_timeout') self.connparams = { 'host': _get('redis_host') or 'localhost', 'port': _get('redis_port') or 6379, 'db': _get('redis_db') or 0, 'password': _get('redis_password'), 'max_connections': self.max_connections, 'socket_timeout': socket_timeout and float(socket_timeout), 'socket_connect_timeout': socket_connect_timeout and float(socket_connect_timeout), } # "redis_backend_use_ssl" must be a dict with the keys: # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile' # (the same as "broker_use_ssl") ssl = _get('redis_backend_use_ssl') if ssl: self.connparams.update(ssl) self.connparams['connection_class'] = redis.SSLConnection if url: self.connparams = self._params_from_url(url, self.connparams) self.url = url self.connection_errors, self.channel_errors = ( get_redis_error_classes() if get_redis_error_classes else ((), ())) self.result_consumer = self.ResultConsumer( self, self.app, self.accept, self._pending_results, self._pending_messages, ) def _params_from_url(self, url, defaults): scheme, host, port, _, password, path, query = _parse_url(url) connparams = dict( defaults, **dictfilter({ 'host': host, 'port': port, 'password': password, 'db': query.pop('virtual_host', None)}) ) if scheme == 'socket': # use 'path' as path to the socket… in this case # the database number should be given in 'query' connparams.update({ 'connection_class': self.redis.UnixDomainSocketConnection, 'path': '/' + path, }) # host+port are invalid options when using this connection type. connparams.pop('host', None) connparams.pop('port', None) connparams.pop('socket_connect_timeout') else: connparams['db'] = path # db may be string and start with / like in kombu. 
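
The URL handling above accepts both redis://host:port/db and socket:///path?virtual_host=db forms. An illustration of the db-field normalization performed just below, where the path component of redis://localhost:6379/3 arrives as '/3' and must become the integer 3 (str-only here; the real code also handles the Python 2 string type):

def normalize_db(path):
    db = path or 0
    if isinstance(db, str):
        db = db.strip('/')
    return int(db)

assert normalize_db('/3') == 3   # redis://localhost:6379/3
assert normalize_db('0') == 0
assert normalize_db(None) == 0   # no path given: database 0
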
db = connparams.get('db') or 0 db = db.strip('/') if isinstance(db, string_t) else db connparams['db'] = int(db) # Query parameters override other parameters connparams.update(query) return connparams def on_task_call(self, producer, task_id): if not task_join_will_block(): self.result_consumer.consume_from(task_id) def get(self, key): return self.client.get(key) def mget(self, keys): return self.client.mget(keys) def ensure(self, fun, args, **policy): retry_policy = dict(self.retry_policy, **policy) max_retries = retry_policy.get('max_retries') return retry_over_time( fun, self.connection_errors, args, {}, partial(self.on_connection_error, max_retries), **retry_policy) def on_connection_error(self, max_retries, exc, intervals, retries): tts = next(intervals) logger.error( E_LOST.strip(), retries, max_retries or 'Inf', humanize_seconds(tts, 'in ')) return tts def set(self, key, value, **retry_policy): return self.ensure(self._set, (key, value), **retry_policy) def _set(self, key, value): with self.client.pipeline() as pipe: if self.expires: pipe.setex(key, self.expires, value) else: pipe.set(key, value) pipe.publish(key, value) pipe.execute() def delete(self, key): self.client.delete(key) def incr(self, key): return self.client.incr(key) def expire(self, key, value): return self.client.expire(key, value) def add_to_chord(self, group_id, result): self.client.incr(self.get_key_for_group(group_id, '.t'), 1) def _unpack_chord_result(self, tup, decode, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES): _, tid, state, retval = decode(tup) if state in EXCEPTION_STATES: retval = self.exception_to_python(retval) if state in PROPAGATE_STATES: raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval)) return retval def apply_chord(self, header, partial_args, group_id, body, result=None, options={}, **kwargs): # Overrides this to avoid calling GroupResult.save # pylint: disable=method-hidden # Note that KeyValueStoreBackend.__init__ sets self.apply_chord # if the implements_incr attr is set. Redis backend doesn't set # this flag. 
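
on_chord_part_return below keeps one Redis list per group: every finished task RPUSHes its encoded entry, and whichever push makes LLEN reach the chord size joins the payloads and applies the callback. A miniature of that list-based join, with a dict of lists standing in for Redis:

def chord_part_return(lists, group_id, entry, chord_size, callback):
    jkey = 'celery-taskset-meta-' + group_id + '.j'
    lists.setdefault(jkey, []).append(entry)      # RPUSH jkey entry
    if len(lists[jkey]) == chord_size:            # LLEN jkey == total
        callback([result for _state, result in lists.pop(jkey)])

fired = []
lists = {}
chord_part_return(lists, 'g1', ('SUCCESS', 1), 2, fired.append)
chord_part_return(lists, 'g1', ('SUCCESS', 2), 2, fired.append)
assert fired == [[1, 2]]
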
options['task_id'] = group_id return header(*partial_args, **options or {}) def on_chord_part_return(self, request, state, result, propagate=None, **kwargs): app = self.app tid, gid = request.id, request.group if not gid or not tid: return client = self.client jkey = self.get_key_for_group(gid, '.j') tkey = self.get_key_for_group(gid, '.t') result = self.encode_result(result, state) with client.pipeline() as pipe: _, readycount, totaldiff, _, _ = pipe \ .rpush(jkey, self.encode([1, tid, state, result])) \ .llen(jkey) \ .get(tkey) \ .expire(jkey, self.expires) \ .expire(tkey, self.expires) \ .execute() totaldiff = int(totaldiff or 0) try: callback = maybe_signature(request.chord, app=app) total = callback['chord_size'] + totaldiff if readycount == total: decode, unpack = self.decode, self._unpack_chord_result with client.pipeline() as pipe: resl, _, _ = pipe \ .lrange(jkey, 0, total) \ .delete(jkey) \ .delete(tkey) \ .execute() try: callback.delay([unpack(tup, decode) for tup in resl]) except Exception as exc: # pylint: disable=broad-except logger.exception( 'Chord callback for %r raised: %r', request.group, exc) return self.chord_error_from_stack( callback, ChordError('Callback error: {0!r}'.format(exc)), ) except ChordError as exc: logger.exception('Chord %r raised: %r', request.group, exc) return self.chord_error_from_stack(callback, exc) except Exception as exc: # pylint: disable=broad-except logger.exception('Chord %r raised: %r', request.group, exc) return self.chord_error_from_stack( callback, ChordError('Join error: {0!r}'.format(exc)), ) def _create_client(self, **params): return self.redis.StrictRedis( connection_pool=self.ConnectionPool(**params), ) @property def ConnectionPool(self): if self._ConnectionPool is None: self._ConnectionPool = self.redis.ConnectionPool return self._ConnectionPool @cached_property def client(self): return self._create_client(**self.connparams) def __reduce__(self, args=(), kwargs={}): return super(RedisBackend, self).__reduce__( (self.url,), {'expires': self.expires}, ) @deprecated.Property(4.0, 5.0) def host(self): return self.connparams['host'] @deprecated.Property(4.0, 5.0) def port(self): return self.connparams['port'] @deprecated.Property(4.0, 5.0) def db(self): return self.connparams['db'] @deprecated.Property(4.0, 5.0) def password(self): return self.connparams['password'] celery-4.1.0/celery/backends/mongodb.py0000644000175000017500000002501113130607475017766 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """MongoDB result store backend.""" from __future__ import absolute_import, unicode_literals from datetime import datetime, timedelta from kombu.utils.objects import cached_property from kombu.utils.url import maybe_sanitize_url from kombu.exceptions import EncodeError from celery import states from celery.exceptions import ImproperlyConfigured from celery.five import string_t, items from .base import BaseBackend try: import pymongo except ImportError: # pragma: no cover pymongo = None # noqa if pymongo: try: from bson.binary import Binary except ImportError: # pragma: no cover from pymongo.binary import Binary # noqa from pymongo.errors import InvalidDocument # noqa else: # pragma: no cover Binary = None # noqa class InvalidDocument(Exception): # noqa pass __all__ = ['MongoBackend'] BINARY_CODECS = frozenset(['pickle', 'msgpack']) class MongoBackend(BaseBackend): """MongoDB result backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`pymongo` is not available. 
""" mongo_host = None host = 'localhost' port = 27017 user = None password = None database_name = 'celery' taskmeta_collection = 'celery_taskmeta' groupmeta_collection = 'celery_groupmeta' max_pool_size = 10 options = None supports_autoexpire = False _connection = None def __init__(self, app=None, **kwargs): self.options = {} super(MongoBackend, self).__init__(app, **kwargs) if not pymongo: raise ImproperlyConfigured( 'You need to install the pymongo library to use the ' 'MongoDB backend.') # Set option defaults for key, value in items(self._prepare_client_options()): self.options.setdefault(key, value) # update conf with mongo uri data, only if uri was given if self.url: if self.url == 'mongodb://': self.url += 'localhost' uri_data = pymongo.uri_parser.parse_uri(self.url) # build the hosts list to create a mongo connection hostslist = [ '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist'] ] self.user = uri_data['username'] self.password = uri_data['password'] self.mongo_host = hostslist if uri_data['database']: # if no database is provided in the uri, use default self.database_name = uri_data['database'] self.options.update(uri_data['options']) # update conf with specific settings config = self.app.conf.get('mongodb_backend_settings') if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'MongoDB backend settings should be grouped in a dict') config = dict(config) # don't modify original if 'host' in config or 'port' in config: # these should take over uri conf self.mongo_host = None self.host = config.pop('host', self.host) self.port = config.pop('port', self.port) self.mongo_host = config.pop('mongo_host', self.mongo_host) self.user = config.pop('user', self.user) self.password = config.pop('password', self.password) self.database_name = config.pop('database', self.database_name) self.taskmeta_collection = config.pop( 'taskmeta_collection', self.taskmeta_collection, ) self.groupmeta_collection = config.pop( 'groupmeta_collection', self.groupmeta_collection, ) self.options.update(config.pop('options', {})) self.options.update(config) def _prepare_client_options(self): if pymongo.version_tuple >= (3,): return {'maxPoolSize': self.max_pool_size} else: # pragma: no cover return {'max_pool_size': self.max_pool_size, 'auto_start_request': False} def _get_connection(self): """Connect to the MongoDB server.""" if self._connection is None: from pymongo import MongoClient host = self.mongo_host if not host: # The first pymongo.Connection() argument (host) can be # a list of ['host:port'] elements or a mongodb connection # URI. If this is the case, don't use self.port # but let pymongo get the port(s) from the URI instead. # This enables the use of replica sets and sharding. # See pymongo.Connection() for more info. 
host = self.host if isinstance(host, string_t) \ and not host.startswith('mongodb://'): host = 'mongodb://{0}:{1}'.format(host, self.port) # don't change self.options conf = dict(self.options) conf['host'] = host self._connection = MongoClient(**conf) return self._connection def encode(self, data): if self.serializer == 'bson': # mongodb handles serialization return data payload = super(MongoBackend, self).encode(data) # serializer which are in a unsupported format (pickle/binary) if self.serializer in BINARY_CODECS: payload = Binary(payload) return payload def decode(self, data): if self.serializer == 'bson': return data return super(MongoBackend, self).decode(data) def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Store return value and state of an executed task.""" meta = { '_id': task_id, 'status': state, 'result': self.encode(result), 'date_done': datetime.utcnow(), 'traceback': self.encode(traceback), 'children': self.encode( self.current_task_children(request), ), } try: self.collection.save(meta) except InvalidDocument as exc: raise EncodeError(exc) return result def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" obj = self.collection.find_one({'_id': task_id}) if obj: return self.meta_from_decoded({ 'task_id': obj['_id'], 'status': obj['status'], 'result': self.decode(obj['result']), 'date_done': obj['date_done'], 'traceback': self.decode(obj['traceback']), 'children': self.decode(obj['children']), }) return {'status': states.PENDING, 'result': None} def _save_group(self, group_id, result): """Save the group result.""" self.group_collection.save({ '_id': group_id, 'result': self.encode([i.id for i in result]), 'date_done': datetime.utcnow(), }) return result def _restore_group(self, group_id): """Get the result for a group by id.""" obj = self.group_collection.find_one({'_id': group_id}) if obj: return { 'task_id': obj['_id'], 'date_done': obj['date_done'], 'result': [ self.app.AsyncResult(task) for task in self.decode(obj['result']) ], } def _delete_group(self, group_id): """Delete a group by id.""" self.group_collection.remove({'_id': group_id}) def _forget(self, task_id): """Remove result from MongoDB. Raises: pymongo.exceptions.OperationsError: if the task_id could not be removed. """ # By using safe=True, this will wait until it receives a response from # the server. Likewise, it will raise an OperationsError if the # response was unable to be completed. self.collection.remove({'_id': task_id}) def cleanup(self): """Delete expired meta-data.""" self.collection.remove( {'date_done': {'$lt': self.app.now() - self.expires_delta}}, ) self.group_collection.remove( {'date_done': {'$lt': self.app.now() - self.expires_delta}}, ) def __reduce__(self, args=(), kwargs={}): return super(MongoBackend, self).__reduce__( args, dict(kwargs, expires=self.expires, url=self.url)) def _get_database(self): conn = self._get_connection() db = conn[self.database_name] if self.user and self.password: if not db.authenticate(self.user, self.password): raise ImproperlyConfigured( 'Invalid MongoDB username or password.') return db @cached_property def database(self): """Get database from MongoDB connection. performs authentication if necessary. """ return self._get_database() @cached_property def collection(self): """Get the meta-data task collection.""" collection = self.database[self.taskmeta_collection] # Ensure an index on date_done is there, if not process the index # in the background. 
Once completed cleanup will be much faster collection.ensure_index('date_done', background='true') return collection @cached_property def group_collection(self): """Get the meta-data task collection.""" collection = self.database[self.groupmeta_collection] # Ensure an index on date_done is there, if not process the index # in the background. Once completed cleanup will be much faster collection.ensure_index('date_done', background='true') return collection @cached_property def expires_delta(self): return timedelta(seconds=self.expires) def as_uri(self, include_password=False): """Return the backend as an URI. Arguments: include_password (bool): Password censored if disabled. """ if not self.url: return 'mongodb://' if include_password: return self.url if ',' not in self.url: return maybe_sanitize_url(self.url) uri1, remainder = self.url.split(',', 1) return ','.join([maybe_sanitize_url(uri1), remainder]) celery-4.1.0/celery/backends/consul.py0000644000175000017500000000626313130607475017654 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Consul result store backend. - :class:`ConsulBackend` implements KeyValueStoreBackend to store results in the key-value store of Consul. """ from __future__ import absolute_import, unicode_literals from kombu.utils.url import parse_url from celery.exceptions import ImproperlyConfigured from celery.backends.base import KeyValueStoreBackend, PY3 from celery.utils.log import get_logger try: import consul except ImportError: consul = None logger = get_logger(__name__) __all__ = ['ConsulBackend'] CONSUL_MISSING = """\ You need to install the python-consul library in order to use \ the Consul result store backend.""" class ConsulBackend(KeyValueStoreBackend): """Consul.io K/V store backend for Celery.""" consul = consul supports_autoexpire = True client = None consistency = 'consistent' path = None def __init__(self, *args, **kwargs): super(ConsulBackend, self).__init__(*args, **kwargs) if self.consul is None: raise ImproperlyConfigured(CONSUL_MISSING) self._init_from_params(**parse_url(self.url)) def _init_from_params(self, hostname, port, virtual_host, **params): logger.debug('Setting on Consul client to connect to %s:%d', hostname, port) self.path = virtual_host self.client = consul.Consul(host=hostname, port=port, consistency=self.consistency) def _key_to_consul_key(self, key): if PY3: key = key.encode('utf-8') return key if self.path is None else '{0}/{1}'.format(self.path, key) def get(self, key): key = self._key_to_consul_key(key) logger.debug('Trying to fetch key %s from Consul', key) try: _, data = self.client.kv.get(key) return data['Value'] except TypeError: pass def mget(self, keys): for key in keys: yield self.get(key) def set(self, key, value): """Set a key in Consul. Before creating the key it will create a session inside Consul where it creates a session with a TTL The key created afterwards will reference to the session's ID. 
If the session expires it will remove the key so that results can auto expire from the K/V store """ session_name = key if PY3: session_name = key.decode('utf-8') key = self._key_to_consul_key(key) logger.debug('Trying to create Consul session %s with TTL %d', session_name, self.expires) session_id = self.client.session.create(name=session_name, behavior='delete', ttl=self.expires) logger.debug('Created Consul session %s', session_id) logger.debug('Writing key %s to Consul', key) return self.client.kv.put(key=key, value=value, acquire=session_id) def delete(self, key): key = self._key_to_consul_key(key) logger.debug('Removing key %s from Consul', key) return self.client.kv.delete(key) celery-4.1.0/celery/backends/cassandra.py0000644000175000017500000001752213130607475020310 0ustar omeromer00000000000000# -* coding: utf-8 -*- """Apache Cassandra result store backend using the DataStax driver.""" from __future__ import absolute_import, unicode_literals import sys from celery import states from celery.exceptions import ImproperlyConfigured from celery.utils.log import get_logger from .base import BaseBackend try: # pragma: no cover import cassandra import cassandra.auth import cassandra.cluster except ImportError: # pragma: no cover cassandra = None # noqa __all__ = ['CassandraBackend'] logger = get_logger(__name__) E_NO_CASSANDRA = """ You need to install the cassandra-driver library to use the Cassandra backend. See https://github.com/datastax/python-driver """ E_NO_SUCH_CASSANDRA_AUTH_PROVIDER = """ CASSANDRA_AUTH_PROVIDER you provided is not a valid auth_provider class. See https://datastax.github.io/python-driver/api/cassandra/auth.html. """ Q_INSERT_RESULT = """ INSERT INTO {table} ( task_id, status, result, date_done, traceback, children) VALUES ( %s, %s, %s, %s, %s, %s) {expires}; """ Q_SELECT_RESULT = """ SELECT status, result, date_done, traceback, children FROM {table} WHERE task_id=%s LIMIT 1 """ Q_CREATE_RESULT_TABLE = """ CREATE TABLE {table} ( task_id text, status text, result blob, date_done timestamp, traceback blob, children blob, PRIMARY KEY ((task_id), date_done) ) WITH CLUSTERING ORDER BY (date_done DESC); """ Q_EXPIRES = """ USING TTL {0} """ if sys.version_info[0] == 3: def buf_t(x): return bytes(x, 'utf8') else: buf_t = buffer # noqa class CassandraBackend(BaseBackend): """Cassandra backend utilizing DataStax driver. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`cassandra-driver` is not available, or if the :setting:`cassandra_servers` setting is not set. """ #: List of Cassandra servers with format: ``hostname``. 
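    #: For example (illustrative):
    #: ``['cass1.example.com', 'cass2.example.com']``.
    #: Normally taken from the :setting:`cassandra_servers` setting.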
servers = None supports_autoexpire = True # autoexpire supported via entry_ttl def __init__(self, servers=None, keyspace=None, table=None, entry_ttl=None, port=9042, **kwargs): super(CassandraBackend, self).__init__(**kwargs) if not cassandra: raise ImproperlyConfigured(E_NO_CASSANDRA) conf = self.app.conf self.servers = servers or conf.get('cassandra_servers', None) self.port = port or conf.get('cassandra_port', None) self.keyspace = keyspace or conf.get('cassandra_keyspace', None) self.table = table or conf.get('cassandra_table', None) if not self.servers or not self.keyspace or not self.table: raise ImproperlyConfigured('Cassandra backend not configured.') expires = entry_ttl or conf.get('cassandra_entry_ttl', None) self.cqlexpires = ( Q_EXPIRES.format(expires) if expires is not None else '') read_cons = conf.get('cassandra_read_consistency') or 'LOCAL_QUORUM' write_cons = conf.get('cassandra_write_consistency') or 'LOCAL_QUORUM' self.read_consistency = getattr( cassandra.ConsistencyLevel, read_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM) self.write_consistency = getattr( cassandra.ConsistencyLevel, write_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM) self.auth_provider = None auth_provider = conf.get('cassandra_auth_provider', None) auth_kwargs = conf.get('cassandra_auth_kwargs', None) if auth_provider and auth_kwargs: auth_provider_class = getattr(cassandra.auth, auth_provider, None) if not auth_provider_class: raise ImproperlyConfigured(E_NO_SUCH_CASSANDRA_AUTH_PROVIDER) self.auth_provider = auth_provider_class(**auth_kwargs) self._connection = None self._session = None self._write_stmt = None self._read_stmt = None self._make_stmt = None def process_cleanup(self): if self._connection is not None: self._connection.shutdown() # also shuts down _session self._connection = None self._session = None def _get_connection(self, write=False): """Prepare the connection for action. Arguments: write (bool): are we a writer? """ if self._connection is not None: return try: self._connection = cassandra.cluster.Cluster( self.servers, port=self.port, auth_provider=self.auth_provider) self._session = self._connection.connect(self.keyspace) # We're forced to do concatenation below, as formatting would # blow up on superficial %s that'll be processed by Cassandra self._write_stmt = cassandra.query.SimpleStatement( Q_INSERT_RESULT.format( table=self.table, expires=self.cqlexpires), ) self._write_stmt.consistency_level = self.write_consistency self._read_stmt = cassandra.query.SimpleStatement( Q_SELECT_RESULT.format(table=self.table), ) self._read_stmt.consistency_level = self.read_consistency if write: # Only possible writers "workers" are allowed to issue # CREATE TABLE. This is to prevent conflicting situations # where both task-creator and task-executor would issue it # at the same time. # Anyway; if you're doing anything critical, you should # have created this table in advance, in which case # this query will be a no-op (AlreadyExists) self._make_stmt = cassandra.query.SimpleStatement( Q_CREATE_RESULT_TABLE.format(table=self.table), ) self._make_stmt.consistency_level = self.write_consistency try: self._session.execute(self._make_stmt) except cassandra.AlreadyExists: pass except cassandra.OperationTimedOut: # a heavily loaded or gone Cassandra cluster failed to respond. 
# leave this class in a consistent state if self._connection is not None: self._connection.shutdown() # also shuts down _session self._connection = None self._session = None raise # we did fail after all - reraise def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Store return value and state of an executed task.""" self._get_connection(write=True) self._session.execute(self._write_stmt, ( task_id, state, buf_t(self.encode(result)), self.app.now(), buf_t(self.encode(traceback)), buf_t(self.encode(self.current_task_children(request))) )) def as_uri(self, include_password=True): return 'cassandra://' def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" self._get_connection() res = self._session.execute(self._read_stmt, (task_id, )) if not res: return {'status': states.PENDING, 'result': None} status, result, date_done, traceback, children = res[0] return self.meta_from_decoded({ 'task_id': task_id, 'status': status, 'result': self.decode(result), 'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), 'traceback': self.decode(traceback), 'children': self.decode(children), }) def __reduce__(self, args=(), kwargs={}): kwargs.update( dict(servers=self.servers, keyspace=self.keyspace, table=self.table)) return super(CassandraBackend, self).__reduce__(args, kwargs) celery-4.1.0/celery/backends/amqp.py0000644000175000017500000002651313130607475017307 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The old AMQP result backend, deprecated and replaced by the RPC backend.""" from __future__ import absolute_import, unicode_literals import socket from collections import deque from operator import itemgetter from kombu import Exchange, Queue, Producer, Consumer from celery import states from celery.exceptions import TimeoutError from celery.five import range, monotonic from celery.utils import deprecated from celery.utils.log import get_logger from .base import BaseBackend __all__ = ['BacklogLimitExceeded', 'AMQPBackend'] logger = get_logger(__name__) class BacklogLimitExceeded(Exception): """Too much state history to fast-forward.""" def repair_uuid(s): # Historically the dashes in UUIDS are removed from AMQ entity names, # but there's no known reason to. Hopefully we'll be able to fix # this in v4.0. return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:]) class NoCacheQueue(Queue): can_cache_declaration = False class AMQPBackend(BaseBackend): """The AMQP result backend. Deprecated: Please use the RPC backend or a persistent backend. 
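    Migrating away is a one-line configuration change, for example
    (illustrative):

    .. code-block:: python

        app.conf.result_backend = 'rpc://'  # or any persistent backend URL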
""" Exchange = Exchange Queue = NoCacheQueue Consumer = Consumer Producer = Producer BacklogLimitExceeded = BacklogLimitExceeded persistent = True supports_autoexpire = True supports_native_join = True retry_policy = { 'max_retries': 20, 'interval_start': 0, 'interval_step': 1, 'interval_max': 1, } def __init__(self, app, connection=None, exchange=None, exchange_type=None, persistent=None, serializer=None, auto_delete=True, **kwargs): deprecated.warn( 'The AMQP result backend', deprecation='4.0', removal='5.0', alternative='Please use RPC backend or a persistent backend.') super(AMQPBackend, self).__init__(app, **kwargs) conf = self.app.conf self._connection = connection self.persistent = self.prepare_persistent(persistent) self.delivery_mode = 2 if self.persistent else 1 exchange = exchange or conf.result_exchange exchange_type = exchange_type or conf.result_exchange_type self.exchange = self._create_exchange( exchange, exchange_type, self.delivery_mode, ) self.serializer = serializer or conf.result_serializer self.auto_delete = auto_delete def _create_exchange(self, name, type='direct', delivery_mode=2): return self.Exchange(name=name, type=type, delivery_mode=delivery_mode, durable=self.persistent, auto_delete=False) def _create_binding(self, task_id): name = self.rkey(task_id) return self.Queue( name=name, exchange=self.exchange, routing_key=name, durable=self.persistent, auto_delete=self.auto_delete, expires=self.expires, ) def revive(self, channel): pass def rkey(self, task_id): return task_id.replace('-', '') def destination_for(self, task_id, request): if request: return self.rkey(task_id), request.correlation_id or task_id return self.rkey(task_id), task_id def store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Send task return value and state.""" routing_key, correlation_id = self.destination_for(task_id, request) if not routing_key: return with self.app.amqp.producer_pool.acquire(block=True) as producer: producer.publish( {'task_id': task_id, 'status': state, 'result': self.encode_result(result, state), 'traceback': traceback, 'children': self.current_task_children(request)}, exchange=self.exchange, routing_key=routing_key, correlation_id=correlation_id, serializer=self.serializer, retry=True, retry_policy=self.retry_policy, declare=self.on_reply_declare(task_id), delivery_mode=self.delivery_mode, ) return result def on_reply_declare(self, task_id): return [self._create_binding(task_id)] def wait_for(self, task_id, timeout=None, cache=True, no_ack=True, on_interval=None, READY_STATES=states.READY_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): cached_meta = self._cache.get(task_id) if cache and cached_meta and \ cached_meta['status'] in READY_STATES: return cached_meta else: try: return self.consume(task_id, timeout=timeout, no_ack=no_ack, on_interval=on_interval) except socket.timeout: raise TimeoutError('The operation timed out.') def get_task_meta(self, task_id, backlog_limit=1000): # Polling and using basic_get with self.app.pool.acquire_channel(block=True) as (_, channel): binding = self._create_binding(task_id)(channel) binding.declare() prev = latest = acc = None for i in range(backlog_limit): # spool ffwd acc = binding.get( accept=self.accept, no_ack=False, ) if not acc: # no more messages break if acc.payload['task_id'] == task_id: prev, latest = latest, acc if prev: # backends are not expected to keep history, # so we delete everything except the most recent state. 
prev.ack() prev = None else: raise self.BacklogLimitExceeded(task_id) if latest: payload = self._cache[task_id] = self.meta_from_decoded( latest.payload) latest.requeue() return payload else: # no new state, use previous try: return self._cache[task_id] except KeyError: # result probably pending. return {'status': states.PENDING, 'result': None} poll = get_task_meta # XXX compat def drain_events(self, connection, consumer, timeout=None, on_interval=None, now=monotonic, wait=None): wait = wait or connection.drain_events results = {} def callback(meta, message): if meta['status'] in states.READY_STATES: results[meta['task_id']] = self.meta_from_decoded(meta) consumer.callbacks[:] = [callback] time_start = now() while 1: # Total time spent may exceed a single call to wait() if timeout and now() - time_start >= timeout: raise socket.timeout() try: wait(timeout=1) except socket.timeout: pass if on_interval: on_interval() if results: # got event on the wanted channel. break self._cache.update(results) return results def consume(self, task_id, timeout=None, no_ack=True, on_interval=None): wait = self.drain_events with self.app.pool.acquire_channel(block=True) as (conn, channel): binding = self._create_binding(task_id) with self.Consumer(channel, binding, no_ack=no_ack, accept=self.accept) as consumer: while 1: try: return wait( conn, consumer, timeout, on_interval)[task_id] except KeyError: continue def _many_bindings(self, ids): return [self._create_binding(task_id) for task_id in ids] def get_many(self, task_ids, timeout=None, no_ack=True, on_message=None, on_interval=None, now=monotonic, getfields=itemgetter('status', 'task_id'), READY_STATES=states.READY_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): with self.app.pool.acquire_channel(block=True) as (conn, channel): ids = set(task_ids) cached_ids = set() mark_cached = cached_ids.add for task_id in ids: try: cached = self._cache[task_id] except KeyError: pass else: if cached['status'] in READY_STATES: yield task_id, cached mark_cached(task_id) ids.difference_update(cached_ids) results = deque() push_result = results.append push_cache = self._cache.__setitem__ decode_result = self.meta_from_decoded def _on_message(message): body = decode_result(message.decode()) if on_message is not None: on_message(body) state, uid = getfields(body) if state in READY_STATES: push_result(body) \ if uid in task_ids else push_cache(uid, body) bindings = self._many_bindings(task_ids) with self.Consumer(channel, bindings, on_message=_on_message, accept=self.accept, no_ack=no_ack): wait = conn.drain_events popleft = results.popleft while ids: wait(timeout=timeout) while results: state = popleft() task_id = state['task_id'] ids.discard(task_id) push_cache(task_id, state) yield task_id, state if on_interval: on_interval() def reload_task_result(self, task_id): raise NotImplementedError( 'reload_task_result is not supported by this backend.') def reload_group_result(self, task_id): """Reload group result, even if it has been previously fetched.""" raise NotImplementedError( 'reload_group_result is not supported by this backend.') def save_group(self, group_id, result): raise NotImplementedError( 'save_group is not supported by this backend.') def restore_group(self, group_id, cache=True): raise NotImplementedError( 'restore_group is not supported by this backend.') def delete_group(self, group_id): raise NotImplementedError( 'delete_group is not supported by this backend.') def __reduce__(self, args=(), kwargs={}): kwargs.update( 
connection=self._connection, exchange=self.exchange.name, exchange_type=self.exchange.type, persistent=self.persistent, serializer=self.serializer, auto_delete=self.auto_delete, expires=self.expires, ) return super(AMQPBackend, self).__reduce__(args, kwargs) def as_uri(self, include_password=True): return 'amqp://' celery-4.1.0/celery/backends/async.py0000644000175000017500000002162013130607475017460 0ustar omeromer00000000000000"""Async I/O backend support utilities.""" from __future__ import absolute_import, unicode_literals import socket import threading from collections import deque from time import sleep from weakref import WeakKeyDictionary from kombu.utils.compat import detect_environment from kombu.utils.objects import cached_property from celery import states from celery.exceptions import TimeoutError from celery.five import Empty, monotonic from celery.utils.threads import THREAD_TIMEOUT_MAX __all__ = [ 'AsyncBackendMixin', 'BaseResultConsumer', 'Drainer', 'register_drainer', ] drainers = {} def register_drainer(name): """Decorator used to register a new result drainer type.""" def _inner(cls): drainers[name] = cls return cls return _inner @register_drainer('default') class Drainer(object): """Result draining service.""" def __init__(self, result_consumer): self.result_consumer = result_consumer def start(self): pass def stop(self): pass def drain_events_until(self, p, timeout=None, on_interval=None, wait=None): wait = wait or self.result_consumer.drain_events time_start = monotonic() while 1: # Total time spent may exceed a single call to wait() if timeout and monotonic() - time_start >= timeout: raise socket.timeout() try: yield self.wait_for(p, wait, timeout=1) except socket.timeout: pass if on_interval: on_interval() if p.ready: # got event on the wanted channel. break def wait_for(self, p, wait, timeout=None): wait(timeout=timeout) class greenletDrainer(Drainer): spawn = None _g = None def __init__(self, *args, **kwargs): super(greenletDrainer, self).__init__(*args, **kwargs) self._started = threading.Event() self._stopped = threading.Event() self._shutdown = threading.Event() def run(self): self._started.set() while not self._stopped.is_set(): try: self.result_consumer.drain_events(timeout=1) except socket.timeout: pass self._shutdown.set() def start(self): if not self._started.is_set(): self._g = self.spawn(self.run) self._started.wait() def stop(self): self._stopped.set() self._shutdown.wait(THREAD_TIMEOUT_MAX) def wait_for(self, p, wait, timeout=None): self.start() if not p.ready: sleep(0) @register_drainer('eventlet') class eventletDrainer(greenletDrainer): @cached_property def spawn(self): from eventlet import spawn return spawn @register_drainer('gevent') class geventDrainer(greenletDrainer): @cached_property def spawn(self): from gevent import spawn return spawn class AsyncBackendMixin(object): """Mixin for backends that enables the async API.""" def _collect_into(self, result, bucket): self.result_consumer.buckets[result] = bucket def iter_native(self, result, no_ack=True, **kwargs): self._ensure_not_eager() results = result.results if not results: raise StopIteration() # we tell the result consumer to put consumed results # into these buckets. 
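        # Nodes that already have a cached result go straight into
        # ``bucket``; the rest are registered with the result consumer,
        # which appends them to ``bucket`` as their messages arrive.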
bucket = deque() for node in results: if node._cache: bucket.append(node) else: self._collect_into(node, bucket) for _ in self._wait_for_pending(result, no_ack=no_ack, **kwargs): while bucket: node = bucket.popleft() yield node.id, node._cache while bucket: node = bucket.popleft() yield node.id, node._cache def add_pending_result(self, result, weak=False, start_drainer=True): if start_drainer: self.result_consumer.drainer.start() try: self._maybe_resolve_from_buffer(result) except Empty: self._add_pending_result(result.id, result, weak=weak) return result def _maybe_resolve_from_buffer(self, result): result._maybe_set_cache(self._pending_messages.take(result.id)) def _add_pending_result(self, task_id, result, weak=False): concrete, weak_ = self._pending_results if task_id not in weak_ and result.id not in concrete: (weak_ if weak else concrete)[task_id] = result self.result_consumer.consume_from(task_id) def add_pending_results(self, results, weak=False): self.result_consumer.drainer.start() return [self.add_pending_result(result, weak=weak, start_drainer=False) for result in results] def remove_pending_result(self, result): self._remove_pending_result(result.id) self.on_result_fulfilled(result) return result def _remove_pending_result(self, task_id): for map in self._pending_results: map.pop(task_id, None) def on_result_fulfilled(self, result): self.result_consumer.cancel_for(result.id) def wait_for_pending(self, result, callback=None, propagate=True, **kwargs): self._ensure_not_eager() for _ in self._wait_for_pending(result, **kwargs): pass return result.maybe_throw(callback=callback, propagate=propagate) def _wait_for_pending(self, result, timeout=None, on_interval=None, on_message=None, **kwargs): return self.result_consumer._wait_for_pending( result, timeout=timeout, on_interval=on_interval, on_message=on_message, ) @property def is_async(self): return True class BaseResultConsumer(object): """Manager responsible for consuming result messages.""" def __init__(self, backend, app, accept, pending_results, pending_messages): self.backend = backend self.app = app self.accept = accept self._pending_results = pending_results self._pending_messages = pending_messages self.on_message = None self.buckets = WeakKeyDictionary() self.drainer = drainers[detect_environment()](self) def start(self, initial_task_id, **kwargs): raise NotImplementedError() def stop(self): pass def drain_events(self, timeout=None): raise NotImplementedError() def consume_from(self, task_id): raise NotImplementedError() def cancel_for(self, task_id): raise NotImplementedError() def _after_fork(self): self.buckets.clear() self.buckets = WeakKeyDictionary() self.on_message = None self.on_after_fork() def on_after_fork(self): pass def drain_events_until(self, p, timeout=None, on_interval=None): return self.drainer.drain_events_until( p, timeout=timeout, on_interval=on_interval) def _wait_for_pending(self, result, timeout=None, on_interval=None, on_message=None, **kwargs): self.on_wait_for_pending(result, timeout=timeout, **kwargs) prev_on_m, self.on_message = self.on_message, on_message try: for _ in self.drain_events_until( result.on_ready, timeout=timeout, on_interval=on_interval): yield sleep(0) except socket.timeout: raise TimeoutError('The operation timed out.') finally: self.on_message = prev_on_m def on_wait_for_pending(self, result, timeout=None, **kwargs): pass def on_out_of_band_result(self, message): self.on_state_change(message.payload, message) def _get_pending_result(self, task_id): for mapping in 
self._pending_results: try: return mapping[task_id] except KeyError: pass raise KeyError(task_id) def on_state_change(self, meta, message): if self.on_message: self.on_message(meta) if meta['status'] in states.READY_STATES: task_id = meta['task_id'] try: result = self._get_pending_result(task_id) except KeyError: # send to buffer in case we received this result # before it was added to _pending_results. self._pending_messages.put(task_id, meta) else: result._maybe_set_cache(meta) buckets = self.buckets try: # remove bucket for this result, since it's fulfilled bucket = buckets.pop(result) except KeyError: pass else: # send to waiter via bucket bucket.append(result) sleep(0) celery-4.1.0/celery/backends/couchdb.py0000644000175000017500000000571013130607475017754 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """CouchDB result store backend.""" from __future__ import absolute_import, unicode_literals from kombu.utils.url import _parse_url from celery.exceptions import ImproperlyConfigured from .base import KeyValueStoreBackend try: import pycouchdb except ImportError: pycouchdb = None # noqa __all__ = ['CouchBackend'] ERR_LIB_MISSING = """\ You need to install the pycouchdb library to use the CouchDB result backend\ """ class CouchBackend(KeyValueStoreBackend): """CouchDB backend. Raises: celery.exceptions.ImproperlyConfigured: if module :pypi:`pycouchdb` is not available. """ container = 'default' scheme = 'http' host = 'localhost' port = 5984 username = None password = None def __init__(self, url=None, *args, **kwargs): super(CouchBackend, self).__init__(*args, **kwargs) self.url = url if pycouchdb is None: raise ImproperlyConfigured(ERR_LIB_MISSING) uscheme = uhost = uport = uname = upass = ucontainer = None if url: _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url) # noqa ucontainer = ucontainer.strip('/') if ucontainer else None self.scheme = uscheme or self.scheme self.host = uhost or self.host self.port = int(uport or self.port) self.container = ucontainer or self.container self.username = uname or self.username self.password = upass or self.password self._connection = None def _get_connection(self): """Connect to the CouchDB server.""" if self.username and self.password: conn_string = '%s://%s:%s@%s:%s' % ( self.scheme, self.username, self.password, self.host, str(self.port)) server = pycouchdb.Server(conn_string, authmethod='basic') else: conn_string = '%s://%s:%s' % ( self.scheme, self.host, str(self.port)) server = pycouchdb.Server(conn_string) try: return server.database(self.container) except pycouchdb.exceptions.NotFound: return server.create(self.container) @property def connection(self): if self._connection is None: self._connection = self._get_connection() return self._connection def get(self, key): try: return self.connection.get(key)['value'] except pycouchdb.exceptions.NotFound: return None def set(self, key, value): data = {'_id': key, 'value': value} try: self.connection.save(data) except pycouchdb.exceptions.Conflict: # document already exists, update it data = self.connection.get(key) data['value'] = value self.connection.save(data) def mget(self, keys): return [self.get(key) for key in keys] def delete(self, key): self.connection.delete(key) celery-4.1.0/celery/bootsteps.py0000644000175000017500000003041613130607475016616 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """A directed acyclic graph of reusable components.""" from __future__ import absolute_import, unicode_literals from collections import deque from threading import Event from 
kombu.common import ignore_errors from kombu.utils.encoding import bytes_to_str from kombu.utils.imports import symbol_by_name from .five import bytes_if_py2, values, with_metaclass from .utils.graph import DependencyGraph, GraphFormatter from .utils.imports import instantiate, qualname from .utils.log import get_logger try: from greenlet import GreenletExit except ImportError: # pragma: no cover IGNORE_ERRORS = () else: IGNORE_ERRORS = (GreenletExit,) __all__ = ['Blueprint', 'Step', 'StartStopStep', 'ConsumerStep'] #: States RUN = 0x1 CLOSE = 0x2 TERMINATE = 0x3 logger = get_logger(__name__) def _pre(ns, fmt): return '| {0}: {1}'.format(ns.alias, fmt) def _label(s): return s.name.rsplit('.', 1)[-1] class StepFormatter(GraphFormatter): """Graph formatter for :class:`Blueprint`.""" blueprint_prefix = '⧉' conditional_prefix = '∘' blueprint_scheme = { 'shape': 'parallelogram', 'color': 'slategray4', 'fillcolor': 'slategray3', } def label(self, step): return step and '{0}{1}'.format( self._get_prefix(step), bytes_to_str( (step.label or _label(step)).encode('utf-8', 'ignore')), ) def _get_prefix(self, step): if step.last: return self.blueprint_prefix if step.conditional: return self.conditional_prefix return '' def node(self, obj, **attrs): scheme = self.blueprint_scheme if obj.last else self.node_scheme return self.draw_node(obj, scheme, attrs) def edge(self, a, b, **attrs): if a.last: attrs.update(arrowhead='none', color='darkseagreen3') return self.draw_edge(a, b, self.edge_scheme, attrs) class Blueprint(object): """Blueprint containing bootsteps that can be applied to objects. Arguments: steps Sequence[Union[str, Step]]: List of steps. name (str): Set explicit name for this blueprint. on_start (Callable): Optional callback applied after blueprint start. on_close (Callable): Optional callback applied before blueprint close. on_stopped (Callable): Optional callback applied after blueprint stopped. 
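    Example (the step names are illustrative, and ``service`` is any
    object with a ``steps`` list):

    .. code-block:: python

        blueprint = Blueprint(
            steps=['proj.steps.ConnectionStep', 'proj.steps.ConsumerStep'],
            name='MyService',
        )
        blueprint.apply(service)   # bind and include the steps
        blueprint.start(service)   # start them in dependency order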
""" GraphFormatter = StepFormatter name = None state = None started = 0 default_steps = set() state_to_name = { 0: 'initializing', RUN: 'running', CLOSE: 'closing', TERMINATE: 'terminating', } def __init__(self, steps=None, name=None, on_start=None, on_close=None, on_stopped=None): self.name = name or self.name or qualname(type(self)) self.types = set(steps or []) | set(self.default_steps) self.on_start = on_start self.on_close = on_close self.on_stopped = on_stopped self.shutdown_complete = Event() self.steps = {} def start(self, parent): self.state = RUN if self.on_start: self.on_start() for i, step in enumerate(s for s in parent.steps if s is not None): self._debug('Starting %s', step.alias) self.started = i + 1 step.start(parent) logger.debug('^-- substep ok') def human_state(self): return self.state_to_name[self.state or 0] def info(self, parent): info = {} for step in parent.steps: info.update(step.info(parent) or {}) return info def close(self, parent): if self.on_close: self.on_close() self.send_all(parent, 'close', 'closing', reverse=False) def restart(self, parent, method='stop', description='restarting', propagate=False): self.send_all(parent, method, description, propagate=propagate) def send_all(self, parent, method, description=None, reverse=True, propagate=True, args=()): description = description or method.replace('_', ' ') steps = reversed(parent.steps) if reverse else parent.steps for step in steps: if step: fun = getattr(step, method, None) if fun is not None: self._debug('%s %s...', description.capitalize(), step.alias) try: fun(parent, *args) except Exception as exc: # pylint: disable=broad-except if propagate: raise logger.exception( 'Error on %s %s: %r', description, step.alias, exc) def stop(self, parent, close=True, terminate=False): what = 'terminating' if terminate else 'stopping' if self.state in (CLOSE, TERMINATE): return if self.state != RUN or self.started != len(parent.steps): # Not fully started, can safely exit. self.state = TERMINATE self.shutdown_complete.set() return self.close(parent) self.state = CLOSE self.restart( parent, 'terminate' if terminate else 'stop', description=what, propagate=False, ) if self.on_stopped: self.on_stopped() self.state = TERMINATE self.shutdown_complete.set() def join(self, timeout=None): try: # Will only get here if running green, # makes sure all greenthreads have exited. self.shutdown_complete.wait(timeout=timeout) except IGNORE_ERRORS: pass def apply(self, parent, **kwargs): """Apply the steps in this blueprint to an object. This will apply the ``__init__`` and ``include`` methods of each step, with the object as argument:: step = Step(obj) ... step.include(obj) For :class:`StartStopStep` the services created will also be added to the objects ``steps`` attribute. 
""" self._debug('Preparing bootsteps.') order = self.order = [] steps = self.steps = self.claim_steps() self._debug('Building graph...') for S in self._finalize_steps(steps): step = S(parent, **kwargs) steps[step.name] = step order.append(step) self._debug('New boot order: {%s}', ', '.join(s.alias for s in self.order)) for step in order: step.include(parent) return self def connect_with(self, other): self.graph.adjacent.update(other.graph.adjacent) self.graph.add_edge(type(other.order[0]), type(self.order[-1])) def __getitem__(self, name): return self.steps[name] def _find_last(self): return next((C for C in values(self.steps) if C.last), None) def _firstpass(self, steps): for step in values(steps): step.requires = [symbol_by_name(dep) for dep in step.requires] stream = deque(step.requires for step in values(steps)) while stream: for node in stream.popleft(): node = symbol_by_name(node) if node.name not in self.steps: steps[node.name] = node stream.append(node.requires) def _finalize_steps(self, steps): last = self._find_last() self._firstpass(steps) it = ((C, C.requires) for C in values(steps)) G = self.graph = DependencyGraph( it, formatter=self.GraphFormatter(root=last), ) if last: for obj in G: if obj != last: G.add_edge(last, obj) try: return G.topsort() except KeyError as exc: raise KeyError('unknown bootstep: %s' % exc) def claim_steps(self): return dict(self.load_step(step) for step in self.types) def load_step(self, step): step = symbol_by_name(step) return step.name, step def _debug(self, msg, *args): return logger.debug(_pre(self, msg), *args) @property def alias(self): return _label(self) class StepType(type): """Meta-class for steps.""" name = None requires = None def __new__(cls, name, bases, attrs): module = attrs.get('__module__') qname = '{0}.{1}'.format(module, name) if module else name attrs.update( __qualname__=qname, name=attrs.get('name') or qname, ) return super(StepType, cls).__new__(cls, name, bases, attrs) def __str__(self): return bytes_if_py2(self.name) def __repr__(self): return bytes_if_py2('step:{0.name}{{{0.requires!r}}}'.format(self)) @with_metaclass(StepType) class Step(object): """A Bootstep. The :meth:`__init__` method is called when the step is bound to a parent object, and can as such be used to initialize attributes in the parent object at parent instantiation-time. """ #: Optional step name, will use ``qualname`` if not specified. name = None #: Optional short name used for graph outputs and in logs. label = None #: Set this to true if the step is enabled based on some condition. conditional = False #: List of other steps that that must be started before this step. #: Note that all dependencies must be in the same blueprint. requires = () #: This flag is reserved for the workers Consumer, #: since it is required to always be started last. #: There can only be one object marked last #: in every blueprint. last = False #: This provides the default for :meth:`include_if`. enabled = True def __init__(self, parent, **kwargs): pass def include_if(self, parent): """Return true if bootstep should be included. You can define this as an optional predicate that decides whether this step should be created. 
""" return self.enabled def instantiate(self, name, *args, **kwargs): return instantiate(name, *args, **kwargs) def _should_include(self, parent): if self.include_if(parent): return True, self.create(parent) return False, None def include(self, parent): return self._should_include(parent)[0] def create(self, parent): """Create the step.""" pass def __repr__(self): return bytes_if_py2(''.format(self)) @property def alias(self): return self.label or _label(self) def info(self, obj): pass class StartStopStep(Step): """Bootstep that must be started and stopped in order.""" #: Optional obj created by the :meth:`create` method. #: This is used by :class:`StartStopStep` to keep the #: original service object. obj = None def start(self, parent): if self.obj: return self.obj.start() def stop(self, parent): if self.obj: return self.obj.stop() def close(self, parent): pass def terminate(self, parent): if self.obj: return getattr(self.obj, 'terminate', self.obj.stop)() def include(self, parent): inc, ret = self._should_include(parent) if inc: self.obj = ret parent.steps.append(self) return inc class ConsumerStep(StartStopStep): """Bootstep that starts a message consumer.""" requires = ('celery.worker.consumer:Connection',) consumers = None def get_consumers(self, channel): raise NotImplementedError('missing get_consumers') def start(self, c): channel = c.connection.channel() self.consumers = self.get_consumers(channel) for consumer in self.consumers or []: consumer.consume() def stop(self, c): self._close(c, True) def shutdown(self, c): self._close(c, False) def _close(self, c, cancel_consumers=True): channels = set() for consumer in self.consumers or []: if cancel_consumers: ignore_errors(c.connection, consumer.cancel) if consumer.channel: channels.add(consumer.channel) for channel in channels: ignore_errors(c.connection, channel.close) celery-4.1.0/celery/contrib/0000755000175000017500000000000013135426347015660 5ustar omeromer00000000000000celery-4.1.0/celery/contrib/pytest.py0000644000175000017500000001417313130607475017566 0ustar omeromer00000000000000"""Fixtures and testing utilities for :pypi:`py.test `.""" from __future__ import absolute_import, unicode_literals import os import pytest from contextlib import contextmanager from .testing import worker from .testing.app import TestApp, setup_default_app NO_WORKER = os.environ.get('NO_WORKER') # pylint: disable=redefined-outer-name # Well, they're called fixtures.... @contextmanager def _create_app(request, enable_logging=False, use_trap=False, parameters={}, **config): # type: (Any, **Any) -> Celery """Utility context used to setup Celery app for pytest fixtures.""" test_app = TestApp( set_as_current=False, enable_logging=enable_logging, config=config, **parameters ) # request.module is not defined for session _module = getattr(request, 'module', None) _cls = getattr(request, 'cls', None) _function = getattr(request, 'function', None) with setup_default_app(test_app, use_trap=use_trap): is_not_contained = any([ not getattr(_module, 'app_contained', True), not getattr(_cls, 'app_contained', True), not getattr(_function, 'app_contained', True) ]) if is_not_contained: test_app.set_current() yield test_app @pytest.fixture(scope='session') def use_celery_app_trap(): # type: () -> bool """You can override this fixture to enable the app trap. The app trap raises an exception whenever something attempts to use the current or default apps. 
""" return False @pytest.fixture(scope='session') def celery_session_app(request, celery_config, celery_parameters, celery_enable_logging, use_celery_app_trap): # type: (Any) -> Celery """Session Fixture: Return app for session fixtures.""" mark = request.node.get_marker('celery') config = dict(celery_config, **mark.kwargs if mark else {}) with _create_app(request, enable_logging=celery_enable_logging, use_trap=use_celery_app_trap, parameters=celery_parameters, **config) as app: if not use_celery_app_trap: app.set_default() app.set_current() yield app @pytest.fixture(scope='session') def celery_session_worker(request, celery_session_app, celery_includes, celery_worker_pool, celery_worker_parameters): # type: (Any, Celery, Sequence[str], str) -> WorkController """Session Fixture: Start worker that lives throughout test suite.""" if not NO_WORKER: for module in celery_includes: celery_session_app.loader.import_task_module(module) with worker.start_worker(celery_session_app, pool=celery_worker_pool, **celery_worker_parameters) as w: yield w @pytest.fixture(scope='session') def celery_enable_logging(): # type: () -> bool """You can override this fixture to enable logging.""" return False @pytest.fixture(scope='session') def celery_includes(): # type: () -> Sequence[str] """You can override this include modules when a worker start. You can have this return a list of module names to import, these can be task modules, modules registering signals, and so on. """ return () @pytest.fixture(scope='session') def celery_worker_pool(): # type: () -> Union[str, Any] """You can override this fixture to set the worker pool. The "solo" pool is used by default, but you can set this to return e.g. "prefork". """ return 'solo' @pytest.fixture(scope='session') def celery_config(): # type: () -> Mapping[str, Any] """Redefine this fixture to configure the test Celery app. The config returned by your fixture will then be used to configure the :func:`celery_app` fixture. """ return {} @pytest.fixture(scope='session') def celery_parameters(): # type: () -> Mapping[str, Any] """Redefine this fixture to change the init parameters of test Celery app. The dict returned by your fixture will then be used as parameters when instantiating :class:`~celery.Celery`. """ return {} @pytest.fixture(scope='session') def celery_worker_parameters(): # type: () -> Mapping[str, Any] """Redefine this fixture to change the init parameters of Celery workers. This can be used e. g. to define queues the worker will consume tasks from. The dict returned by your fixture will then be used as parameters when instantiating :class:`~celery.worker.WorkController`. 
""" return {} @pytest.fixture() def celery_app(request, celery_config, celery_parameters, celery_enable_logging, use_celery_app_trap): """Fixture creating a Celery application instance.""" mark = request.node.get_marker('celery') config = dict(celery_config, **mark.kwargs if mark else {}) with _create_app(request, enable_logging=celery_enable_logging, use_trap=use_celery_app_trap, parameters=celery_parameters, **config) as app: yield app @pytest.fixture() def celery_worker(request, celery_app, celery_includes, celery_worker_pool, celery_worker_parameters): # type: (Any, Celery, Sequence[str], str) -> WorkController """Fixture: Start worker in a thread, stop it when the test returns.""" if not NO_WORKER: for module in celery_includes: celery_app.loader.import_task_module(module) with worker.start_worker(celery_app, pool=celery_worker_pool, **celery_worker_parameters) as w: yield w @pytest.fixture() def depends_on_current_app(celery_app): """Fixture that sets app as current.""" celery_app.set_current() celery-4.1.0/celery/contrib/__init__.py0000644000175000017500000000000013130607475017755 0ustar omeromer00000000000000celery-4.1.0/celery/contrib/abortable.py0000644000175000017500000001206413130607475020166 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Abortable Tasks. Abortable tasks overview ========================= For long-running :class:`Task`'s, it can be desirable to support aborting during execution. Of course, these tasks should be built to support abortion specifically. The :class:`AbortableTask` serves as a base class for all :class:`Task` objects that should support abortion by producers. * Producers may invoke the :meth:`abort` method on :class:`AbortableAsyncResult` instances, to request abortion. * Consumers (workers) should periodically check (and honor!) the :meth:`is_aborted` method at controlled points in their task's :meth:`run` method. The more often, the better. The necessary intermediate communication is dealt with by the :class:`AbortableTask` implementation. Usage example ------------- In the consumer: .. code-block:: python from __future__ import absolute_import from celery.contrib.abortable import AbortableTask from celery.utils.log import get_task_logger from proj.celery import app logger = get_logger(__name__) @app.task(bind=True, base=AbortableTask) def long_running_task(self): results = [] for i in range(100): # check after every 5 iterations... # (or alternatively, check when some timer is due) if not i % 5: if self.is_aborted(): # respect aborted state, and terminate gracefully. logger.warning('Task aborted') return value = do_something_expensive(i) results.append(y) logger.info('Task complete') return results In the producer: .. code-block:: python from __future__ import absolute_import import time from proj.tasks import MyLongRunningTask def myview(request): # result is of type AbortableAsyncResult result = long_running_task.delay() # abort the task after 10 seconds time.sleep(10) result.abort() After the `result.abort()` call, the task execution isn't aborted immediately. In fact, it's not guaranteed to abort at all. Keep checking `result.state` status, or call `result.get(timeout=)` to have it block until the task is finished. .. note:: In order to abort tasks, there needs to be communication between the producer and the consumer. This is currently implemented through the database backend. Therefore, this class will only work with the database backends. 
""" from __future__ import absolute_import, unicode_literals from celery import Task from celery.result import AsyncResult __all__ = ['AbortableAsyncResult', 'AbortableTask'] """ Task States ----------- .. state:: ABORTED ABORTED ~~~~~~~ Task is aborted (typically by the producer) and should be aborted as soon as possible. """ ABORTED = 'ABORTED' class AbortableAsyncResult(AsyncResult): """Represents an abortable result. Specifically, this gives the `AsyncResult` a :meth:`abort()` method, that sets the state of the underlying Task to `'ABORTED'`. """ def is_aborted(self): """Return :const:`True` if the task is (being) aborted.""" return self.state == ABORTED def abort(self): """Set the state of the task to :const:`ABORTED`. Abortable tasks monitor their state at regular intervals and terminate execution if so. Warning: Be aware that invoking this method does not guarantee when the task will be aborted (or even if the task will be aborted at all). """ # TODO: store_result requires all four arguments to be set, # but only state should be updated here return self.backend.store_result(self.id, result=None, state=ABORTED, traceback=None) class AbortableTask(Task): """Task that can be aborted. This serves as a base class for all :class:`Task`'s that support aborting during execution. All subclasses of :class:`AbortableTask` must call the :meth:`is_aborted` method periodically and act accordingly when the call evaluates to :const:`True`. """ abstract = True def AsyncResult(self, task_id): """Return the accompanying AbortableAsyncResult instance.""" return AbortableAsyncResult(task_id, backend=self.backend) def is_aborted(self, **kwargs): """Return true if task is aborted. Checks against the backend whether this :class:`AbortableAsyncResult` is :const:`ABORTED`. Always return :const:`False` in case the `task_id` parameter refers to a regular (non-abortable) :class:`Task`. Be aware that invoking this method will cause a hit in the backend (for example a database query), so find a good balance between calling it regularly (for responsiveness), but not too often (for performance). """ task_id = kwargs.get('task_id', self.request.id) result = self.AsyncResult(task_id) if not isinstance(result, AbortableAsyncResult): return False return result.is_aborted() celery-4.1.0/celery/contrib/rdb.py0000644000175000017500000001174313135426300016774 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Remote Debugger. Introduction ============ This is a remote debugger for Celery tasks running in multiprocessing pool workers. Inspired by a lost post on dzone.com. Usage ----- .. code-block:: python from celery.contrib import rdb from celery import task @task() def add(x, y): result = x + y rdb.set_trace() return result Environment Variables ===================== .. envvar:: CELERY_RDB_HOST ``CELERY_RDB_HOST`` ------------------- Hostname to bind to. Default is '127.0.01' (only accessable from localhost). .. envvar:: CELERY_RDB_PORT ``CELERY_RDB_PORT`` ------------------- Base port to bind to. Default is 6899. The debugger will try to find an available port starting from the base port. The selected port will be logged by the worker. 
""" from __future__ import absolute_import, print_function, unicode_literals import errno import os import socket import sys from pdb import Pdb from billiard.process import current_process from celery.five import range __all__ = [ 'CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'DEFAULT_PORT', 'Rdb', 'debugger', 'set_trace', ] DEFAULT_PORT = 6899 CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1' CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or DEFAULT_PORT) #: Holds the currently active debugger. _current = [None] _frame = getattr(sys, '_getframe') NO_AVAILABLE_PORT = """\ {self.ident}: Couldn't find an available port. Please specify one using the CELERY_RDB_PORT environment variable. """ BANNER = """\ {self.ident}: Ready to connect: telnet {self.host} {self.port} Type `exit` in session to continue. {self.ident}: Waiting for client... """ SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.' SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.' class Rdb(Pdb): """Remote debugger.""" me = 'Remote Debugger' _prev_outs = None _sock = None def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT, port_search_limit=100, port_skew=+0, out=sys.stdout): self.active = True self.out = out self._prev_handles = sys.stdin, sys.stdout self._sock, this_port = self.get_avail_port( host, port, port_search_limit, port_skew, ) self._sock.setblocking(1) self._sock.listen(1) self.ident = '{0}:{1}'.format(self.me, this_port) self.host = host self.port = this_port self.say(BANNER.format(self=self)) self._client, address = self._sock.accept() self._client.setblocking(1) self.remote_addr = ':'.join(str(v) for v in address) self.say(SESSION_STARTED.format(self=self)) self._handle = sys.stdin = sys.stdout = self._client.makefile('rw') Pdb.__init__(self, completekey='tab', stdin=self._handle, stdout=self._handle) def get_avail_port(self, host, port, search_limit=100, skew=+0): try: _, skew = current_process().name.split('-') skew = int(skew) except ValueError: pass this_port = None for i in range(search_limit): _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) this_port = port + skew + i try: _sock.bind((host, this_port)) except socket.error as exc: if exc.errno in [errno.EADDRINUSE, errno.EINVAL]: continue raise else: return _sock, this_port else: raise Exception(NO_AVAILABLE_PORT.format(self=self)) def say(self, m): print(m, file=self.out) def __enter__(self): return self def __exit__(self, *exc_info): self._close_session() def _close_session(self): self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles if self.active: if self._handle is not None: self._handle.close() if self._client is not None: self._client.close() if self._sock is not None: self._sock.close() self.active = False self.say(SESSION_ENDED.format(self=self)) def do_continue(self, arg): self._close_session() self.set_continue() return 1 do_c = do_cont = do_continue def do_quit(self, arg): self._close_session() self.set_quit() return 1 do_q = do_exit = do_quit def set_quit(self): # this raises a BdbQuit exception that we're unable to catch. 
sys.settrace(None) def debugger(): """Return the current debugger instance, or create if none.""" rdb = _current[0] if rdb is None or not rdb.active: rdb = _current[0] = Rdb() return rdb def set_trace(frame=None): """Set break-point at current location, or a specified frame.""" if frame is None: frame = _frame().f_back return debugger().set_trace(frame) celery-4.1.0/celery/contrib/migrate.py0000644000175000017500000003367013130607475017671 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Message migration tools (Broker <-> Broker).""" from __future__ import absolute_import, print_function, unicode_literals import socket from functools import partial from itertools import cycle, islice from kombu import eventloop, Queue from kombu.common import maybe_declare from kombu.utils.encoding import ensure_bytes from celery.app import app_or_default from celery.five import python_2_unicode_compatible, string, string_t from celery.utils.nodenames import worker_direct from celery.utils.text import str_to_list __all__ = [ 'StopFiltering', 'State', 'republish', 'migrate_task', 'migrate_tasks', 'move', 'task_id_eq', 'task_id_in', 'start_filter', 'move_task_by_id', 'move_by_idmap', 'move_by_taskmap', 'move_direct', 'move_direct_by_id', ] MOVING_PROGRESS_FMT = """\ Moving task {state.filtered}/{state.strtotal}: \ {body[task]}[{body[id]}]\ """ class StopFiltering(Exception): """Semi-predicate used to signal filter stop.""" @python_2_unicode_compatible class State(object): """Migration progress state.""" count = 0 filtered = 0 total_apx = 0 @property def strtotal(self): if not self.total_apx: return '?' return string(self.total_apx) def __repr__(self): if self.filtered: return '^{0.filtered}'.format(self) return '{0.count}/{0.strtotal}'.format(self) def republish(producer, message, exchange=None, routing_key=None, remove_props=['application_headers', 'content_type', 'content_encoding', 'headers']): """Republish message.""" body = ensure_bytes(message.body) # use raw message body. info, headers, props = (message.delivery_info, message.headers, message.properties) exchange = info['exchange'] if exchange is None else exchange routing_key = info['routing_key'] if routing_key is None else routing_key ctype, enc = message.content_type, message.content_encoding # remove compression header, as this will be inserted again # when the message is recompressed. 
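    # (Passing ``compression=compression`` to ``producer.publish()`` below
    # makes kombu recompress the body and restore the header.)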
    compression = headers.pop('compression', None)

    for key in remove_props:
        props.pop(key, None)

    producer.publish(ensure_bytes(body), exchange=exchange,
                     routing_key=routing_key, compression=compression,
                     headers=headers, content_type=ctype,
                     content_encoding=enc, **props)


def migrate_task(producer, body_, message, queues=None):
    """Migrate single task message."""
    info = message.delivery_info
    queues = {} if queues is None else queues
    republish(producer, message,
              exchange=queues.get(info['exchange']),
              routing_key=queues.get(info['routing_key']))


def filter_callback(callback, tasks):

    def filtered(body, message):
        if tasks and body['task'] not in tasks:
            return

        return callback(body, message)
    return filtered


def migrate_tasks(source, dest, migrate=migrate_task, app=None,
                  queues=None, **kwargs):
    """Migrate tasks from one broker to another."""
    app = app_or_default(app)
    queues = prepare_queues(queues)
    producer = app.amqp.Producer(dest, auto_declare=False)
    migrate = partial(migrate, producer, queues=queues)

    def on_declare_queue(queue):
        new_queue = queue(producer.channel)
        new_queue.name = queues.get(queue.name, queue.name)
        if new_queue.routing_key == queue.name:
            new_queue.routing_key = queues.get(queue.name,
                                               new_queue.routing_key)
        if new_queue.exchange.name == queue.name:
            new_queue.exchange.name = queues.get(queue.name, queue.name)
        new_queue.declare()

    return start_filter(app, source, migrate, queues=queues,
                        on_declare_queue=on_declare_queue, **kwargs)


def _maybe_queue(app, q):
    if isinstance(q, string_t):
        return app.amqp.queues[q]
    return q


def move(predicate, connection=None, exchange=None, routing_key=None,
         source=None, app=None, callback=None, limit=None, transform=None,
         **kwargs):
    """Find tasks by filtering them and move the tasks to a new queue.

    Arguments:
        predicate (Callable): Filter function used to decide the messages
            to move.  Must accept the standard signature of
            ``(body, message)`` used by Kombu consumer callbacks.
            If the predicate wants the message to be moved it must
            return either:

                1) a tuple of ``(exchange, routing_key)``, or

                2) a :class:`~kombu.entity.Queue` instance, or

                3) any other true value, meaning the specified
                   ``exchange`` and ``routing_key`` arguments will be used.
        connection (kombu.Connection): Custom connection to use.
        source: List[Union[str, kombu.Queue]]: Optional list of source
            queues to use instead of the default (queues
            in :setting:`task_queues`).  This list can also contain
            :class:`~kombu.entity.Queue` instances.
        exchange (str, kombu.Exchange): Default destination exchange.
        routing_key (str): Default destination routing key.
        limit (int): Limit number of messages to filter.
        callback (Callable): Callback called after message moved,
            with signature ``(state, body, message)``.
        transform (Callable): Optional function to transform the return
            value (destination) of the filter function.

    Also supports the same keyword arguments as :func:`start_filter`.

    To demonstrate, the :func:`move_task_by_id` operation can be implemented
    like this:

    .. code-block:: python

        def is_wanted_task(body, message):
            if body['id'] == wanted_id:
                return Queue('foo', exchange=Exchange('foo'),
                             routing_key='foo')

        move(is_wanted_task)

    or with a transform:

    .. code-block:: python

        def transform(value):
            if isinstance(value, string_t):
                return Queue(value, Exchange(value), value)
            return value

        move(is_wanted_task, transform=transform)

    Note:
        The predicate may also return a tuple of ``(exchange, routing_key)``
        to specify the destination where the task should be moved,
        or a :class:`~kombu.entity.Queue` instance.
        Any other true value means that the task will be moved to the
        default exchange/routing_key.
    """
    app = app_or_default(app)
    queues = [_maybe_queue(app, queue) for queue in source or []] or None
    with app.connection_or_acquire(connection, pool=False) as conn:
        producer = app.amqp.Producer(conn)
        state = State()

        def on_task(body, message):
            ret = predicate(body, message)
            if ret:
                if transform:
                    ret = transform(ret)
                if isinstance(ret, Queue):
                    maybe_declare(ret, conn.default_channel)
                    ex, rk = ret.exchange.name, ret.routing_key
                else:
                    ex, rk = expand_dest(ret, exchange, routing_key)
                republish(producer, message,
                          exchange=ex, routing_key=rk)
                message.ack()

                state.filtered += 1
                if callback:
                    callback(state, body, message)
                if limit and state.filtered >= limit:
                    raise StopFiltering()

        return start_filter(app, conn, on_task, consume_from=queues,
                            **kwargs)


def expand_dest(ret, exchange, routing_key):
    try:
        ex, rk = ret
    except (TypeError, ValueError):
        ex, rk = exchange, routing_key
    return ex, rk


def task_id_eq(task_id, body, message):
    """Return true if task id equals ``task_id``."""
    return body['id'] == task_id


def task_id_in(ids, body, message):
    """Return true if task id is a member of the set ``ids``."""
    return body['id'] in ids


def prepare_queues(queues):
    if isinstance(queues, string_t):
        queues = queues.split(',')
    if isinstance(queues, list):
        queues = dict(tuple(islice(cycle(q.split(':')), None, 2))
                      for q in queues)
    if queues is None:
        queues = {}
    return queues


class Filterer(object):

    def __init__(self, app, conn, filter,
                 limit=None, timeout=1.0,
                 ack_messages=False, tasks=None, queues=None,
                 callback=None, forever=False, on_declare_queue=None,
                 consume_from=None, state=None, accept=None, **kwargs):
        self.app = app
        self.conn = conn
        self.filter = filter
        self.limit = limit
        self.timeout = timeout
        self.ack_messages = ack_messages
        self.tasks = set(str_to_list(tasks) or [])
        self.queues = prepare_queues(queues)
        self.callback = callback
        self.forever = forever
        self.on_declare_queue = on_declare_queue
        self.consume_from = [
            _maybe_queue(self.app, q)
            for q in consume_from or list(self.queues)
        ]
        self.state = state or State()
        self.accept = accept

    def start(self):
        # start migrating messages.
        with self.prepare_consumer(self.create_consumer()):
            try:
                for _ in eventloop(self.conn,  # pragma: no cover
                                   timeout=self.timeout,
                                   ignore_timeouts=self.forever):
                    pass
            except socket.timeout:
                pass
            except StopFiltering:
                pass
        return self.state

    def update_state(self, body, message):
        self.state.count += 1
        if self.limit and self.state.count >= self.limit:
            raise StopFiltering()

    def ack_message(self, body, message):
        message.ack()

    def create_consumer(self):
        return self.app.amqp.TaskConsumer(
            self.conn,
            queues=self.consume_from,
            accept=self.accept,
        )

    def prepare_consumer(self, consumer):
        filter = self.filter
        update_state = self.update_state
        ack_message = self.ack_message
        if self.tasks:
            filter = filter_callback(filter, self.tasks)
            update_state = filter_callback(update_state, self.tasks)
            ack_message = filter_callback(ack_message, self.tasks)
        consumer.register_callback(filter)
        consumer.register_callback(update_state)
        if self.ack_messages:
            consumer.register_callback(self.ack_message)
        if self.callback is not None:
            callback = partial(self.callback, self.state)
            if self.tasks:
                callback = filter_callback(callback, self.tasks)
            consumer.register_callback(callback)
        self.declare_queues(consumer)
        return consumer

    def declare_queues(self, consumer):
        # declare all queues on the new broker.
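        # A passive declare only verifies that the queue exists and
        # returns its message count, which feeds the ``total_apx``
        # progress estimate; queues missing on the broker are skipped.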
for queue in consumer.queues: if self.queues and queue.name not in self.queues: continue if self.on_declare_queue is not None: self.on_declare_queue(queue) try: _, mcount, _ = queue( consumer.channel).queue_declare(passive=True) if mcount: self.state.total_apx += mcount except self.conn.channel_errors: pass def start_filter(app, conn, filter, limit=None, timeout=1.0, ack_messages=False, tasks=None, queues=None, callback=None, forever=False, on_declare_queue=None, consume_from=None, state=None, accept=None, **kwargs): """Filter tasks.""" return Filterer( app, conn, filter, limit=limit, timeout=timeout, ack_messages=ack_messages, tasks=tasks, queues=queues, callback=callback, forever=forever, on_declare_queue=on_declare_queue, consume_from=consume_from, state=state, accept=accept, **kwargs).start() def move_task_by_id(task_id, dest, **kwargs): """Find a task by id and move it to another queue. Arguments: task_id (str): Id of task to find and move. dest: (str, kombu.Queue): Destination queue. **kwargs (Any): Also supports the same keyword arguments as :func:`move`. """ return move_by_idmap({task_id: dest}, **kwargs) def move_by_idmap(map, **kwargs): """Move tasks by matching from a ``task_id: queue`` mapping. Where ``queue`` is a queue to move the task to. Example: >>> move_by_idmap({ ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'), ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'), ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')}, ... queues=['hipri']) """ def task_id_in_map(body, message): return map.get(body['id']) # adding the limit means that we don't have to consume any more # when we've found everything. return move(task_id_in_map, limit=len(map), **kwargs) def move_by_taskmap(map, **kwargs): """Move tasks by matching from a ``task_name: queue`` mapping. ``queue`` is the queue to move the task to. Example: >>> move_by_taskmap({ ... 'tasks.add': Queue('name'), ... 'tasks.mul': Queue('name'), ... 
}) """ def task_name_in_map(body, message): return map.get(body['task']) # <- name of task return move(task_name_in_map, **kwargs) def filter_status(state, body, message, **kwargs): print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs)) move_direct = partial(move, transform=worker_direct) move_direct_by_id = partial(move_task_by_id, transform=worker_direct) move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct) move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct) celery-4.1.0/celery/contrib/testing/0000755000175000017500000000000013135426347017335 5ustar omeromer00000000000000celery-4.1.0/celery/contrib/testing/worker.py0000644000175000017500000001154513130607475021224 0ustar omeromer00000000000000"""Embedded workers for integration tests.""" from __future__ import absolute_import, unicode_literals import os import threading from contextlib import contextmanager from celery import worker from celery.result import allow_join_result, _set_task_join_will_block from celery.utils.dispatch import Signal from celery.utils.nodenames import anon_nodename WORKER_LOGLEVEL = os.environ.get('WORKER_LOGLEVEL', 'error') test_worker_starting = Signal( name='test_worker_starting', providing_args={}, ) test_worker_started = Signal( name='test_worker_started', providing_args={'worker', 'consumer'}, ) test_worker_stopped = Signal( name='test_worker_stopped', providing_args={'worker'}, ) class TestWorkController(worker.WorkController): """Worker that can synchronize on being fully started.""" def __init__(self, *args, **kwargs): # type: (*Any, **Any) -> None self._on_started = threading.Event() super(TestWorkController, self).__init__(*args, **kwargs) def on_consumer_ready(self, consumer): # type: (celery.worker.consumer.Consumer) -> None """Callback called when the Consumer blueprint is fully started.""" self._on_started.set() test_worker_started.send( sender=self.app, worker=self, consumer=consumer) def ensure_started(self): # type: () -> None """Wait for worker to be fully up and running. Warning: Worker must be started within a thread for this to work, or it will block forever. """ self._on_started.wait() @contextmanager def start_worker(app, concurrency=1, pool='solo', loglevel=WORKER_LOGLEVEL, logfile=None, perform_ping_check=True, ping_task_timeout=10.0, **kwargs): # type: (Celery, int, str, Union[str, int], # str, bool, float, **Any) -> # Iterable """Start embedded worker. Yields: celery.app.worker.Worker: worker instance. """ test_worker_starting.send(sender=app) with _start_worker_thread(app, concurrency=concurrency, pool=pool, loglevel=loglevel, logfile=logfile, **kwargs) as worker: if perform_ping_check: from .tasks import ping with allow_join_result(): assert ping.delay().get(timeout=ping_task_timeout) == 'pong' yield worker test_worker_stopped.send(sender=app, worker=worker) @contextmanager def _start_worker_thread(app, concurrency=1, pool='solo', loglevel=WORKER_LOGLEVEL, logfile=None, WorkController=TestWorkController, **kwargs): # type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable """Start Celery worker in a thread. Yields: celery.worker.Worker: worker instance. 
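    Example:
        Illustrative sketch only -- assumes ``app`` has already been
        prepared for testing (e.g. with
        :func:`celery.contrib.testing.app.TestApp`):

        >>> with _start_worker_thread(app) as worker:
        ...     assert worker.hostname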
""" setup_app_for_worker(app, loglevel, logfile) assert 'celery.ping' in app.tasks # Make sure we can connect to the broker with app.connection() as conn: conn.default_channel.queue_declare worker = WorkController( app=app, concurrency=concurrency, hostname=anon_nodename(), pool=pool, loglevel=loglevel, logfile=logfile, # not allowed to override TestWorkController.on_consumer_ready ready_callback=None, without_heartbeat=True, without_mingle=True, without_gossip=True, **kwargs) t = threading.Thread(target=worker.start) t.start() worker.ensure_started() _set_task_join_will_block(False) yield worker from celery.worker import state state.should_terminate = 0 t.join(10) state.should_terminate = None @contextmanager def _start_worker_process(app, concurrency=1, pool='solo', loglevel=WORKER_LOGLEVEL, logfile=None, **kwargs): # type (Celery, int, str, Union[int, str], str, **Any) -> Iterable """Start worker in separate process. Yields: celery.app.worker.Worker: worker instance. """ from celery.apps.multi import Cluster, Node app.set_current() cluster = Cluster([Node('testworker1@%h')]) cluster.start() yield cluster.stopwait() def setup_app_for_worker(app, loglevel, logfile): # type: (Celery, Union[str, int], str) -> None """Setup the app to be used for starting an embedded worker.""" app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile) celery-4.1.0/celery/contrib/testing/mocks.py0000644000175000017500000000632513130607475021027 0ustar omeromer00000000000000"""Useful mocks for unit testing.""" from __future__ import absolute_import, unicode_literals import numbers from datetime import datetime, timedelta try: from case import Mock except ImportError: try: from unittest.mock import Mock except ImportError: from mock import Mock def TaskMessage(name, id=None, args=(), kwargs={}, callbacks=None, errbacks=None, chain=None, shadow=None, utc=None, **options): # type: (str, str, Sequence, Mapping, Sequence[Signature], # Sequence[Signature], Sequence[Signature], # str, bool, **Any) -> Any """Create task message in protocol 2 format.""" from celery import uuid from kombu.serialization import dumps id = id or uuid() message = Mock(name='TaskMessage-{0}'.format(id)) message.headers = { 'id': id, 'task': name, 'shadow': shadow, } embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain} message.headers.update(options) message.content_type, message.content_encoding, message.body = dumps( (args, kwargs, embed), serializer='json', ) message.payload = (args, kwargs, embed) return message def TaskMessage1(name, id=None, args=(), kwargs={}, callbacks=None, errbacks=None, chain=None, **options): # type: (str, str, Sequence, Mapping, Sequence[Signature], # Sequence[Signature], Sequence[Signature]) -> Any """Create task message in protocol 1 format.""" from celery import uuid from kombu.serialization import dumps id = id or uuid() message = Mock(name='TaskMessage-{0}'.format(id)) message.headers = {} message.payload = { 'task': name, 'id': id, 'args': args, 'kwargs': kwargs, 'callbacks': callbacks, 'errbacks': errbacks, } message.payload.update(options) message.content_type, message.content_encoding, message.body = dumps( message.payload, ) return message def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage): # type: (Celery, Signature, bool, Any) -> Any """Create task message from :class:`celery.Signature`. 
    Example:
        >>> m = task_message_from_sig(app, add.s(2, 2))
        >>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey')
    """
    sig.freeze()
    callbacks = sig.options.pop('link', None)
    errbacks = sig.options.pop('link_error', None)
    countdown = sig.options.pop('countdown', None)
    if countdown:
        eta = app.now() + timedelta(seconds=countdown)
    else:
        eta = sig.options.pop('eta', None)
    if eta and isinstance(eta, datetime):
        eta = eta.isoformat()
    expires = sig.options.pop('expires', None)
    if expires and isinstance(expires, numbers.Real):
        expires = app.now() + timedelta(seconds=expires)
    if expires and isinstance(expires, datetime):
        expires = expires.isoformat()
    return TaskMessage(
        sig.task, id=sig.id,
        args=sig.args,
        kwargs=sig.kwargs,
        callbacks=[dict(s) for s in callbacks] if callbacks else None,
        errbacks=[dict(s) for s in errbacks] if errbacks else None,
        eta=eta,
        expires=expires,
        utc=utc,
        **sig.options
    )
celery-4.1.0/celery/contrib/testing/__init__.py0000644000175000017500000000000013130607475021432 0ustar omeromer00000000000000celery-4.1.0/celery/contrib/testing/tasks.py0000644000175000017500000000041113130607475021026 0ustar omeromer00000000000000"""Helper tasks for integration tests."""
from __future__ import absolute_import, unicode_literals

from celery import shared_task


@shared_task(name='celery.ping')
def ping():
    # type: () -> str
    """Simple task that just returns 'pong'."""
    return 'pong'
celery-4.1.0/celery/contrib/testing/app.py0000644000175000017500000000555113130607475020473 0ustar omeromer00000000000000"""Create Celery app instances used for testing."""
from __future__ import absolute_import, unicode_literals

import weakref
from contextlib import contextmanager
from copy import deepcopy

from kombu.utils.imports import symbol_by_name

from celery import Celery
from celery import _state

#: Contains the default configuration values for the test app.
DEFAULT_TEST_CONFIG = {
    'worker_hijack_root_logger': False,
    'worker_log_color': False,
    'accept_content': {'json'},
    'enable_utc': True,
    'timezone': 'UTC',
    'broker_url': 'memory://',
    'result_backend': 'cache+memory://',
    'broker_heartbeat': 0,
}


class Trap(object):
    """Trap that pretends to be an app but raises an exception instead.

    This is to protect against code that doesn't properly pass app
    instances, and instead falls back to the current_app.
    """

    def __getattr__(self, name):
        raise RuntimeError('Test depends on current_app')


class UnitLogging(symbol_by_name(Celery.log_cls)):
    """Sets up logging for the test application."""

    def __init__(self, *args, **kwargs):
        super(UnitLogging, self).__init__(*args, **kwargs)
        self.already_setup = True


def TestApp(name=None, config=None, enable_logging=False,
            set_as_current=False, log=UnitLogging,
            backend=None, broker=None, **kwargs):
    """App used for testing."""
    from . import tasks  # noqa
    config = dict(deepcopy(DEFAULT_TEST_CONFIG), **config or {})
    if broker is not None:
        config.pop('broker_url', None)
    if backend is not None:
        config.pop('result_backend', None)
    log = None if enable_logging else log
    test_app = Celery(
        name or 'celery.tests',
        set_as_current=set_as_current,
        log=log,
        broker=broker,
        backend=backend,
        **kwargs)
    test_app.add_defaults(config)
    return test_app


@contextmanager
def set_trap(app):
    """Contextmanager that installs the trap app.

    The trap means that anything trying to use the current or
    default app will raise an exception.
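    Used by :func:`setup_default_app` when ``use_trap`` is enabled.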
""" trap = Trap() prev_tls = _state._tls _state.set_default_app(trap) class NonTLS(object): current_app = trap _state._tls = NonTLS() yield _state._tls = prev_tls @contextmanager def setup_default_app(app, use_trap=False): """Setup default app for testing. Ensures state is clean after the test returns. """ prev_current_app = _state.get_current_app() prev_default_app = _state.default_app prev_finalizers = set(_state._on_app_finalizers) prev_apps = weakref.WeakSet(_state._apps) if use_trap: with set_trap(app): yield else: yield _state.set_default_app(prev_default_app) _state._tls.current_app = prev_current_app if app is not prev_current_app: app.close() _state._on_app_finalizers = prev_finalizers _state._apps = prev_apps celery-4.1.0/celery/contrib/testing/manager.py0000644000175000017500000001505513130607475021325 0ustar omeromer00000000000000"""Integration testing utilities.""" from __future__ import absolute_import, print_function, unicode_literals import socket import sys from collections import defaultdict from functools import partial from itertools import count from kombu.utils.functional import retry_over_time from celery.exceptions import TimeoutError from celery.five import items from celery.result import ResultSet from celery.utils.text import truncate from celery.utils.time import humanize_seconds as _humanize_seconds E_STILL_WAITING = 'Still waiting for {0}. Trying again {when}: {exc!r}' humanize_seconds = partial(_humanize_seconds, microseconds=True) class Sentinel(Exception): """Signifies the end of something.""" class ManagerMixin(object): """Mixin that adds :class:`Manager` capabilities.""" def _init_manager(self, block_timeout=30 * 60.0, no_join=False, stdout=None, stderr=None): # type: (float, bool, TextIO, TextIO) -> None self.stdout = sys.stdout if stdout is None else stdout self.stderr = sys.stderr if stderr is None else stderr self.connerrors = self.app.connection().recoverable_connection_errors self.block_timeout = block_timeout self.no_join = no_join def remark(self, s, sep='-'): # type: (str, str) -> None print('{0}{1}'.format(sep, s), file=self.stdout) def missing_results(self, r): # type: (Sequence[AsyncResult]) -> Sequence[str] return [res.id for res in r if res.id not in res.backend._cache] def wait_for(self, fun, catch, desc='thing', args=(), kwargs={}, errback=None, max_retries=10, interval_start=0.1, interval_step=0.5, interval_max=5.0, emit_warning=False, **options): # type: (Callable, Sequence[Any], str, Tuple, Dict, Callable, # int, float, float, float, bool, **Any) -> Any """Wait for event to happen. The `catch` argument specifies the exception that means the event has not happened yet. 
""" def on_error(exc, intervals, retries): interval = next(intervals) if emit_warning: self.warn(E_STILL_WAITING.format( desc, when=humanize_seconds(interval, 'in', ' '), exc=exc, )) if errback: errback(exc, interval, retries) return interval return self.retry_over_time( fun, catch, args=args, kwargs=kwargs, errback=on_error, max_retries=max_retries, interval_start=interval_start, interval_step=interval_step, **options ) def ensure_not_for_a_while(self, fun, catch, desc='thing', max_retries=20, interval_start=0.1, interval_step=0.02, interval_max=1.0, emit_warning=False, **options): """Make sure something does not happen (at least for a while).""" try: return self.wait_for( fun, catch, desc=desc, max_retries=max_retries, interval_start=interval_start, interval_step=interval_step, interval_max=interval_max, emit_warning=emit_warning, ) except catch: pass else: raise AssertionError('Should not have happened: {0}'.format(desc)) def retry_over_time(self, *args, **kwargs): return retry_over_time(*args, **kwargs) def join(self, r, propagate=False, max_retries=10, **kwargs): if self.no_join: return if not isinstance(r, ResultSet): r = self.app.ResultSet([r]) received = [] def on_result(task_id, value): received.append(task_id) for i in range(max_retries) if max_retries else count(0): received[:] = [] try: return r.get(callback=on_result, propagate=propagate, **kwargs) except (socket.timeout, TimeoutError) as exc: waiting_for = self.missing_results(r) self.remark( 'Still waiting for {0}/{1}: [{2}]: {3!r}'.format( len(r) - len(received), len(r), truncate(', '.join(waiting_for)), exc), '!', ) except self.connerrors as exc: self.remark('join: connection lost: {0!r}'.format(exc), '!') raise AssertionError('Test failed: Missing task results') def inspect(self, timeout=3.0): return self.app.control.inspect(timeout=timeout) def query_tasks(self, ids, timeout=0.5): for reply in items(self.inspect(timeout).query_task(*ids) or {}): yield reply def query_task_states(self, ids, timeout=0.5): states = defaultdict(set) for hostname, reply in self.query_tasks(ids, timeout=timeout): for task_id, (state, _) in items(reply): states[state].add(task_id) return states def assert_accepted(self, ids, interval=0.5, desc='waiting for tasks to be accepted', **policy): return self.assert_task_worker_state( self.is_accepted, ids, interval=interval, desc=desc, **policy ) def assert_received(self, ids, interval=0.5, desc='waiting for tasks to be received', **policy): return self.assert_task_worker_state( self.is_accepted, ids, interval=interval, desc=desc, **policy ) def assert_task_worker_state(self, fun, ids, interval=0.5, **policy): return self.wait_for( partial(self.true_or_raise, fun, ids, timeout=interval), (Sentinel,), **policy ) def is_received(self, ids, **kwargs): return self._ids_matches_state( ['reserved', 'active', 'ready'], ids, **kwargs) def is_accepted(self, ids, **kwargs): return self._ids_matches_state(['active', 'ready'], ids, **kwargs) def _ids_matches_state(self, expected_states, ids, timeout=0.5): states = self.query_task_states(ids, timeout=timeout) return all( any(t in s for s in [states[k] for k in expected_states]) for t in ids ) def true_or_raise(self, fun, *args, **kwargs): res = fun(*args, **kwargs) if not res: raise Sentinel() return res class Manager(ManagerMixin): """Test helpers for task integration tests.""" def __init__(self, app, **kwargs): self.app = app self._init_manager(**kwargs) celery-4.1.0/celery/contrib/sphinx.py0000644000175000017500000000402513130607475017542 0ustar 
omeromer00000000000000# -*- coding: utf-8 -*-
"""Sphinx documentation plugin used to document tasks.

Introduction
============

Usage
-----

Add the extension to your :file:`docs/conf.py` configuration module:

.. code-block:: python

    extensions = (..., 'celery.contrib.sphinx')

If you'd like to change the prefix for tasks in reference documentation
then you can change the ``celery_task_prefix`` configuration value:

.. code-block:: python

    celery_task_prefix = '(task)'  # < default

With the extension installed `autodoc` will automatically find
task decorated objects and generate the correct documentation for them
(as well as add a ``(task)`` prefix), and you can also refer to the
tasks using `:task:proj.tasks.add` syntax.

Use ``.. autotask::`` to manually document a task.
"""
from __future__ import absolute_import, unicode_literals

from inspect import formatargspec

from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import FunctionDocumenter

from celery.app.task import BaseTask
from celery.five import getfullargspec


class TaskDocumenter(FunctionDocumenter):
    """Document task definitions."""

    objtype = 'task'
    member_order = 11

    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # use a default here so members without a ``__wrapped__``
        # attribute return false instead of raising AttributeError.
        return (isinstance(member, BaseTask) and
                getattr(member, '__wrapped__', None) is not None)

    def format_args(self):
        wrapped = getattr(self.object, '__wrapped__', None)
        if wrapped is not None:
            argspec = getfullargspec(wrapped)
            fmt = formatargspec(*argspec)
            fmt = fmt.replace('\\', '\\\\')
            return fmt
        return ''

    def document_members(self, all_members=False):
        pass


class TaskDirective(PyModulelevel):
    """Sphinx task directive."""

    def get_signature_prefix(self, sig):
        return self.env.config.celery_task_prefix


def setup(app):
    """Setup Sphinx extension."""
    app.add_autodocumenter(TaskDocumenter)
    app.add_directive_to_domain('py', 'task', TaskDirective)
    app.add_config_value('celery_task_prefix', '(task)', True)
celery-4.1.0/celery/worker/0000755000175000017500000000000013135426347015531 5ustar omeromer00000000000000celery-4.1.0/celery/worker/heartbeat.py0000644000175000017500000000410413130607475020037 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""Heartbeat service.

This is the internal thread responsible for sending heartbeat events
at regular intervals (may not be an actual thread).
"""
from __future__ import absolute_import, unicode_literals

from celery.signals import heartbeat_sent
from celery.utils.sysinfo import load_average

from .state import SOFTWARE_INFO, active_requests, all_total_count

__all__ = ['Heart']


class Heart(object):
    """Timer sending heartbeats at regular intervals.

    Arguments:
        timer (kombu.async.timer.Timer): Timer to use.
        eventer (celery.events.EventDispatcher): Event dispatcher
            to use.
        interval (float): Time in seconds between sending
            heartbeats.  Default is 2 seconds.
    """

    def __init__(self, timer, eventer, interval=None):
        self.timer = timer
        self.eventer = eventer
        self.interval = float(interval or 2.0)
        self.tref = None

        # Make event dispatcher start/stop us when enabled/disabled.
        self.eventer.on_enabled.add(self.start)
        self.eventer.on_disabled.add(self.stop)

        # Only send heartbeat_sent signal if it has receivers.
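        # Cache the bound send method (or None) so that _send() can skip
        # the signal dispatch entirely when nothing is connected.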
        self._send_sent_signal = (
            heartbeat_sent.send if heartbeat_sent.receivers else None)

    def _send(self, event):
        if self._send_sent_signal is not None:
            self._send_sent_signal(sender=self)
        return self.eventer.send(event, freq=self.interval,
                                 active=len(active_requests),
                                 processed=all_total_count[0],
                                 loadavg=load_average(),
                                 **SOFTWARE_INFO)

    def start(self):
        if self.eventer.enabled:
            self._send('worker-online')
            self.tref = self.timer.call_repeatedly(
                self.interval, self._send, ('worker-heartbeat',),
            )

    def stop(self):
        if self.tref is not None:
            self.timer.cancel(self.tref)
            self.tref = None
        if self.eventer.enabled:
            self._send('worker-offline')
celery-4.1.0/celery/worker/worker.py0000644000175000017500000003416013130607475017416 0ustar omeromer00000000000000# -*- coding: utf-8 -*-
"""WorkController can be used to instantiate in-process workers.

The command-line interface for the worker is in :mod:`celery.bin.worker`,
while the worker program is in :mod:`celery.apps.worker`.

The worker program is responsible for adding signal handlers,
setting up logging, etc.  This is a bare-bones worker without
global side-effects (except for the global state stored in
:mod:`celery.worker.state`).

The worker consists of several components, all managed by bootsteps
(:mod:`celery.bootsteps`).
"""
from __future__ import absolute_import, unicode_literals

import os
import sys

try:
    import resource
except ImportError:  # pragma: no cover
    resource = None  # noqa

from billiard import cpu_count
from kombu.utils.compat import detect_environment

from celery import bootsteps
from celery.bootsteps import RUN, TERMINATE
from celery import concurrency as _concurrency
from celery import signals
from celery.exceptions import (
    ImproperlyConfigured, WorkerTerminate, TaskRevokedError,
)
from celery.five import python_2_unicode_compatible, values
from celery.platforms import EX_FAILURE, create_pidlock
from celery.utils.imports import reload_from_cwd
from celery.utils.log import mlevel, worker_logger as logger
from celery.utils.nodenames import default_nodename, worker_direct
from celery.utils.text import str_to_list
from celery.utils.threads import default_socket_timeout

from . import state

__all__ = ['WorkController']

#: Default socket timeout at shutdown.
SHUTDOWN_SOCKET_TIMEOUT = 5.0

SELECT_UNKNOWN_QUEUE = """
Trying to select queue subset of {0!r}, but queue {1} isn't
defined in the `task_queues` setting.

If you want to automatically declare unknown queues you can
enable the `task_create_missing_queues` setting.
"""

DESELECT_UNKNOWN_QUEUE = """
Trying to deselect queue subset of {0!r}, but queue {1} isn't
defined in the `task_queues` setting.
"""


@python_2_unicode_compatible
class WorkController(object):
    """Unmanaged worker instance."""

    app = None

    pidlock = None
    blueprint = None
    pool = None
    semaphore = None

    #: contains the exit code if a :exc:`SystemExit` event is handled.
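    #: Set by :meth:`stop` (:meth:`start` forwards a :exc:`SystemExit`
    #: code here when it handles one).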
exitcode = None class Blueprint(bootsteps.Blueprint): """Worker bootstep blueprint.""" name = 'Worker' default_steps = { 'celery.worker.components:Hub', 'celery.worker.components:Pool', 'celery.worker.components:Beat', 'celery.worker.components:Timer', 'celery.worker.components:StateDB', 'celery.worker.components:Consumer', 'celery.worker.autoscale:WorkerComponent', } def __init__(self, app=None, hostname=None, **kwargs): self.app = app or self.app self.hostname = default_nodename(hostname) self.app.loader.init_worker() self.on_before_init(**kwargs) self.setup_defaults(**kwargs) self.on_after_init(**kwargs) self.setup_instance(**self.prepare_args(**kwargs)) def setup_instance(self, queues=None, ready_callback=None, pidfile=None, include=None, use_eventloop=None, exclude_queues=None, **kwargs): self.pidfile = pidfile self.setup_queues(queues, exclude_queues) self.setup_includes(str_to_list(include)) # Set default concurrency if not self.concurrency: try: self.concurrency = cpu_count() except NotImplementedError: self.concurrency = 2 # Options self.loglevel = mlevel(self.loglevel) self.ready_callback = ready_callback or self.on_consumer_ready # this connection won't establish, only used for params self._conninfo = self.app.connection_for_read() self.use_eventloop = ( self.should_use_eventloop() if use_eventloop is None else use_eventloop ) self.options = kwargs signals.worker_init.send(sender=self) # Initialize bootsteps self.pool_cls = _concurrency.get_implementation(self.pool_cls) self.steps = [] self.on_init_blueprint() self.blueprint = self.Blueprint( steps=self.app.steps['worker'], on_start=self.on_start, on_close=self.on_close, on_stopped=self.on_stopped, ) self.blueprint.apply(self, **kwargs) def on_init_blueprint(self): pass def on_before_init(self, **kwargs): pass def on_after_init(self, **kwargs): pass def on_start(self): if self.pidfile: self.pidlock = create_pidlock(self.pidfile) def on_consumer_ready(self, consumer): pass def on_close(self): self.app.loader.shutdown_worker() def on_stopped(self): self.timer.stop() self.consumer.shutdown() if self.pidlock: self.pidlock.release() def setup_queues(self, include, exclude=None): include = str_to_list(include) exclude = str_to_list(exclude) try: self.app.amqp.queues.select(include) except KeyError as exc: raise ImproperlyConfigured( SELECT_UNKNOWN_QUEUE.strip().format(include, exc)) try: self.app.amqp.queues.deselect(exclude) except KeyError as exc: raise ImproperlyConfigured( DESELECT_UNKNOWN_QUEUE.strip().format(exclude, exc)) if self.app.conf.worker_direct: self.app.amqp.queues.select_add(worker_direct(self.hostname)) def setup_includes(self, includes): # Update celery_include to have all known task modules, so that we # ensure all task modules are imported in case an execv happens. 
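        # The union of the configured include list and the modules of
        # all registered tasks is written back to ``conf.include`` below.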
prev = tuple(self.app.conf.include) if includes: prev += tuple(includes) [self.app.loader.import_task_module(m) for m in includes] self.include = includes task_modules = {task.__class__.__module__ for task in values(self.app.tasks)} self.app.conf.include = tuple(set(prev) | task_modules) def prepare_args(self, **kwargs): return kwargs def _send_worker_shutdown(self): signals.worker_shutdown.send(sender=self) def start(self): try: self.blueprint.start(self) except WorkerTerminate: self.terminate() except Exception as exc: logger.critical('Unrecoverable error: %r', exc, exc_info=True) self.stop(exitcode=EX_FAILURE) except SystemExit as exc: self.stop(exitcode=exc.code) except KeyboardInterrupt: self.stop(exitcode=EX_FAILURE) def register_with_event_loop(self, hub): self.blueprint.send_all( self, 'register_with_event_loop', args=(hub,), description='hub.register', ) def _process_task_sem(self, req): return self._quick_acquire(self._process_task, req) def _process_task(self, req): """Process task by sending it to the pool of workers.""" try: req.execute_using_pool(self.pool) except TaskRevokedError: try: self._quick_release() # Issue 877 except AttributeError: pass def signal_consumer_close(self): try: self.consumer.close() except AttributeError: pass def should_use_eventloop(self): return (detect_environment() == 'default' and self._conninfo.transport.implements.async and not self.app.IS_WINDOWS) def stop(self, in_sighandler=False, exitcode=None): """Graceful shutdown of the worker server.""" if exitcode is not None: self.exitcode = exitcode if self.blueprint.state == RUN: self.signal_consumer_close() if not in_sighandler or self.pool.signal_safe: self._shutdown(warm=True) self._send_worker_shutdown() def terminate(self, in_sighandler=False): """Not so graceful shutdown of the worker server.""" if self.blueprint.state != TERMINATE: self.signal_consumer_close() if not in_sighandler or self.pool.signal_safe: self._shutdown(warm=False) def _shutdown(self, warm=True): # if blueprint does not exist it means that we had an # error before the bootsteps could be initialized. 
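        # in that case there's nothing to stop, and the shutdown
        # becomes a no-op.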
        if self.blueprint is not None:
            with default_socket_timeout(SHUTDOWN_SOCKET_TIMEOUT):  # Issue 975
                self.blueprint.stop(self, terminate=not warm)
                self.blueprint.join()

    def reload(self, modules=None, reload=False, reloader=None):
        list(self._reload_modules(
            modules, force_reload=reload, reloader=reloader))

        if self.consumer:
            self.consumer.update_strategies()
            self.consumer.reset_rate_limits()
        try:
            self.pool.restart()
        except NotImplementedError:
            pass

    def _reload_modules(self, modules=None, **kwargs):
        return (
            self._maybe_reload_module(m, **kwargs)
            for m in set(self.app.loader.task_modules
                         if modules is None else (modules or ()))
        )

    def _maybe_reload_module(self, module, force_reload=False, reloader=None):
        if module not in sys.modules:
            logger.debug('importing module %s', module)
            return self.app.loader.import_from_cwd(module)
        elif force_reload:
            logger.debug('reloading module %s', module)
            return reload_from_cwd(sys.modules[module], reloader)

    def info(self):
        return {'total': self.state.total_count,
                'pid': os.getpid(),
                'clock': str(self.app.clock)}

    def rusage(self):
        if resource is None:
            raise NotImplementedError('rusage not supported by this platform')
        s = resource.getrusage(resource.RUSAGE_SELF)
        return {
            'utime': s.ru_utime,
            'stime': s.ru_stime,
            'maxrss': s.ru_maxrss,
            'ixrss': s.ru_ixrss,
            'idrss': s.ru_idrss,
            'isrss': s.ru_isrss,
            'minflt': s.ru_minflt,
            'majflt': s.ru_majflt,
            'nswap': s.ru_nswap,
            'inblock': s.ru_inblock,
            'oublock': s.ru_oublock,
            'msgsnd': s.ru_msgsnd,
            'msgrcv': s.ru_msgrcv,
            'nsignals': s.ru_nsignals,
            'nvcsw': s.ru_nvcsw,
            'nivcsw': s.ru_nivcsw,
        }

    def stats(self):
        info = self.info()
        info.update(self.blueprint.info(self))
        info.update(self.consumer.blueprint.info(self.consumer))
        try:
            info['rusage'] = self.rusage()
        except NotImplementedError:
            info['rusage'] = 'N/A'
        return info

    def __repr__(self):
        """``repr(worker)``."""
        return '<Worker: {self.hostname} ({state})>'.format(
            self=self,
            state=self.blueprint.human_state() if self.blueprint else 'INIT',
        )

    def __str__(self):
        """``str(worker) == worker.hostname``."""
        return self.hostname

    @property
    def state(self):
        return state

    def setup_defaults(self, concurrency=None, loglevel='WARN', logfile=None,
                       task_events=None, pool=None, consumer_cls=None,
                       timer_cls=None, timer_precision=None,
                       autoscaler_cls=None,
                       pool_putlocks=None, pool_restarts=None,
                       optimization=None, O=None,  # O maps to -O=fair
                       statedb=None,
                       time_limit=None, soft_time_limit=None,
                       scheduler=None,
                       pool_cls=None,              # XXX use pool
                       state_db=None,              # XXX use statedb
                       task_time_limit=None,       # XXX use time_limit
                       task_soft_time_limit=None,  # XXX use soft_time_limit
                       scheduler_cls=None,         # XXX use scheduler
                       schedule_filename=None,
                       max_tasks_per_child=None,
                       prefetch_multiplier=None, disable_rate_limits=None,
                       worker_lost_wait=None,
                       max_memory_per_child=None, **_kw):
        either = self.app.either
        self.loglevel = loglevel
        self.logfile = logfile

        self.concurrency = either('worker_concurrency', concurrency)
        self.task_events = either('worker_send_task_events', task_events)
        self.pool_cls = either('worker_pool', pool, pool_cls)
        self.consumer_cls = either('worker_consumer', consumer_cls)
        self.timer_cls = either('worker_timer', timer_cls)
        self.timer_precision = either(
            'worker_timer_precision', timer_precision,
        )
        self.optimization = optimization or O
        self.autoscaler_cls = either('worker_autoscaler', autoscaler_cls)
        self.pool_putlocks = either('worker_pool_putlocks', pool_putlocks)
        self.pool_restarts = either('worker_pool_restarts', pool_restarts)
        self.statedb = either('worker_state_db', statedb, state_db)
        self.schedule_filename = either(
'beat_schedule_filename', schedule_filename, ) self.scheduler = either('beat_scheduler', scheduler, scheduler_cls) self.time_limit = either( 'task_time_limit', time_limit, task_time_limit) self.soft_time_limit = either( 'task_soft_time_limit', soft_time_limit, task_soft_time_limit, ) self.max_tasks_per_child = either( 'worker_max_tasks_per_child', max_tasks_per_child, ) self.max_memory_per_child = either( 'worker_max_memory_per_child', max_memory_per_child, ) self.prefetch_multiplier = int(either( 'worker_prefetch_multiplier', prefetch_multiplier, )) self.disable_rate_limits = either( 'worker_disable_rate_limits', disable_rate_limits, ) self.worker_lost_wait = either('worker_lost_wait', worker_lost_wait) celery-4.1.0/celery/worker/autoscale.py0000644000175000017500000001142313130607475020062 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Pool Autoscaling. This module implements the internal thread responsible for growing and shrinking the pool according to the current autoscale settings. The autoscale thread is only enabled if the :option:`celery worker --autoscale` option is used. """ from __future__ import absolute_import, unicode_literals import os import threading from time import sleep from kombu.async.semaphore import DummyLock from celery import bootsteps from celery.five import monotonic from celery.utils.log import get_logger from celery.utils.threads import bgThread from . import state from .components import Pool __all__ = ['Autoscaler', 'WorkerComponent'] logger = get_logger(__name__) debug, info, error = logger.debug, logger.info, logger.error AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) class WorkerComponent(bootsteps.StartStopStep): """Bootstep that starts the autoscaler thread/timer in the worker.""" label = 'Autoscaler' conditional = True requires = (Pool,) def __init__(self, w, **kwargs): self.enabled = w.autoscale w.autoscaler = None def create(self, w): scaler = w.autoscaler = self.instantiate( w.autoscaler_cls, w.pool, w.max_concurrency, w.min_concurrency, worker=w, mutex=DummyLock() if w.use_eventloop else None, ) return scaler if not w.use_eventloop else None def register_with_event_loop(self, w, hub): w.consumer.on_task_message.add(w.autoscaler.maybe_scale) hub.call_repeatedly( w.autoscaler.keepalive, w.autoscaler.maybe_scale, ) class Autoscaler(bgThread): """Background thread to autoscale pool workers.""" def __init__(self, pool, max_concurrency, min_concurrency=0, worker=None, keepalive=AUTOSCALE_KEEPALIVE, mutex=None): super(Autoscaler, self).__init__() self.pool = pool self.mutex = mutex or threading.Lock() self.max_concurrency = max_concurrency self.min_concurrency = min_concurrency self.keepalive = keepalive self._last_scale_up = None self.worker = worker assert self.keepalive, 'cannot scale down too fast.' 
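    # NOTE: bgThread is expected to call body() over and over until the
    # thread is stopped; each pass scales at most once, then sleeps.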
def body(self): with self.mutex: self.maybe_scale() sleep(1.0) def _maybe_scale(self, req=None): procs = self.processes cur = min(self.qty, self.max_concurrency) if cur > procs: self.scale_up(cur - procs) return True cur = max(self.qty, self.min_concurrency) if cur < procs: self.scale_down(procs - cur) return True def maybe_scale(self, req=None): if self._maybe_scale(req): self.pool.maintain_pool() def update(self, max=None, min=None): with self.mutex: if max is not None: if max < self.processes: self._shrink(self.processes - max) self.max_concurrency = max if min is not None: if min > self.processes: self._grow(min - self.processes) self.min_concurrency = min return self.max_concurrency, self.min_concurrency def force_scale_up(self, n): with self.mutex: new = self.processes + n if new > self.max_concurrency: self.max_concurrency = new self._grow(n) def force_scale_down(self, n): with self.mutex: new = self.processes - n if new < self.min_concurrency: self.min_concurrency = max(new, 0) self._shrink(min(n, self.processes)) def scale_up(self, n): self._last_scale_up = monotonic() return self._grow(n) def scale_down(self, n): if self._last_scale_up and ( monotonic() - self._last_scale_up > self.keepalive): return self._shrink(n) def _grow(self, n): info('Scaling up %s processes.', n) self.pool.grow(n) self.worker.consumer._update_prefetch_count(n) def _shrink(self, n): info('Scaling down %s processes.', n) try: self.pool.shrink(n) except ValueError: debug("Autoscaler won't scale down: all processes busy.") except Exception as exc: error('Autoscaler: scale_down: %r', exc, exc_info=True) self.worker.consumer._update_prefetch_count(-n) def info(self): return { 'max': self.max_concurrency, 'min': self.min_concurrency, 'current': self.processes, 'qty': self.qty, } @property def qty(self): return len(state.reserved_requests) @property def processes(self): return self.pool.num_processes celery-4.1.0/celery/worker/strategy.py0000644000175000017500000001203313130607475017742 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Task execution strategy (optimization).""" from __future__ import absolute_import, unicode_literals import logging from kombu.async.timer import to_timestamp from kombu.five import buffer_t from celery.exceptions import InvalidTaskError from celery.utils.log import get_logger from celery.utils.saferepr import saferepr from celery.utils.time import timezone from .request import Request, create_request_cls from .state import task_reserved __all__ = ['default'] logger = get_logger(__name__) # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. def proto1_to_proto2(message, body): """Convert Task message protocol 1 arguments to protocol 2. 
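    In protocol 1 the task name, id, args and kwargs are carried in the
    message body; protocol 2 keeps them in the message headers instead.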
Returns: Tuple: of ``(body, headers, already_decoded_status, utc)`` """ try: args, kwargs = body.get('args', ()), body.get('kwargs', {}) kwargs.items # pylint: disable=pointless-statement except KeyError: raise InvalidTaskError('Message does not have args/kwargs') except AttributeError: raise InvalidTaskError( 'Task keyword arguments must be a mapping', ) body.update( argsrepr=saferepr(args), kwargsrepr=saferepr(kwargs), headers=message.headers, ) try: body['group'] = body['taskset'] except KeyError: pass embed = { 'callbacks': body.get('callbacks'), 'errbacks': body.get('errbacks'), 'chord': body.get('chord'), 'chain': None, } return (args, kwargs, embed), body, True, body.get('utc', True) def default(task, app, consumer, info=logger.info, error=logger.error, task_reserved=task_reserved, to_system_tz=timezone.to_system, bytes=bytes, buffer_t=buffer_t, proto1_to_proto2=proto1_to_proto2): """Default task execution strategy. Note: Strategies are here as an optimization, so sadly it's not very easy to override. """ hostname = consumer.hostname connection_errors = consumer.connection_errors _does_info = logger.isEnabledFor(logging.INFO) # task event related # (optimized to avoid calling request.send_event) eventer = consumer.event_dispatcher events = eventer and eventer.enabled send_event = eventer.send task_sends_events = events and task.send_events call_at = consumer.timer.call_at apply_eta_task = consumer.apply_eta_task rate_limits_enabled = not consumer.disable_rate_limits get_bucket = consumer.task_buckets.__getitem__ handle = consumer.on_task_request limit_task = consumer._limit_task body_can_be_buffer = consumer.pool.body_can_be_buffer Req = create_request_cls(Request, task, consumer.pool, hostname, eventer) revoked_tasks = consumer.controller.state.revoked def task_message_handler(message, body, ack, reject, callbacks, to_timestamp=to_timestamp): if body is None: body, headers, decoded, utc = ( message.body, message.headers, False, app.uses_utc_timezone(), ) if not body_can_be_buffer: body = bytes(body) if isinstance(body, buffer_t) else body else: body, headers, decoded, utc = proto1_to_proto2(message, body) req = Req( message, on_ack=ack, on_reject=reject, app=app, hostname=hostname, eventer=eventer, task=task, connection_errors=connection_errors, body=body, headers=headers, decoded=decoded, utc=utc, ) if _does_info: info('Received task: %s', req) if (req.expires or req.id in revoked_tasks) and req.revoked(): return if task_sends_events: send_event( 'task-received', uuid=req.id, name=req.name, args=req.argsrepr, kwargs=req.kwargsrepr, root_id=req.root_id, parent_id=req.parent_id, retries=req.request_dict.get('retries', 0), eta=req.eta and req.eta.isoformat(), expires=req.expires and req.expires.isoformat(), ) if req.eta: try: if req.utc: eta = to_timestamp(to_system_tz(req.eta)) else: eta = to_timestamp(req.eta, app.timezone) except (OverflowError, ValueError) as exc: error("Couldn't convert ETA %r to timestamp: %r. 
Task: %r", req.eta, exc, req.info(safe=True), exc_info=True) req.reject(requeue=False) else: consumer.qos.increment_eventually() call_at(eta, apply_eta_task, (req,), priority=6) else: if rate_limits_enabled: bucket = get_bucket(task.name) if bucket: return limit_task(req, bucket, 1) task_reserved(req) if callbacks: [callback(req) for callback in callbacks] handle(req) return task_message_handler celery-4.1.0/celery/worker/__init__.py0000644000175000017500000000022713130607475017641 0ustar omeromer00000000000000"""Worker implementation.""" from __future__ import absolute_import, unicode_literals from .worker import WorkController __all__ = ['WorkController'] celery-4.1.0/celery/worker/state.py0000644000175000017500000001706513130607475017232 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Internal worker state (global). This includes the currently active and reserved tasks, statistics, and revoked tasks. """ from __future__ import absolute_import, print_function, unicode_literals import os import sys import platform import shelve import weakref import zlib from kombu.serialization import pickle, pickle_protocol from kombu.utils.objects import cached_property from celery import __version__ from celery.exceptions import WorkerShutdown, WorkerTerminate from celery.five import Counter from celery.utils.collections import LimitedSet __all__ = [ 'SOFTWARE_INFO', 'reserved_requests', 'active_requests', 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown', 'task_accepted', 'task_ready', 'Persistent', ] #: Worker software/platform information. SOFTWARE_INFO = { 'sw_ident': 'py-celery', 'sw_ver': __version__, 'sw_sys': platform.system(), } #: maximum number of revokes to keep in memory. REVOKES_MAX = 50000 #: how many seconds a revoke will be active before #: being expired when the max limit has been exceeded. REVOKE_EXPIRES = 10800 #: Mapping of reserved task_id->Request. requests = {} #: set of all reserved :class:`~celery.worker.request.Request`'s. reserved_requests = weakref.WeakSet() #: set of currently active :class:`~celery.worker.request.Request`'s. active_requests = weakref.WeakSet() #: count of tasks accepted by the worker, sorted by type. total_count = Counter() #: count of all tasks accepted by the worker all_total_count = [0] #: the list of currently revoked tasks. Persistent if ``statedb`` set. 
revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) should_stop = None should_terminate = None def reset_state(): requests.clear() reserved_requests.clear() active_requests.clear() total_count.clear() all_total_count[:] = [0] revoked.clear() def maybe_shutdown(): """Shutdown if flags have been set.""" if should_stop is not None and should_stop is not False: raise WorkerShutdown(should_stop) elif should_terminate is not None and should_terminate is not False: raise WorkerTerminate(should_terminate) def task_reserved(request, add_request=requests.__setitem__, add_reserved_request=reserved_requests.add): """Update global state when a task has been reserved.""" add_request(request.id, request) add_reserved_request(request) def task_accepted(request, _all_total_count=all_total_count, add_active_request=active_requests.add, add_to_total_count=total_count.update): """Update global state when a task has been accepted.""" add_active_request(request) add_to_total_count({request.name: 1}) all_total_count[0] += 1 def task_ready(request, remove_request=requests.pop, discard_active_request=active_requests.discard, discard_reserved_request=reserved_requests.discard): """Update global state when a task is ready.""" remove_request(request.id, None) discard_active_request(request) discard_reserved_request(request) C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH') C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or os.environ.get('CELERY_BENCH_EVERY') or 1000) if C_BENCH: # pragma: no cover import atexit from billiard.process import current_process from celery.five import monotonic from celery.utils.debug import memdump, sample_mem all_count = 0 bench_first = None bench_start = None bench_last = None bench_every = C_BENCH_EVERY bench_sample = [] __reserved = task_reserved __ready = task_ready if current_process()._name == 'MainProcess': @atexit.register def on_shutdown(): if bench_first is not None and bench_last is not None: print('- Time spent in benchmark: {0!r}'.format( bench_last - bench_first)) print('- Avg: {0}'.format( sum(bench_sample) / len(bench_sample))) memdump() def task_reserved(request): # noqa """Called when a task is reserved by the worker.""" global bench_start global bench_first now = None if bench_start is None: bench_start = now = monotonic() if bench_first is None: bench_first = now return __reserved(request) def task_ready(request): # noqa """Called when a task is completed.""" global all_count global bench_start global bench_last all_count += 1 if not all_count % bench_every: now = monotonic() diff = now - bench_start print('- Time spent processing {0} tasks (since first ' 'task received): ~{1:.4f}s\n'.format(bench_every, diff)) sys.stdout.flush() bench_start = bench_last = now bench_sample.append(diff) sample_mem() return __ready(request) class Persistent(object): """Stores worker state between restarts. This is the persistent data stored by the worker when :option:`celery worker --statedb` is enabled. Currently only stores revoked task id's. 
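    State is kept in a :mod:`shelve` database, with the revoked-task
    set pickled and compressed using :mod:`zlib`.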
""" storage = shelve protocol = pickle_protocol compress = zlib.compress decompress = zlib.decompress _is_open = False def __init__(self, state, filename, clock=None): self.state = state self.filename = filename self.clock = clock self.merge() def open(self): return self.storage.open( self.filename, protocol=self.protocol, writeback=True, ) def merge(self): self._merge_with(self.db) def sync(self): self._sync_with(self.db) self.db.sync() def close(self): if self._is_open: self.db.close() self._is_open = False def save(self): self.sync() self.close() def _merge_with(self, d): self._merge_revoked(d) self._merge_clock(d) return d def _sync_with(self, d): self._revoked_tasks.purge() d.update({ str('__proto__'): 3, str('zrevoked'): self.compress(self._dumps(self._revoked_tasks)), str('clock'): self.clock.forward() if self.clock else 0, }) return d def _merge_clock(self, d): if self.clock: d[str('clock')] = self.clock.adjust(d.get(str('clock')) or 0) def _merge_revoked(self, d): try: self._merge_revoked_v3(d[str('zrevoked')]) except KeyError: try: self._merge_revoked_v2(d.pop(str('revoked'))) except KeyError: pass # purge expired items at boot self._revoked_tasks.purge() def _merge_revoked_v3(self, zrevoked): if zrevoked: self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked))) def _merge_revoked_v2(self, saved): if not isinstance(saved, LimitedSet): # (pre 3.0.18) used to be stored as a dict return self._merge_revoked_v1(saved) self._revoked_tasks.update(saved) def _merge_revoked_v1(self, saved): add = self._revoked_tasks.add for item in saved: add(item) def _dumps(self, obj): return pickle.dumps(obj, protocol=self.protocol) @property def _revoked_tasks(self): return self.state.revoked @cached_property def db(self): self._is_open = True return self.open() celery-4.1.0/celery/worker/request.py0000644000175000017500000004572513130607475017606 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Task request. This module defines the :class:`Request` class, that specifies how tasks are executed. """ from __future__ import absolute_import, unicode_literals import logging import sys from datetime import datetime from weakref import ref from billiard.common import TERM_SIGNAME from kombu.utils.encoding import safe_repr, safe_str from kombu.utils.objects import cached_property from celery import signals from celery.app.trace import trace_task, trace_task_ret from celery.exceptions import ( Ignore, TaskRevokedError, InvalidTaskError, SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, Retry, Reject, ) from celery.five import python_2_unicode_compatible, string from celery.platforms import signals as _signals from celery.utils.functional import maybe, noop from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.time import maybe_iso8601, timezone, maybe_make_aware from celery.utils.serialization import get_pickled_exception from . import state __all__ = ['Request'] # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning. 
IS_PYPY = hasattr(sys, 'pypy_version_info') logger = get_logger(__name__) debug, info, warn, error = (logger.debug, logger.info, logger.warning, logger.error) _does_info = False _does_debug = False def __optimize__(): # this is also called by celery.app.trace.setup_worker_optimizations global _does_debug global _does_info _does_debug = logger.isEnabledFor(logging.DEBUG) _does_info = logger.isEnabledFor(logging.INFO) __optimize__() # noqa: E305 # Localize tz_or_local = timezone.tz_or_local send_revoked = signals.task_revoked.send task_accepted = state.task_accepted task_ready = state.task_ready revoked_tasks = state.revoked @python_2_unicode_compatible class Request(object): """A request for task execution.""" acknowledged = False time_start = None worker_pid = None time_limits = (None, None) _already_revoked = False _terminate_on_ack = None _apply_result = None _tzlocal = None if not IS_PYPY: # pragma: no cover __slots__ = ( 'app', 'type', 'name', 'id', 'root_id', 'parent_id', 'on_ack', 'body', 'hostname', 'eventer', 'connection_errors', 'task', 'eta', 'expires', 'request_dict', 'on_reject', 'utc', 'content_type', 'content_encoding', 'argsrepr', 'kwargsrepr', '_decoded', '__weakref__', '__dict__', ) def __init__(self, message, on_ack=noop, hostname=None, eventer=None, app=None, connection_errors=None, request_dict=None, task=None, on_reject=noop, body=None, headers=None, decoded=False, utc=True, maybe_make_aware=maybe_make_aware, maybe_iso8601=maybe_iso8601, **opts): if headers is None: headers = message.headers if body is None: body = message.body self.app = app self.message = message self.body = body self.utc = utc self._decoded = decoded if decoded: self.content_type = self.content_encoding = None else: self.content_type, self.content_encoding = ( message.content_type, message.content_encoding, ) self.id = headers['id'] type = self.type = self.name = headers['task'] self.root_id = headers.get('root_id') self.parent_id = headers.get('parent_id') if 'shadow' in headers: self.name = headers['shadow'] or self.name if 'timelimit' in headers: self.time_limits = headers['timelimit'] self.argsrepr = headers.get('argsrepr', '') self.kwargsrepr = headers.get('kwargsrepr', '') self.on_ack = on_ack self.on_reject = on_reject self.hostname = hostname or gethostname() self.eventer = eventer self.connection_errors = connection_errors or () self.task = task or self.app.tasks[type] # timezone means the message is timezone-aware, and the only timezone # supported at this point is UTC. 
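        # ETA/expires arrive as ISO-8601 strings; they're parsed into
        # timezone-aware datetimes below, and malformed values raise
        # InvalidTaskError instead of a bare parsing error.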
eta = headers.get('eta') if eta is not None: try: eta = maybe_iso8601(eta) except (AttributeError, ValueError, TypeError) as exc: raise InvalidTaskError( 'invalid ETA value {0!r}: {1}'.format(eta, exc)) self.eta = maybe_make_aware(eta, self.tzlocal) else: self.eta = None expires = headers.get('expires') if expires is not None: try: expires = maybe_iso8601(expires) except (AttributeError, ValueError, TypeError) as exc: raise InvalidTaskError( 'invalid expires value {0!r}: {1}'.format(expires, exc)) self.expires = maybe_make_aware(expires, self.tzlocal) else: self.expires = None delivery_info = message.delivery_info or {} properties = message.properties or {} headers.update({ 'reply_to': properties.get('reply_to'), 'correlation_id': properties.get('correlation_id'), 'delivery_info': { 'exchange': delivery_info.get('exchange'), 'routing_key': delivery_info.get('routing_key'), 'priority': properties.get('priority'), 'redelivered': delivery_info.get('redelivered'), } }) self.request_dict = headers @property def delivery_info(self): return self.request_dict['delivery_info'] def execute_using_pool(self, pool, **kwargs): """Used by the worker to send this task to the pool. Arguments: pool (~celery.concurrency.base.TaskPool): The execution pool used to execute this request. Raises: celery.exceptions.TaskRevokedError: if the task was revoked. """ task_id = self.id task = self.task if self.revoked(): raise TaskRevokedError(task_id) time_limit, soft_time_limit = self.time_limits result = pool.apply_async( trace_task_ret, args=(self.type, task_id, self.request_dict, self.body, self.content_type, self.content_encoding), accept_callback=self.on_accepted, timeout_callback=self.on_timeout, callback=self.on_success, error_callback=self.on_failure, soft_timeout=soft_time_limit or task.soft_time_limit, timeout=time_limit or task.time_limit, correlation_id=task_id, ) # cannot create weakref to None self._apply_result = maybe(ref, result) return result def execute(self, loglevel=None, logfile=None): """Execute the task in a :func:`~celery.app.trace.trace_task`. Arguments: loglevel (int): The loglevel used by the task. logfile (str): The logfile used by the task. """ if self.revoked(): return # acknowledge task as being processed. if not self.task.acks_late: self.acknowledge() request = self.request_dict # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. 
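# Illustrative shape of the decoded protocol v2 body unpacked below; all
# values are made up:
#
#     >>> args, kwargs, embed = (
#     ...     (2, 2), {},
#     ...     {'callbacks': None, 'errbacks': None,
#     ...      'chain': None, 'chord': None})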
args, kwargs, embed = self._payload request.update({ 'loglevel': loglevel, 'logfile': logfile, 'hostname': self.hostname, 'is_eager': False, 'args': args, 'kwargs': kwargs }, **embed or {}) retval = trace_task(self.task, self.id, args, kwargs, request, hostname=self.hostname, loader=self.app.loader, app=self.app)[0] self.acknowledge() return retval def maybe_expire(self): """If expired, mark the task as revoked.""" if self.expires: now = datetime.now(self.expires.tzinfo) if now > self.expires: revoked_tasks.add(self.id) return True def terminate(self, pool, signal=None): signal = _signals.signum(signal or TERM_SIGNAME) if self.time_start: pool.terminate_job(self.worker_pid, signal) self._announce_revoked('terminated', True, signal, False) else: self._terminate_on_ack = pool, signal if self._apply_result is not None: obj = self._apply_result() # is a weakref if obj is not None: obj.terminate(signal) def _announce_revoked(self, reason, terminated, signum, expired): task_ready(self) self.send_event('task-revoked', terminated=terminated, signum=signum, expired=expired) self.task.backend.mark_as_revoked( self.id, reason, request=self, store_result=self.store_errors, ) self.acknowledge() self._already_revoked = True send_revoked(self.task, request=self, terminated=terminated, signum=signum, expired=expired) def revoked(self): """If revoked, skip task and mark state.""" expired = False if self._already_revoked: return True if self.expires: expired = self.maybe_expire() if self.id in revoked_tasks: info('Discarding revoked task: %s[%s]', self.name, self.id) self._announce_revoked( 'expired' if expired else 'revoked', False, None, expired, ) return True return False def send_event(self, type, **fields): if self.eventer and self.eventer.enabled and self.task.send_events: self.eventer.send(type, uuid=self.id, **fields) def on_accepted(self, pid, time_accepted): """Handler called when task is accepted by worker pool.""" self.worker_pid = pid self.time_start = time_accepted task_accepted(self) if not self.task.acks_late: self.acknowledge() self.send_event('task-started') if _does_debug: debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) if self._terminate_on_ack is not None: self.terminate(*self._terminate_on_ack) def on_timeout(self, soft, timeout): """Handler called if the task times out.""" task_ready(self) if soft: warn('Soft time limit (%ss) exceeded for %s[%s]', timeout, self.name, self.id) exc = SoftTimeLimitExceeded(soft) else: error('Hard time limit (%ss) exceeded for %s[%s]', timeout, self.name, self.id) exc = TimeLimitExceeded(timeout) self.task.backend.mark_as_failure( self.id, exc, request=self, store_result=self.store_errors, ) if self.task.acks_late: self.acknowledge() def on_success(self, failed__retval__runtime, **kwargs): """Handler called if the task was successfully processed.""" failed, retval, runtime = failed__retval__runtime if failed: if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)): raise retval.exception return self.on_failure(retval, return_ok=True) task_ready(self) if self.task.acks_late: self.acknowledge() self.send_event('task-succeeded', result=retval, runtime=runtime) def on_retry(self, exc_info): """Handler called if the task should be retried.""" if self.task.acks_late: self.acknowledge() self.send_event('task-retried', exception=safe_repr(exc_info.exception.exc), traceback=safe_str(exc_info.traceback)) def on_failure(self, exc_info, send_failed_event=True, return_ok=False): """Handler called if the task raised an exception.""" 
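# Rough dispatch order of the handler below: MemoryError is re-raised,
# Reject -> reject()/requeue, Ignore -> acknowledge(), Retry ->
# on_retry(), Terminated -> announced as revoked; anything else is
# recorded with mark_as_failure() on the result backend, then the
# message is acked/rejected for acks_late tasks and a ``task-failed``
# event is sent.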
task_ready(self) if isinstance(exc_info.exception, MemoryError): raise MemoryError('Process got: %s' % (exc_info.exception,)) elif isinstance(exc_info.exception, Reject): return self.reject(requeue=exc_info.exception.requeue) elif isinstance(exc_info.exception, Ignore): return self.acknowledge() exc = exc_info.exception if isinstance(exc, Retry): return self.on_retry(exc_info) # These are special cases where the process wouldn't've had # time to write the result. if isinstance(exc, Terminated): self._announce_revoked( 'terminated', True, string(exc), False) send_failed_event = False # already sent revoked event elif isinstance(exc, WorkerLostError) or not return_ok: self.task.backend.mark_as_failure( self.id, exc, request=self, store_result=self.store_errors, ) # (acks_late) acknowledge after result stored. if self.task.acks_late: requeue = not self.delivery_info.get('redelivered') reject = ( self.task.reject_on_worker_lost and isinstance(exc, WorkerLostError) ) if reject: self.reject(requeue=requeue) send_failed_event = False else: self.acknowledge() if send_failed_event: self.send_event( 'task-failed', exception=safe_repr(get_pickled_exception(exc_info.exception)), traceback=exc_info.traceback, ) if not return_ok: error('Task handler raised error: %r', exc, exc_info=exc_info.exc_info) def acknowledge(self): """Acknowledge task.""" if not self.acknowledged: self.on_ack(logger, self.connection_errors) self.acknowledged = True def reject(self, requeue=False): if not self.acknowledged: self.on_reject(logger, self.connection_errors, requeue) self.acknowledged = True self.send_event('task-rejected', requeue=requeue) def info(self, safe=False): return { 'id': self.id, 'name': self.name, 'args': self.argsrepr, 'kwargs': self.kwargsrepr, 'type': self.type, 'hostname': self.hostname, 'time_start': self.time_start, 'acknowledged': self.acknowledged, 'delivery_info': self.delivery_info, 'worker_pid': self.worker_pid, } def humaninfo(self): return '{0.name}[{0.id}]'.format(self) def __str__(self): """``str(self)``.""" return ' '.join([ self.humaninfo(), ' ETA:[{0}]'.format(self.eta) if self.eta else '', ' expires:[{0}]'.format(self.expires) if self.expires else '', ]) def __repr__(self): """``repr(self)``.""" return '<{0}: {1} {2} {3}>'.format( type(self).__name__, self.humaninfo(), self.argsrepr, self.kwargsrepr, ) @property def tzlocal(self): if self._tzlocal is None: self._tzlocal = self.app.conf.timezone return self._tzlocal @property def store_errors(self): return (not self.task.ignore_result or self.task.store_errors_even_if_ignored) @property def task_id(self): # XXX compat return self.id @task_id.setter # noqa def task_id(self, value): self.id = value @property def task_name(self): # XXX compat return self.name @task_name.setter # noqa def task_name(self, value): self.name = value @property def reply_to(self): # used by rpc backend when failures reported by parent process return self.request_dict['reply_to'] @property def correlation_id(self): # used similarly to reply_to return self.request_dict['correlation_id'] @cached_property def _payload(self): return self.body if self._decoded else self.message.payload @cached_property def chord(self): # used by backend.mark_as_failure when failure is reported # by parent process # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. 
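# ``embed`` is the third element of the v2 body (see the sketch in
# ``execute()`` above); for the header tasks of a ``chord`` it carries
# the serialized body signature, which is what lets the parent process
# trigger the chord callback when marking a failure.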
_, _, embed = self._payload return embed.get('chord') @cached_property def errbacks(self): # used by backend.mark_as_failure when failure is reported # by parent process # pylint: disable=unpacking-non-sequence # payload is a property, so pylint doesn't think it's a tuple. _, _, embed = self._payload return embed.get('errbacks') @cached_property def group(self): # used by backend.on_chord_part_return when failures reported # by parent process return self.request_dict['group'] def create_request_cls(base, task, pool, hostname, eventer, ref=ref, revoked_tasks=revoked_tasks, task_ready=task_ready, trace=trace_task_ret): default_time_limit = task.time_limit default_soft_time_limit = task.soft_time_limit apply_async = pool.apply_async acks_late = task.acks_late events = eventer and eventer.enabled class Request(base): def execute_using_pool(self, pool, **kwargs): task_id = self.id if (self.expires or task_id in revoked_tasks) and self.revoked(): raise TaskRevokedError(task_id) time_limit, soft_time_limit = self.time_limits result = apply_async( trace, args=(self.type, task_id, self.request_dict, self.body, self.content_type, self.content_encoding), accept_callback=self.on_accepted, timeout_callback=self.on_timeout, callback=self.on_success, error_callback=self.on_failure, soft_timeout=soft_time_limit or default_soft_time_limit, timeout=time_limit or default_time_limit, correlation_id=task_id, ) # cannot create weakref to None # pylint: disable=attribute-defined-outside-init self._apply_result = maybe(ref, result) return result def on_success(self, failed__retval__runtime, **kwargs): failed, retval, runtime = failed__retval__runtime if failed: if isinstance(retval.exception, ( SystemExit, KeyboardInterrupt)): raise retval.exception return self.on_failure(retval, return_ok=True) task_ready(self) if acks_late: self.acknowledge() if events: self.send_event( 'task-succeeded', result=retval, runtime=runtime, ) return Request celery-4.1.0/celery/worker/components.py0000644000175000017500000001671413130607475020277 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Worker-level Bootsteps.""" from __future__ import absolute_import, unicode_literals import atexit import warnings from kombu.async import Hub as _Hub, get_event_loop, set_event_loop from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore from kombu.async.timer import Timer as _Timer from celery import bootsteps from celery._state import _set_task_join_will_block from celery.exceptions import ImproperlyConfigured from celery.five import string_t from celery.platforms import IS_WINDOWS from celery.utils.log import worker_logger as logger __all__ = ['Timer', 'Hub', 'Pool', 'Beat', 'StateDB', 'Consumer'] GREEN_POOLS = {'eventlet', 'gevent'} ERR_B_GREEN = """\ -B option doesn't work with eventlet/gevent pools: \ use standalone beat instead.\ """ W_POOL_SETTING = """ The worker_pool setting shouldn't be used to select the eventlet/gevent pools, instead you *must use the -P* argument so that patches are applied as early as possible. """ class Timer(bootsteps.Step): """Timer bootstep.""" def create(self, w): if w.use_eventloop: # does not use dedicated timer thread. w.timer = _Timer(max_interval=10.0) else: if not w.timer_cls: # Default Timer is set by the pool, as for example, the # eventlet pool needs a custom timer implementation. 
w.timer_cls = w.pool_cls.Timer w.timer = self.instantiate(w.timer_cls, max_interval=w.timer_precision, on_error=self.on_timer_error, on_tick=self.on_timer_tick) def on_timer_error(self, exc): logger.error('Timer error: %r', exc, exc_info=True) def on_timer_tick(self, delay): logger.debug('Timer wake-up! Next ETA %s secs.', delay) class Hub(bootsteps.StartStopStep): """Worker starts the event loop.""" requires = (Timer,) def __init__(self, w, **kwargs): w.hub = None super(Hub, self).__init__(w, **kwargs) def include_if(self, w): return w.use_eventloop def create(self, w): w.hub = get_event_loop() if w.hub is None: required_hub = getattr(w._conninfo, 'requires_hub', None) w.hub = set_event_loop(( required_hub if required_hub else _Hub)(w.timer)) self._patch_thread_primitives(w) return self def start(self, w): pass def stop(self, w): w.hub.close() def terminate(self, w): w.hub.close() def _patch_thread_primitives(self, w): # make clock use dummy lock w.app.clock.mutex = DummyLock() # multiprocessing's ApplyResult uses this lock. try: from billiard import pool except ImportError: # pragma: no cover pass else: pool.Lock = DummyLock class Pool(bootsteps.StartStopStep): """Bootstep managing the worker pool. Describes how to initialize the worker pool, and starts and stops the pool during worker start-up/shutdown. Adds attributes: * autoscale * pool * max_concurrency * min_concurrency """ requires = (Hub,) def __init__(self, w, autoscale=None, **kwargs): w.pool = None w.max_concurrency = None w.min_concurrency = w.concurrency self.optimization = w.optimization if isinstance(autoscale, string_t): max_c, _, min_c = autoscale.partition(',') autoscale = [int(max_c), min_c and int(min_c) or 0] w.autoscale = autoscale if w.autoscale: w.max_concurrency, w.min_concurrency = w.autoscale super(Pool, self).__init__(w, **kwargs) def close(self, w): if w.pool: w.pool.close() def terminate(self, w): if w.pool: w.pool.terminate() def create(self, w): semaphore = None max_restarts = None if w.app.conf.worker_pool in GREEN_POOLS: # pragma: no cover warnings.warn(UserWarning(W_POOL_SETTING)) threaded = not w.use_eventloop or IS_WINDOWS procs = w.min_concurrency w.process_task = w._process_task if not threaded: semaphore = w.semaphore = LaxBoundedSemaphore(procs) w._quick_acquire = w.semaphore.acquire w._quick_release = w.semaphore.release max_restarts = 100 if w.pool_putlocks and w.pool_cls.uses_semaphore: w.process_task = w._process_task_sem allow_restart = w.pool_restarts pool = w.pool = self.instantiate( w.pool_cls, w.min_concurrency, initargs=(w.app, w.hostname), maxtasksperchild=w.max_tasks_per_child, max_memory_per_child=w.max_memory_per_child, timeout=w.time_limit, soft_timeout=w.soft_time_limit, putlocks=w.pool_putlocks and threaded, lost_worker_timeout=w.worker_lost_wait, threads=threaded, max_restarts=max_restarts, allow_restart=allow_restart, forking_enable=True, semaphore=semaphore, sched_strategy=self.optimization, app=w.app, ) _set_task_join_will_block(pool.task_join_will_block) return pool def info(self, w): return {'pool': w.pool.info if w.pool else 'N/A'} def register_with_event_loop(self, w, hub): w.pool.register_with_event_loop(hub) class Beat(bootsteps.StartStopStep): """Step used to embed a beat process. Enabled when the ``beat`` argument is set. 
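Note that an embedded beat cannot be combined with the eventlet/gevent
pools (see ``ERR_B_GREEN`` above); use a standalone ``celery beat``
process instead.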
""" label = 'Beat' conditional = True def __init__(self, w, beat=False, **kwargs): self.enabled = w.beat = beat w.beat = None super(Beat, self).__init__(w, beat=beat, **kwargs) def create(self, w): from celery.beat import EmbeddedService if w.pool_cls.__module__.endswith(('gevent', 'eventlet')): raise ImproperlyConfigured(ERR_B_GREEN) b = w.beat = EmbeddedService(w.app, schedule_filename=w.schedule_filename, scheduler_cls=w.scheduler) return b class StateDB(bootsteps.Step): """Bootstep that sets up between-restart state database file.""" def __init__(self, w, **kwargs): self.enabled = w.statedb w._persistence = None super(StateDB, self).__init__(w, **kwargs) def create(self, w): w._persistence = w.state.Persistent(w.state, w.statedb, w.app.clock) atexit.register(w._persistence.save) class Consumer(bootsteps.StartStopStep): """Bootstep starting the Consumer blueprint.""" last = True def create(self, w): if w.max_concurrency: prefetch_count = max(w.min_concurrency, 1) * w.prefetch_multiplier else: prefetch_count = w.concurrency * w.prefetch_multiplier c = w.consumer = self.instantiate( w.consumer_cls, w.process_task, hostname=w.hostname, task_events=w.task_events, init_callback=w.ready_callback, initial_prefetch_count=prefetch_count, pool=w.pool, timer=w.timer, app=w.app, controller=w, hub=w.hub, worker_options=w.options, disable_rate_limits=w.disable_rate_limits, prefetch_multiplier=w.prefetch_multiplier, ) return c celery-4.1.0/celery/worker/pidbox.py0000644000175000017500000000715413130607475017375 0ustar omeromer00000000000000"""Worker Pidbox (remote control).""" from __future__ import absolute_import, unicode_literals import socket import threading from kombu.common import ignore_errors from kombu.utils.encoding import safe_str from celery.utils.collections import AttributeDict from celery.utils.functional import pass1 from celery.utils.log import get_logger from . import control __all__ = ['Pidbox', 'gPidbox'] logger = get_logger(__name__) debug, error, info = logger.debug, logger.error, logger.info class Pidbox(object): """Worker mailbox.""" consumer = None def __init__(self, c): self.c = c self.hostname = c.hostname self.node = c.app.control.mailbox.Node( safe_str(c.hostname), handlers=control.Panel.data, state=AttributeDict( app=c.app, hostname=c.hostname, consumer=c, tset=pass1 if c.controller.use_eventloop else set), ) self._forward_clock = self.c.app.clock.forward def on_message(self, body, message): # just increase clock as clients usually don't # have a valid clock to adjust with. 
self._forward_clock() try: self.node.handle_message(body, message) except KeyError as exc: error('No such control command: %s', exc) except Exception as exc: error('Control command error: %r', exc, exc_info=True) self.reset() def start(self, c): self.node.channel = c.connection.channel() self.consumer = self.node.listen(callback=self.on_message) self.consumer.on_decode_error = c.on_decode_error def on_stop(self): pass def stop(self, c): self.on_stop() self.consumer = self._close_channel(c) def reset(self): self.stop(self.c) self.start(self.c) def _close_channel(self, c): if self.node and self.node.channel: ignore_errors(c, self.node.channel.close) def shutdown(self, c): self.on_stop() if self.consumer: debug('Canceling broadcast consumer...') ignore_errors(c, self.consumer.cancel) self.stop(self.c) class gPidbox(Pidbox): """Worker pidbox (greenlet).""" _node_shutdown = None _node_stopped = None _resets = 0 def start(self, c): c.pool.spawn_n(self.loop, c) def on_stop(self): if self._node_stopped: self._node_shutdown.set() debug('Waiting for broadcast thread to shutdown...') self._node_stopped.wait() self._node_stopped = self._node_shutdown = None def reset(self): self._resets += 1 def _do_reset(self, c, connection): self._close_channel(c) self.node.channel = connection.channel() self.consumer = self.node.listen(callback=self.on_message) self.consumer.consume() def loop(self, c): resets = [self._resets] shutdown = self._node_shutdown = threading.Event() stopped = self._node_stopped = threading.Event() try: with c.connection_for_read() as connection: info('pidbox: Connected to %s.', connection.as_uri()) self._do_reset(c, connection) while not shutdown.is_set() and c.connection: if resets[0] < self._resets: resets[0] += 1 self._do_reset(c, connection) try: connection.drain_events(timeout=1.0) except socket.timeout: pass finally: stopped.set() celery-4.1.0/celery/worker/control.py0000644000175000017500000004102713130607475017565 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Worker remote control command implementations.""" from __future__ import absolute_import, unicode_literals import io import tempfile from collections import namedtuple from billiard.common import TERM_SIGNAME from kombu.utils.encoding import safe_repr from celery.exceptions import WorkerShutdown from celery.five import UserDict, items, string_t, text_t from celery.platforms import signals as _signals from celery.utils.functional import maybe_list from celery.utils.log import get_logger from celery.utils.serialization import jsonify, strtobool from celery.utils.time import rate from . import state as worker_state from .request import Request __all__ = ['Panel'] DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit') logger = get_logger(__name__) controller_info_t = namedtuple('controller_info_t', [ 'alias', 'type', 'visible', 'default_timeout', 'help', 'signature', 'args', 'variadic', ]) def ok(value): return {'ok': value} def nok(value): return {'error': value} class Panel(UserDict): """Global registry of remote control commands.""" data = {} # global dict. 
meta = {} # -"- @classmethod def register(cls, *args, **kwargs): if args: return cls._register(**kwargs)(*args) return cls._register(**kwargs) @classmethod def _register(cls, name=None, alias=None, type='control', visible=True, default_timeout=1.0, help=None, signature=None, args=None, variadic=None): def _inner(fun): control_name = name or fun.__name__ _help = help or (fun.__doc__ or '').strip().split('\n')[0] cls.data[control_name] = fun cls.meta[control_name] = controller_info_t( alias, type, visible, default_timeout, _help, signature, args, variadic) if alias: cls.data[alias] = fun return fun return _inner def control_command(**kwargs): return Panel.register(type='control', **kwargs) def inspect_command(**kwargs): return Panel.register(type='inspect', **kwargs) # -- App @inspect_command() def report(state): """Information about Celery installation for bug reports.""" return ok(state.app.bugreport()) @inspect_command( alias='dump_conf', # XXX < backwards compatible signature='[include_defaults=False]', args=[('with_defaults', strtobool)], ) def conf(state, with_defaults=False, **kwargs): """List configuration.""" return jsonify(state.app.conf.table(with_defaults=with_defaults), keyfilter=_wanted_config_key, unknown_type_filter=safe_repr) def _wanted_config_key(key): return isinstance(key, string_t) and not key.startswith('__') # -- Task @inspect_command( variadic='ids', signature='[id1 [id2 [... [idN]]]]', ) def query_task(state, ids, **kwargs): """Query for task information by id.""" return { req.id: (_state_of_task(req), req.info()) for req in _find_requests_by_id(maybe_list(ids)) } def _find_requests_by_id(ids, get_request=worker_state.requests.__getitem__): for task_id in ids: try: yield get_request(task_id) except KeyError: pass def _state_of_task(request, is_active=worker_state.active_requests.__contains__, is_reserved=worker_state.reserved_requests.__contains__): if is_active(request): return 'active' elif is_reserved(request): return 'reserved' return 'ready' @control_command( variadic='task_id', signature='[id1 [id2 [... [idN]]]]', ) def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None size = len(task_ids) terminated = set() worker_state.revoked.update(task_ids) if terminate: signum = _signals.signum(signal or TERM_SIGNAME) for request in _find_requests_by_id(task_ids): if request.id not in terminated: terminated.add(request.id) logger.info('Terminating %s (%s)', request.id, signum) request.terminate(state.consumer.pool, signal=signum) if len(terminated) >= size: break if not terminated: return ok('terminate: tasks unknown') return ok('terminate: {0}'.format(', '.join(terminated))) idstr = ', '.join(task_ids) logger.info('Tasks flagged as revoked: %s', idstr) return ok('tasks {0} flagged as revoked'.format(idstr)) @control_command( variadic='task_id', args=[('signal', text_t)], signature=' [id1 [id2 [... 
[idN]]]]' ) def terminate(state, signal, task_id, **kwargs): """Terminate task by task id (or list of ids).""" return revoke(state, task_id, terminate=True, signal=signal) @control_command( args=[('task_name', text_t), ('rate_limit', text_t)], signature=' ', ) def rate_limit(state, task_name, rate_limit, **kwargs): """Tell worker(s) to modify the rate limit for a task by type. See Also: :attr:`celery.task.base.Task.rate_limit`. Arguments: task_name (str): Type of task to set rate limit for. rate_limit (int, str): New rate limit. """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. try: rate(rate_limit) except ValueError as exc: return nok('Invalid rate limit string: {0!r}'.format(exc)) try: state.app.tasks[task_name].rate_limit = rate_limit except KeyError: logger.error('Rate limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') state.consumer.reset_rate_limits() if not rate_limit: logger.info('Rate limits disabled for tasks of type %s', task_name) return ok('rate limit disabled successfully') logger.info('New rate limit for tasks of type %s: %s.', task_name, rate_limit) return ok('new rate limit set successfully') @control_command( args=[('task_name', text_t), ('soft', float), ('hard', float)], signature=' [hard_secs]', ) def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): """Tell worker(s) to modify the time limit for task by type. Arguments: task_name (str): Name of task to change. hard (float): Hard time limit. soft (float): Soft time limit. """ try: task = state.app.tasks[task_name] except KeyError: logger.error('Change time limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') task.soft_time_limit = soft task.time_limit = hard logger.info('New time limits for tasks of type %s: soft=%s hard=%s', task_name, soft, hard) return ok('time limits set successfully') # -- Events @inspect_command() def clock(state, **kwargs): """Get current logical clock value.""" return {'clock': state.app.clock.value} @control_command() def election(state, id, topic, action=None, **kwargs): """Hold election. Arguments: id (str): Unique election id. topic (str): Election topic. action (str): Action to take for elected actor. 
""" if state.consumer.gossip: state.consumer.gossip.election(id, topic, action) @control_command() def enable_events(state): """Tell worker(s) to send task-related events.""" dispatcher = state.consumer.event_dispatcher if dispatcher.groups and 'task' not in dispatcher.groups: dispatcher.groups.add('task') logger.info('Events of group {task} enabled by remote.') return ok('task events enabled') return ok('task events already enabled') @control_command() def disable_events(state): """Tell worker(s) to stop sending task-related events.""" dispatcher = state.consumer.event_dispatcher if 'task' in dispatcher.groups: dispatcher.groups.discard('task') logger.info('Events of group {task} disabled by remote.') return ok('task events disabled') return ok('task events already disabled') @control_command() def heartbeat(state): """Tell worker(s) to send event heartbeat immediately.""" logger.debug('Heartbeat requested by remote.') dispatcher = state.consumer.event_dispatcher dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) # -- Worker @inspect_command(visible=False) def hello(state, from_node, revoked=None, **kwargs): """Request mingle sync-data.""" # pylint: disable=redefined-outer-name # XXX Note that this redefines `revoked`: # Outside of this scope that is a function. if from_node != state.hostname: logger.info('sync with %s', from_node) if revoked: worker_state.revoked.update(revoked) return { 'revoked': worker_state.revoked._data, 'clock': state.app.clock.forward(), } @inspect_command(default_timeout=0.2) def ping(state, **kwargs): """Ping worker(s).""" return ok('pong') @inspect_command() def stats(state, **kwargs): """Request worker statistics/information.""" return state.consumer.controller.stats() @inspect_command(alias='dump_schedule') def scheduled(state, **kwargs): """List of currently scheduled ETA/countdown tasks.""" return list(_iter_schedule_requests(state.consumer.timer)) def _iter_schedule_requests(timer): for waiting in timer.schedule.queue: try: arg0 = waiting.entry.args[0] except (IndexError, TypeError): continue else: if isinstance(arg0, Request): yield { 'eta': arg0.eta.isoformat() if arg0.eta else None, 'priority': waiting.priority, 'request': arg0.info(), } @inspect_command(alias='dump_reserved') def reserved(state, **kwargs): """List of currently reserved tasks, not including scheduled/active.""" reserved_tasks = ( state.tset(worker_state.reserved_requests) - state.tset(worker_state.active_requests) ) if not reserved_tasks: return [] return [request.info() for request in reserved_tasks] @inspect_command(alias='dump_active') def active(state, **kwargs): """List of tasks currently being executed.""" return [request.info() for request in state.tset(worker_state.active_requests)] @inspect_command(alias='dump_revoked') def revoked(state, **kwargs): """List of revoked task-ids.""" return list(worker_state.revoked) @inspect_command( alias='dump_tasks', variadic='taskinfoitems', signature='[attr1 [attr2 [... [attrN]]]]', ) def registered(state, taskinfoitems=None, builtins=False, **kwargs): """List of registered tasks. Arguments: taskinfoitems (Sequence[str]): List of task attributes to include. Defaults to ``exchange,routing_key,rate_limit``. builtins (bool): Also include built-in tasks. 
""" reg = state.app.tasks taskinfoitems = taskinfoitems or DEFAULT_TASK_INFO_ITEMS tasks = reg if builtins else ( task for task in reg if not task.startswith('celery.')) def _extract_info(task): fields = { field: str(getattr(task, field, None)) for field in taskinfoitems if getattr(task, field, None) is not None } if fields: info = ['='.join(f) for f in items(fields)] return '{0} [{1}]'.format(task.name, ' '.join(info)) return task.name return [_extract_info(reg[task]) for task in sorted(tasks)] # -- Debugging @inspect_command( default_timeout=60.0, args=[('type', text_t), ('num', int), ('max_depth', int)], signature='[object_type=Request] [num=200 [max_depth=10]]', ) def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover """Create graph of uncollected objects (memory-leak debugging). Arguments: num (int): Max number of objects to graph. max_depth (int): Traverse at most n levels deep. type (str): Name of object to graph. Default is ``"Request"``. """ try: import objgraph as _objgraph except ImportError: raise ImportError('Requires the objgraph library') logger.info('Dumping graph for type %r', type) with tempfile.NamedTemporaryFile(prefix='cobjg', suffix='.png', delete=False) as fh: objects = _objgraph.by_type(type)[:num] _objgraph.show_backrefs( objects, max_depth=max_depth, highlight=lambda v: v in objects, filename=fh.name, ) return {'filename': fh.name} @inspect_command() def memsample(state, **kwargs): """Sample current RSS memory usage.""" from celery.utils.debug import sample_mem return sample_mem() @inspect_command( args=[('samples', int)], signature='[n_samples=10]', ) def memdump(state, samples=10, **kwargs): # pragma: no cover """Dump statistics of previous memsample requests.""" from celery.utils import debug out = io.StringIO() debug.memdump(file=out) return out.getvalue() # -- Pool @control_command( args=[('n', int)], signature='[N=1]', ) def pool_grow(state, n=1, **kwargs): """Grow pool by n processes/threads.""" if state.consumer.controller.autoscaler: state.consumer.controller.autoscaler.force_scale_up(n) else: state.consumer.pool.grow(n) state.consumer._update_prefetch_count(n) return ok('pool will grow') @control_command( args=[('n', int)], signature='[N=1]', ) def pool_shrink(state, n=1, **kwargs): """Shrink pool by n processes/threads.""" if state.consumer.controller.autoscaler: state.consumer.controller.autoscaler.force_scale_down(n) else: state.consumer.pool.shrink(n) state.consumer._update_prefetch_count(-n) return ok('pool will shrink') @control_command() def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): """Restart execution pool.""" if state.app.conf.worker_pool_restarts: state.consumer.controller.reload(modules, reload, reloader=reloader) return ok('reload started') else: raise ValueError('Pool restarts not enabled') @control_command( args=[('max', int), ('min', int)], signature='[max [min]]', ) def autoscale(state, max=None, min=None): """Modify autoscale settings.""" autoscaler = state.consumer.controller.autoscaler if autoscaler: max_, min_ = autoscaler.update(max, min) return ok('autoscale now max={0} min={1}'.format(max_, min_)) raise ValueError('Autoscale not enabled') @control_command() def shutdown(state, msg='Got shutdown from remote', **kwargs): """Shutdown worker(s).""" logger.warning(msg) raise WorkerShutdown(msg) # -- Queues @control_command( args=[ ('queue', text_t), ('exchange', text_t), ('exchange_type', text_t), ('routing_key', text_t), ], signature=' [exchange [type [routing_key]]]', ) def 
add_consumer(state, queue, exchange=None, exchange_type=None, routing_key=None, **options): """Tell worker(s) to consume from task queue by name.""" state.consumer.call_soon( state.consumer.add_task_queue, queue, exchange, exchange_type or 'direct', routing_key, **options) return ok('add consumer {0}'.format(queue)) @control_command( args=[('queue', text_t)], signature='', ) def cancel_consumer(state, queue, **_): """Tell worker(s) to stop consuming from task queue by name.""" state.consumer.call_soon( state.consumer.cancel_task_queue, queue, ) return ok('no longer consuming from {0}'.format(queue)) @inspect_command() def active_queues(state): """List the task queues a worker is currently consuming from.""" if state.consumer.task_consumer: return [dict(queue.as_dict(recurse=True)) for queue in state.consumer.task_consumer.queues] return [] celery-4.1.0/celery/worker/consumer/0000755000175000017500000000000013135426347017364 5ustar omeromer00000000000000celery-4.1.0/celery/worker/consumer/connection.py0000644000175000017500000000211113130607475022076 0ustar omeromer00000000000000"""Consumer Broker Connection Bootstep.""" from __future__ import absolute_import, unicode_literals from kombu.common import ignore_errors from celery import bootsteps from celery.utils.log import get_logger __all__ = ['Connection'] logger = get_logger(__name__) info = logger.info class Connection(bootsteps.StartStopStep): """Service managing the consumer broker connection.""" def __init__(self, c, **kwargs): c.connection = None super(Connection, self).__init__(c, **kwargs) def start(self, c): c.connection = c.connect() info('Connected to %s', c.connection.as_uri()) def shutdown(self, c): # We must set self.connection to None here, so # that the green pidbox thread exits. connection, c.connection = c.connection, None if connection: ignore_errors(connection, connection.close) def info(self, c): params = 'N/A' if c.connection: params = c.connection.info() params.pop('password', None) # don't send password.
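# Roughly what the resulting mapping looks like (values made up):
#   {'broker': {'hostname': 'localhost', 'port': 5672,
#               'transport': 'amqp', 'virtual_host': '/', ...}}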
return {'broker': params} celery-4.1.0/celery/worker/consumer/__init__.py0000644000175000017500000000070013130607475021470 0ustar omeromer00000000000000"""Worker consumer.""" from __future__ import absolute_import, unicode_literals from .consumer import Consumer from .agent import Agent from .connection import Connection from .control import Control from .events import Events from .gossip import Gossip from .heart import Heart from .mingle import Mingle from .tasks import Tasks __all__ = [ 'Consumer', 'Agent', 'Connection', 'Control', 'Events', 'Gossip', 'Heart', 'Mingle', 'Tasks', ] celery-4.1.0/celery/worker/consumer/agent.py0000644000175000017500000000111713130607475021032 0ustar omeromer00000000000000"""Celery + :pypi:`cell` integration.""" from __future__ import absolute_import, unicode_literals from celery import bootsteps from .connection import Connection __all__ = ['Agent'] class Agent(bootsteps.StartStopStep): """Agent starts :pypi:`cell` actors.""" conditional = True requires = (Connection,) def __init__(self, c, **kwargs): self.agent_cls = self.enabled = c.app.conf.worker_agent super(Agent, self).__init__(c, **kwargs) def create(self, c): agent = c.agent = self.instantiate(self.agent_cls, c.connection) return agent celery-4.1.0/celery/worker/consumer/gossip.py0000644000175000017500000001514013130607475021241 0ustar omeromer00000000000000"""Worker <-> Worker communication Bootstep.""" from __future__ import absolute_import, unicode_literals from collections import defaultdict from functools import partial from heapq import heappush from operator import itemgetter from kombu import Consumer from kombu.async.semaphore import DummyLock from celery import bootsteps from celery.five import values from celery.utils.log import get_logger from celery.utils.objects import Bunch from .mingle import Mingle __all__ = ['Gossip'] logger = get_logger(__name__) debug, info = logger.debug, logger.info class Gossip(bootsteps.ConsumerStep): """Bootstep consuming events from other workers. This keeps the logical clock value up to date. 
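It also implements simple leader elections between workers; see
``election()``, ``on_elect()`` and ``on_elect_ack()`` below.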
""" label = 'Gossip' requires = (Mingle,) _cons_stamp_fields = itemgetter( 'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver', ) compatible_transports = {'amqp', 'redis'} def __init__(self, c, without_gossip=False, interval=5.0, heartbeat_interval=2.0, **kwargs): self.enabled = not without_gossip and self.compatible_transport(c.app) self.app = c.app c.gossip = self self.Receiver = c.app.events.Receiver self.hostname = c.hostname self.full_hostname = '.'.join([self.hostname, str(c.pid)]) self.on = Bunch( node_join=set(), node_leave=set(), node_lost=set(), ) self.timer = c.timer if self.enabled: self.state = c.app.events.State( on_node_join=self.on_node_join, on_node_leave=self.on_node_leave, max_tasks_in_memory=1, ) if c.hub: c._mutex = DummyLock() self.update_state = self.state.event self.interval = interval self.heartbeat_interval = heartbeat_interval self._tref = None self.consensus_requests = defaultdict(list) self.consensus_replies = {} self.event_handlers = { 'worker.elect': self.on_elect, 'worker.elect.ack': self.on_elect_ack, } self.clock = c.app.clock self.election_handlers = { 'task': self.call_task } super(Gossip, self).__init__(c, **kwargs) def compatible_transport(self, app): with app.connection_for_read() as conn: return conn.transport.driver_type in self.compatible_transports def election(self, id, topic, action=None): self.consensus_replies[id] = [] self.dispatcher.send( 'worker-elect', id=id, topic=topic, action=action, cver=1, ) def call_task(self, task): try: self.app.signature(task).apply_async() except Exception as exc: # pylint: disable=broad-except logger.exception('Could not call task: %r', exc) def on_elect(self, event): try: (id_, clock, hostname, pid, topic, action, _) = self._cons_stamp_fields(event) except KeyError as exc: return logger.exception('election request missing field %s', exc) heappush( self.consensus_requests[id_], (clock, '%s.%s' % (hostname, pid), topic, action), ) self.dispatcher.send('worker-elect-ack', id=id_) def start(self, c): super(Gossip, self).start(c) self.dispatcher = c.event_dispatcher def on_elect_ack(self, event): id = event['id'] try: replies = self.consensus_replies[id] except KeyError: return # not for us alive_workers = set(self.state.alive_workers()) replies.append(event['hostname']) if len(replies) >= len(alive_workers): _, leader, topic, action = self.clock.sort_heap( self.consensus_requests[id], ) if leader == self.full_hostname: info('I won the election %r', id) try: handler = self.election_handlers[topic] except KeyError: logger.exception('Unknown election topic %r', topic) else: handler(action) else: info('node %s elected for %r', leader, id) self.consensus_requests.pop(id, None) self.consensus_replies.pop(id, None) def on_node_join(self, worker): debug('%s joined the party', worker.hostname) self._call_handlers(self.on.node_join, worker) def on_node_leave(self, worker): debug('%s left', worker.hostname) self._call_handlers(self.on.node_leave, worker) def on_node_lost(self, worker): info('missed heartbeat from %s', worker.hostname) self._call_handlers(self.on.node_lost, worker) def _call_handlers(self, handlers, *args, **kwargs): for handler in handlers: try: handler(*args, **kwargs) except Exception as exc: # pylint: disable=broad-except logger.exception( 'Ignored error from handler %r: %r', handler, exc) def register_timer(self): if self._tref is not None: self._tref.cancel() self._tref = self.timer.call_repeatedly(self.interval, self.periodic) def periodic(self): workers = self.state.workers dirty = set() for 
worker in values(workers): if not worker.alive: dirty.add(worker) self.on_node_lost(worker) for worker in dirty: workers.pop(worker.hostname, None) def get_consumers(self, channel): self.register_timer() ev = self.Receiver(channel, routing_key='worker.#', queue_ttl=self.heartbeat_interval) return [Consumer( channel, queues=[ev.queue], on_message=partial(self.on_message, ev.event_from_message), no_ack=True )] def on_message(self, prepare, message): _type = message.delivery_info['routing_key'] # For redis when `fanout_patterns=False` (See Issue #1882) if _type.split('.', 1)[0] == 'task': return try: handler = self.event_handlers[_type] except KeyError: pass else: return handler(message.payload) # proto2: hostname in header; proto1: in body hostname = (message.headers.get('hostname') or message.payload['hostname']) if hostname != self.hostname: _, event = prepare(message.payload) self.update_state(event) else: self.clock.forward() celery-4.1.0/celery/worker/consumer/heart.py0000644000175000017500000000174413130607475021045 0ustar omeromer00000000000000"""Worker Event Heartbeat Bootstep.""" from __future__ import absolute_import, unicode_literals from celery import bootsteps from celery.worker import heartbeat from .events import Events __all__ = ['Heart'] class Heart(bootsteps.StartStopStep): """Bootstep sending event heartbeats. This service sends a ``worker-heartbeat`` message every n seconds. Note: Not to be confused with AMQP protocol level heartbeats. """ requires = (Events,) def __init__(self, c, without_heartbeat=False, heartbeat_interval=None, **kwargs): self.enabled = not without_heartbeat self.heartbeat_interval = heartbeat_interval c.heart = None super(Heart, self).__init__(c, **kwargs) def start(self, c): c.heart = heartbeat.Heart( c.timer, c.event_dispatcher, self.heartbeat_interval, ) c.heart.start() def stop(self, c): c.heart = c.heart and c.heart.stop() shutdown = stop celery-4.1.0/celery/worker/consumer/tasks.py0000644000175000017500000000374713130607475021074 0ustar omeromer00000000000000"""Worker Task Consumer Bootstep.""" from __future__ import absolute_import, unicode_literals from kombu.common import QoS, ignore_errors from celery import bootsteps from celery.utils.log import get_logger from .mingle import Mingle __all__ = ['Tasks'] logger = get_logger(__name__) debug = logger.debug class Tasks(bootsteps.StartStopStep): """Bootstep starting the task message consumer.""" requires = (Mingle,) def __init__(self, c, **kwargs): c.task_consumer = c.qos = None super(Tasks, self).__init__(c, **kwargs) def start(self, c): """Start task consumer.""" c.update_strategies() # - RabbitMQ 3.3 completely redefines how basic_qos works. # This will detect whether the new qos semantics are in effect, # and if so make sure the 'apply_global' flag is set on qos updates.
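# In other words: RabbitMQ 3.3 reinterpreted the ``global`` flag of
# ``basic_qos`` so that False limits each consumer separately and True
# limits the channel as a whole; to keep a channel-wide prefetch on such
# brokers the qos updates below must pass ``apply_global=True``.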
qos_global = not c.connection.qos_semantics_matches_spec # set initial prefetch count c.connection.default_channel.basic_qos( 0, c.initial_prefetch_count, qos_global, ) c.task_consumer = c.app.amqp.TaskConsumer( c.connection, on_decode_error=c.on_decode_error, ) def set_prefetch_count(prefetch_count): return c.task_consumer.qos( prefetch_count=prefetch_count, apply_global=qos_global, ) c.qos = QoS(set_prefetch_count, c.initial_prefetch_count) def stop(self, c): """Stop task consumer.""" if c.task_consumer: debug('Canceling task consumer...') ignore_errors(c, c.task_consumer.cancel) def shutdown(self, c): """Shutdown task consumer.""" if c.task_consumer: self.stop(c) debug('Closing consumer channel...') ignore_errors(c, c.task_consumer.close) c.task_consumer = None def info(self, c): """Return task consumer info.""" return {'prefetch_count': c.qos.value if c.qos else 'N/A'} celery-4.1.0/celery/worker/consumer/mingle.py0000644000175000017500000000506613130607475021216 0ustar omeromer00000000000000"""Worker <-> Worker Sync at startup (Bootstep).""" from __future__ import absolute_import, unicode_literals from celery import bootsteps from celery.five import items from celery.utils.log import get_logger from .events import Events __all__ = ['Mingle'] logger = get_logger(__name__) debug, info, exception = logger.debug, logger.info, logger.exception class Mingle(bootsteps.StartStopStep): """Bootstep syncing state with neighbor workers. At startup, or upon consumer restart, this will: - Sync logical clocks. - Sync revoked tasks. """ label = 'Mingle' requires = (Events,) compatible_transports = {'amqp', 'redis'} def __init__(self, c, without_mingle=False, **kwargs): self.enabled = not without_mingle and self.compatible_transport(c.app) super(Mingle, self).__init__( c, without_mingle=without_mingle, **kwargs) def compatible_transport(self, app): with app.connection_for_read() as conn: return conn.transport.driver_type in self.compatible_transports def start(self, c): self.sync(c) def sync(self, c): info('mingle: searching for neighbors') replies = self.send_hello(c) if replies: info('mingle: sync with %s nodes', len([reply for reply, value in items(replies) if value])) [self.on_node_reply(c, nodename, reply) for nodename, reply in items(replies) if reply] info('mingle: sync complete') else: info('mingle: all alone') def send_hello(self, c): inspect = c.app.control.inspect(timeout=1.0, connection=c.connection) our_revoked = c.controller.state.revoked replies = inspect.hello(c.hostname, our_revoked._data) or {} replies.pop(c.hostname, None) # delete my own response return replies def on_node_reply(self, c, nodename, reply): debug('mingle: processing reply from %s', nodename) try: self.sync_with_node(c, **reply) except MemoryError: raise except Exception as exc: # pylint: disable=broad-except exception('mingle: sync with %s failed: %r', nodename, exc) def sync_with_node(self, c, clock=None, revoked=None, **kwargs): self.on_clock_event(c, clock) self.on_revoked_received(c, revoked) def on_clock_event(self, c, clock): c.app.clock.adjust(clock) if clock else c.app.clock.forward() def on_revoked_received(self, c, revoked): if revoked: c.controller.state.revoked.update(revoked) celery-4.1.0/celery/worker/consumer/control.py0000644000175000017500000000176613130607475021426 0ustar omeromer00000000000000"""Worker Remote Control Bootstep. ``Control`` -> :mod:`celery.worker.pidbox` -> :mod:`kombu.pidbox`. The actual commands are implemented in :mod:`celery.worker.control`. 
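
Clients reach these commands through the app's control interface,
for example (illustrative; the task name is made up)::

    app.control.ping(timeout=0.5)
    app.control.broadcast(
        'rate_limit',
        arguments={'task_name': 'proj.add', 'rate_limit': '10/m'})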
""" from __future__ import absolute_import, unicode_literals from celery import bootsteps from celery.utils.log import get_logger from celery.worker import pidbox from .tasks import Tasks __all__ = ['Control'] logger = get_logger(__name__) class Control(bootsteps.StartStopStep): """Remote control command service.""" requires = (Tasks,) def __init__(self, c, **kwargs): self.is_green = c.pool is not None and c.pool.is_green self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c) self.start = self.box.start self.stop = self.box.stop self.shutdown = self.box.shutdown super(Control, self).__init__(c, **kwargs) def include_if(self, c): return (c.app.conf.worker_enable_remote_control and c.conninfo.supports_exchange_type('fanout')) celery-4.1.0/celery/worker/consumer/consumer.py0000644000175000017500000005150213130607475021572 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Worker Consumer Blueprint. This module contains the components responsible for consuming messages from the broker, processing the messages and keeping the broker connections up and running. """ from __future__ import absolute_import, unicode_literals import errno import logging import os from collections import defaultdict from time import sleep from billiard.common import restart_state from billiard.exceptions import RestartFreqExceeded from kombu.async.semaphore import DummyLock from kombu.utils.compat import _detect_environment from kombu.utils.encoding import safe_repr, bytes_t from kombu.utils.limits import TokenBucket from vine import ppartial, promise from celery import bootsteps from celery import signals from celery.app.trace import build_tracer from celery.exceptions import InvalidTaskError, NotRegistered from celery.five import buffer_t, items, python_2_unicode_compatible, values from celery.utils.functional import noop from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.objects import Bunch from celery.utils.text import truncate from celery.utils.time import humanize_seconds, rate from celery.worker import loops from celery.worker.state import ( task_reserved, maybe_shutdown, reserved_requests, ) __all__ = ['Consumer', 'Evloop', 'dump_body'] CLOSE = bootsteps.CLOSE TERMINATE = bootsteps.TERMINATE STOP_CONDITIONS = {CLOSE, TERMINATE} logger = get_logger(__name__) debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, logger.error, logger.critical) CONNECTION_RETRY = """\ consumer: Connection to broker lost. \ Trying to re-establish the connection...\ """ CONNECTION_RETRY_STEP = """\ Trying again {when}...\ """ CONNECTION_ERROR = """\ consumer: Cannot connect to %s: %s. %s """ CONNECTION_FAILOVER = """\ Will retry using next failover.\ """ UNKNOWN_FORMAT = """\ Received and deleted unknown message. Wrong destination?!? The full contents of the message body was: %s """ #: Error message for when an unregistered task is received. UNKNOWN_TASK_ERROR = """\ Received unregistered task of type %s. The message has been ignored and discarded. Did you remember to import the module containing this task? Or maybe you're using relative imports? Please see http://docs.celeryq.org/en/latest/internals/protocol.html for more information. The full contents of the message body was: %s """ #: Error message for when an invalid task message is received. INVALID_TASK_ERROR = """\ Received invalid task message: %s The message has been ignored and discarded. 
Please ensure your message conforms to the task message protocol as described here: http://docs.celeryq.org/en/latest/internals/protocol.html The full contents of the message body was: %s """ MESSAGE_DECODE_ERROR = """\ Can't decode message body: %r [type:%r encoding:%r headers:%s] body: %s """ MESSAGE_REPORT = """\ body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3} headers={4}}} """ def dump_body(m, body): """Format message body for debugging purposes.""" # v2 protocol does not deserialize body body = m.body if body is None else body if isinstance(body, buffer_t): body = bytes_t(body) return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024), len(m.body)) @python_2_unicode_compatible class Consumer(object): """Consumer blueprint.""" Strategies = dict #: Optional callback called the first time the worker #: is ready to receive tasks. init_callback = None #: The current worker pool instance. pool = None #: A timer used for high-priority internal tasks, such #: as sending heartbeats. timer = None restart_count = -1 # first start is the same as a restart class Blueprint(bootsteps.Blueprint): """Consumer blueprint.""" name = 'Consumer' default_steps = [ 'celery.worker.consumer.connection:Connection', 'celery.worker.consumer.mingle:Mingle', 'celery.worker.consumer.events:Events', 'celery.worker.consumer.gossip:Gossip', 'celery.worker.consumer.heart:Heart', 'celery.worker.consumer.control:Control', 'celery.worker.consumer.tasks:Tasks', 'celery.worker.consumer.consumer:Evloop', 'celery.worker.consumer.agent:Agent', ] def shutdown(self, parent): self.send_all(parent, 'shutdown') def __init__(self, on_task_request, init_callback=noop, hostname=None, pool=None, app=None, timer=None, controller=None, hub=None, amqheartbeat=None, worker_options=None, disable_rate_limits=False, initial_prefetch_count=2, prefetch_multiplier=1, **kwargs): self.app = app self.controller = controller self.init_callback = init_callback self.hostname = hostname or gethostname() self.pid = os.getpid() self.pool = pool self.timer = timer self.strategies = self.Strategies() self.conninfo = self.app.connection_for_read() self.connection_errors = self.conninfo.connection_errors self.channel_errors = self.conninfo.channel_errors self._restart_state = restart_state(maxR=5, maxT=1) self._does_info = logger.isEnabledFor(logging.INFO) self._limit_order = 0 self.on_task_request = on_task_request self.on_task_message = set() self.amqheartbeat_rate = self.app.conf.broker_heartbeat_checkrate self.disable_rate_limits = disable_rate_limits self.initial_prefetch_count = initial_prefetch_count self.prefetch_multiplier = prefetch_multiplier # this contains a tokenbucket for each task type by name, used for # rate limits, or None if rate limits are disabled for that task. self.task_buckets = defaultdict(lambda: None) self.reset_rate_limits() self.hub = hub if self.hub or getattr(self.pool, 'is_green', False): self.amqheartbeat = amqheartbeat if self.amqheartbeat is None: self.amqheartbeat = self.app.conf.broker_heartbeat else: self.amqheartbeat = 0 if not hasattr(self, 'loop'): self.loop = loops.asynloop if hub else loops.synloop if _detect_environment() == 'gevent': # there's a gevent bug that causes timeouts to not be reset, # so if the connection timeout is exceeded once, it can NEVER # connect again. 
self.app.conf.broker_connection_timeout = None self._pending_operations = [] self.steps = [] self.blueprint = self.Blueprint( steps=self.app.steps['consumer'], on_close=self.on_close, ) self.blueprint.apply(self, **dict(worker_options or {}, **kwargs)) def call_soon(self, p, *args, **kwargs): p = ppartial(p, *args, **kwargs) if self.hub: return self.hub.call_soon(p) self._pending_operations.append(p) return p def perform_pending_operations(self): if not self.hub: while self._pending_operations: try: self._pending_operations.pop()() except Exception as exc: # pylint: disable=broad-except logger.exception('Pending callback raised: %r', exc) def bucket_for_task(self, type): limit = rate(getattr(type, 'rate_limit', None)) return TokenBucket(limit, capacity=1) if limit else None def reset_rate_limits(self): self.task_buckets.update( (n, self.bucket_for_task(t)) for n, t in items(self.app.tasks) ) def _update_prefetch_count(self, index=0): """Update prefetch count after pool/shrink grow operations. Index must be the change in number of processes as a positive (increasing) or negative (decreasing) number. Note: Currently pool grow operations will end up with an offset of +1 if the initial size of the pool was 0 (e.g. :option:`--autoscale=1,0 `). """ num_processes = self.pool.num_processes if not self.initial_prefetch_count or not num_processes: return # prefetch disabled self.initial_prefetch_count = ( self.pool.num_processes * self.prefetch_multiplier ) return self._update_qos_eventually(index) def _update_qos_eventually(self, index): return (self.qos.decrement_eventually if index < 0 else self.qos.increment_eventually)( abs(index) * self.prefetch_multiplier) def _limit_move_to_pool(self, request): task_reserved(request) self.on_task_request(request) def _on_bucket_wakeup(self, bucket, tokens): try: request = bucket.pop() except IndexError: pass else: self._limit_move_to_pool(request) self._schedule_oldest_bucket_request(bucket, tokens) def _schedule_oldest_bucket_request(self, bucket, tokens): try: request = bucket.pop() except IndexError: pass else: return self._schedule_bucket_request(request, bucket, tokens) def _schedule_bucket_request(self, request, bucket, tokens): bucket.can_consume(tokens) bucket.add(request) pri = self._limit_order = (self._limit_order + 1) % 10 hold = bucket.expected_time(tokens) self.timer.call_after( hold, self._on_bucket_wakeup, (bucket, tokens), priority=pri, ) def _limit_task(self, request, bucket, tokens): if bucket.contents: return bucket.add(request) return self._schedule_bucket_request(request, bucket, tokens) def start(self): blueprint = self.blueprint while blueprint.state not in STOP_CONDITIONS: maybe_shutdown() if self.restart_count: try: self._restart_state.step() except RestartFreqExceeded as exc: crit('Frequent restarts detected: %r', exc, exc_info=1) sleep(1) self.restart_count += 1 try: blueprint.start(self) except self.connection_errors as exc: # If we're not retrying connections, no need to catch # connection errors if not self.app.conf.broker_connection_retry: raise if isinstance(exc, OSError) and exc.errno == errno.EMFILE: raise # Too many open files maybe_shutdown() if blueprint.state not in STOP_CONDITIONS: if self.connection: self.on_connection_error_after_connected(exc) else: self.on_connection_error_before_connected(exc) self.on_close() blueprint.restart(self) def on_connection_error_before_connected(self, exc): error(CONNECTION_ERROR, self.conninfo.as_uri(), exc, 'Trying to reconnect...') def on_connection_error_after_connected(self, exc): 
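# Reached when an established connection dies: log the retry warning,
# then collect() the dead connection so its resources are released
# before start() restarts the blueprint.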
warn(CONNECTION_RETRY, exc_info=True) try: self.connection.collect() except Exception: # pylint: disable=broad-except pass def register_with_event_loop(self, hub): self.blueprint.send_all( self, 'register_with_event_loop', args=(hub,), description='Hub.register', ) def shutdown(self): self.blueprint.shutdown(self) def stop(self): self.blueprint.stop(self) def on_ready(self): callback, self.init_callback = self.init_callback, None if callback: callback(self) def loop_args(self): return (self, self.connection, self.task_consumer, self.blueprint, self.hub, self.qos, self.amqheartbeat, self.app.clock, self.amqheartbeat_rate) def on_decode_error(self, message, exc): """Callback called if an error occurs while decoding a message. Simply logs the error and acknowledges the message so it doesn't enter a loop. Arguments: message (kombu.Message): The message received. exc (Exception): The exception being handled. """ crit(MESSAGE_DECODE_ERROR, exc, message.content_type, message.content_encoding, safe_repr(message.headers), dump_body(message, message.body), exc_info=1) message.ack() def on_close(self): # Clear internal queues to get rid of old messages. # They can't be acked anyway, as a delivery tag is specific # to the current channel. if self.controller and self.controller.semaphore: self.controller.semaphore.clear() if self.timer: self.timer.clear() for bucket in values(self.task_buckets): if bucket: bucket.clear_pending() reserved_requests.clear() if self.pool and self.pool.flush: self.pool.flush() def connect(self): """Establish the broker connection used for consuming tasks. Retries establishing the connection if the :setting:`broker_connection_retry` setting is enabled """ conn = self.connection_for_read(heartbeat=self.amqheartbeat) if self.hub: conn.transport.register_with_event_loop(conn.connection, self.hub) return conn def connection_for_read(self, heartbeat=None): return self.ensure_connected( self.app.connection_for_read(heartbeat=heartbeat)) def connection_for_write(self, heartbeat=None): return self.ensure_connected( self.app.connection_for_write(heartbeat=heartbeat)) def ensure_connected(self, conn): # Callback called for each retry while the connection # can't be established. def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP): if getattr(conn, 'alt', None) and interval == 0: next_step = CONNECTION_FAILOVER error(CONNECTION_ERROR, conn.as_uri(), exc, next_step.format(when=humanize_seconds(interval, 'in', ' '))) # remember that the connection is lazy, it won't establish # until needed. if not self.app.conf.broker_connection_retry: # retry disabled, just call connect directly. conn.connect() return conn conn = conn.ensure_connection( _error_handler, self.app.conf.broker_connection_max_retries, callback=maybe_shutdown, ) return conn def _flush_events(self): if self.event_dispatcher: self.event_dispatcher.flush() def on_send_event_buffered(self): if self.hub: self.hub._ready.add(self._flush_events) def add_task_queue(self, queue, exchange=None, exchange_type=None, routing_key=None, **options): cset = self.task_consumer queues = self.app.amqp.queues # Must use in' here, as __missing__ will automatically # create queues when :setting:`task_create_missing_queues` is enabled. 
# (Issue #1079) if queue in queues: q = queues[queue] else: exchange = queue if exchange is None else exchange exchange_type = ('direct' if exchange_type is None else exchange_type) q = queues.select_add(queue, exchange=exchange, exchange_type=exchange_type, routing_key=routing_key, **options) if not cset.consuming_from(queue): cset.add_queue(q) cset.consume() info('Started consuming from %s', queue) def cancel_task_queue(self, queue): info('Canceling queue %s', queue) self.app.amqp.queues.deselect(queue) self.task_consumer.cancel_by_queue(queue) def apply_eta_task(self, task): """Method called by the timer to apply a task with an ETA/countdown.""" task_reserved(task) self.on_task_request(task) self.qos.decrement_eventually() def _message_report(self, body, message): return MESSAGE_REPORT.format(dump_body(message, body), safe_repr(message.content_type), safe_repr(message.content_encoding), safe_repr(message.delivery_info), safe_repr(message.headers)) def on_unknown_message(self, body, message): warn(UNKNOWN_FORMAT, self._message_report(body, message)) message.reject_log_error(logger, self.connection_errors) signals.task_rejected.send(sender=self, message=message, exc=None) def on_unknown_task(self, body, message, exc): error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) try: id_, name = message.headers['id'], message.headers['task'] root_id = message.headers.get('root_id') except KeyError: # proto1 payload = message.payload id_, name = payload['id'], payload['task'] root_id = None request = Bunch( name=name, chord=None, root_id=root_id, correlation_id=message.properties.get('correlation_id'), reply_to=message.properties.get('reply_to'), errbacks=None, ) message.reject_log_error(logger, self.connection_errors) self.app.backend.mark_as_failure( id_, NotRegistered(name), request=request, ) if self.event_dispatcher: self.event_dispatcher.send( 'task-failed', uuid=id_, exception='NotRegistered({0!r})'.format(name), ) signals.task_unknown.send( sender=self, message=message, exc=exc, name=name, id=id_, ) def on_invalid_task(self, body, message, exc): error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) message.reject_log_error(logger, self.connection_errors) signals.task_rejected.send(sender=self, message=message, exc=exc) def update_strategies(self): loader = self.app.loader for name, task in items(self.app.tasks): self.strategies[name] = task.start_strategy(self.app, self) task.__trace__ = build_tracer(name, task, loader, self.hostname, app=self.app) def create_task_handler(self, promise=promise): strategies = self.strategies on_unknown_message = self.on_unknown_message on_unknown_task = self.on_unknown_task on_invalid_task = self.on_invalid_task callbacks = self.on_task_message call_soon = self.call_soon def on_task_received(message): # payload will only be set for v1 protocol, since v2 # will defer deserializing the message body to the pool. 
payload = None try: type_ = message.headers['task'] # protocol v2 except TypeError: return on_unknown_message(None, message) except KeyError: try: payload = message.decode() except Exception as exc: # pylint: disable=broad-except return self.on_decode_error(message, exc) try: type_, payload = payload['task'], payload # protocol v1 except (TypeError, KeyError): return on_unknown_message(payload, message) try: strategy = strategies[type_] except KeyError as exc: return on_unknown_task(None, message, exc) else: try: strategy( message, payload, promise(call_soon, (message.ack_log_error,)), promise(call_soon, (message.reject_log_error,)), callbacks, ) except InvalidTaskError as exc: return on_invalid_task(payload, message, exc) return on_task_received def __repr__(self): """``repr(self)``.""" return '<Consumer: {self.hostname} ({state})>'.format( self=self, state=self.blueprint.human_state(), ) class Evloop(bootsteps.StartStopStep): """Event loop service. Note: This is always started last. """ label = 'event loop' last = True def start(self, c): self.patch_all(c) c.loop(*c.loop_args()) def patch_all(self, c): c.qos._mutex = DummyLock() celery-4.1.0/celery/worker/consumer/events.py0000644000175000017500000000404013130607475021236 0ustar omeromer00000000000000"""Worker Event Dispatcher Bootstep. ``Events`` -> :class:`celery.events.EventDispatcher`. """ from __future__ import absolute_import, unicode_literals from kombu.common import ignore_errors from celery import bootsteps from .connection import Connection __all__ = ['Events'] class Events(bootsteps.StartStopStep): """Service used for sending monitoring events.""" requires = (Connection,) def __init__(self, c, task_events=True, without_heartbeat=False, without_gossip=False, **kwargs): self.groups = None if task_events else ['worker'] self.send_events = ( task_events or not without_gossip or not without_heartbeat ) c.event_dispatcher = None super(Events, self).__init__(c, **kwargs) def start(self, c): # flush events sent while connection was down. prev = self._close(c) dis = c.event_dispatcher = c.app.events.Dispatcher( c.connection_for_write(), hostname=c.hostname, enabled=self.send_events, groups=self.groups, # we currently only buffer events when the event loop is enabled # XXX This excludes eventlet/gevent, which should actually buffer. buffer_group=['task'] if c.hub else None, on_send_buffered=c.on_send_event_buffered if c.hub else None, ) if prev: dis.extend_buffer(prev) dis.flush() def stop(self, c): pass def _close(self, c): if c.event_dispatcher: dispatcher = c.event_dispatcher # remember changes from remote control commands: self.groups = dispatcher.groups # close custom connection if dispatcher.connection: ignore_errors(c, dispatcher.connection.close) ignore_errors(c, dispatcher.close) c.event_dispatcher = None return dispatcher def shutdown(self, c): self._close(c) celery-4.1.0/celery/worker/loops.py0000644000175000017500000001045213130607475017237 0ustar omeromer00000000000000"""The consumer's highly-optimized inner loop.""" from __future__ import absolute_import, unicode_literals import errno import socket from celery import bootsteps from celery.exceptions import WorkerShutdown, WorkerTerminate, WorkerLostError from celery.utils.log import get_logger from . import state __all__ = ['asynloop', 'synloop'] # pylint: disable=redefined-outer-name # We cache globals and attribute lookups, so disable this warning.
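# Added note: the Evloop bootstep above starts one of these loops via
# ``c.loop(*c.loop_args())``, so the positional parameters below mirror
# ``Consumer.loop_args()`` in consumer.py (consumer object, connection,
# task consumer, blueprint, hub, qos, heartbeat, clock, heartbeat rate).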
logger = get_logger(__name__) def _quick_drain(connection, timeout=0.1): try: connection.drain_events(timeout=timeout) except Exception as exc: # pylint: disable=broad-except exc_errno = getattr(exc, 'errno', None) if exc_errno is not None and exc_errno != errno.EAGAIN: raise def _enable_amqheartbeats(timer, connection, rate=2.0): if connection: tick = connection.heartbeat_check heartbeat = connection.get_heartbeat_interval() # negotiated if heartbeat and connection.supports_heartbeats: timer.call_repeatedly(heartbeat / rate, tick, (rate,)) def asynloop(obj, connection, consumer, blueprint, hub, qos, heartbeat, clock, hbrate=2.0): """Non-blocking event loop.""" RUN = bootsteps.RUN update_qos = qos.update errors = connection.connection_errors on_task_received = obj.create_task_handler() _enable_amqheartbeats(hub.timer, connection, rate=hbrate) consumer.on_message = on_task_received obj.controller.register_with_event_loop(hub) obj.register_with_event_loop(hub) consumer.consume() obj.on_ready() # did_start_ok will verify that pool processes were able to start, # but this will only work the first time we start, as # maxtasksperchild will mess up metrics. if not obj.restart_count and not obj.pool.did_start_ok(): raise WorkerLostError('Could not start worker processes') # consumer.consume() may have prefetched up to our # limit - drain an event so we're in a clean state # prior to starting our event loop. if connection.transport.driver_type == 'amqp': hub.call_soon(_quick_drain, connection) # FIXME: Use loop.run_forever # Tried and works, but no time to test properly before release. hub.propagate_errors = errors loop = hub.create_loop() try: while blueprint.state == RUN and obj.connection: # shutdown if signal handlers told us to. should_stop, should_terminate = ( state.should_stop, state.should_terminate, ) # False == EX_OK, so must use is not False if should_stop is not None and should_stop is not False: raise WorkerShutdown(should_stop) elif should_terminate is not None and should_terminate is not False: raise WorkerTerminate(should_terminate) # We only update QoS when there's no more messages to read. # This groups together qos calls, and makes sure that remote # control commands will be prioritized over task messages.
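# Added note: increment_eventually()/decrement_eventually() used by the
# consumer only adjust ``qos.value``; the prefetch update is applied here
# in one update_qos() call, so several eventual changes made while
# draining messages collapse into a single broker round-trip.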
if qos.prev != qos.value: update_qos() try: next(loop) except StopIteration: loop = hub.create_loop() finally: try: hub.reset() except Exception as exc: # pylint: disable=broad-except logger.exception( 'Error cleaning up after event loop: %r', exc) def synloop(obj, connection, consumer, blueprint, hub, qos, heartbeat, clock, hbrate=2.0, **kwargs): """Fallback blocking event loop for transports that don't support AIO.""" RUN = bootsteps.RUN on_task_received = obj.create_task_handler() perform_pending_operations = obj.perform_pending_operations if getattr(obj.pool, 'is_green', False): _enable_amqheartbeats(obj.timer, connection, rate=hbrate) consumer.on_message = on_task_received consumer.consume() obj.on_ready() while blueprint.state == RUN and obj.connection: state.maybe_shutdown() if qos.prev != qos.value: qos.update() try: perform_pending_operations() connection.drain_events(timeout=2.0) except socket.timeout: pass except socket.error: if blueprint.state == RUN: raise celery-4.1.0/celery/schedules.py0000644000175000017500000007072213130607475016557 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Schedules define the intervals at which periodic tasks run.""" from __future__ import absolute_import, unicode_literals import numbers import re from bisect import bisect, bisect_left from collections import Iterable, namedtuple from datetime import datetime, timedelta from kombu.utils.objects import cached_property from . import current_app from .five import python_2_unicode_compatible, range, string_t from .utils.collections import AttributeDict from .utils.time import ( weekday, maybe_timedelta, remaining, humanize_seconds, timezone, maybe_make_aware, ffwd, localize ) __all__ = [ 'ParseException', 'schedule', 'crontab', 'crontab_parser', 'maybe_schedule', 'solar', ] schedstate = namedtuple('schedstate', ('is_due', 'next')) CRON_PATTERN_INVALID = """\ Invalid crontab pattern. Valid range is {min}-{max}. \ '{value}' was found.\ """ CRON_INVALID_TYPE = """\ Argument cronspec needs to be of any of the following types: \ int, str, or an iterable type. {type!r} was given.\ """ CRON_REPR = """\ <crontab: {0._orig_minute} {0._orig_hour} {0._orig_day_of_week} \ {0._orig_day_of_month} {0._orig_month_of_year} (m/h/d/dM/MY)>\ """ SOLAR_INVALID_LATITUDE = """\ Argument latitude {lat} is invalid, must be between -90 and 90.\ """ SOLAR_INVALID_LONGITUDE = """\ Argument longitude {lon} is invalid, must be between -180 and 180.\ """ SOLAR_INVALID_EVENT = """\ Argument event "{event}" is invalid, must be one of {all_events}.\ """ def cronfield(s): return '*' if s is None else s class ParseException(Exception): """Raised by :class:`crontab_parser` when the input can't be parsed.""" class BaseSchedule(object): def __init__(self, nowfun=None, app=None): self.nowfun = nowfun self._app = app def now(self): return (self.nowfun or self.app.now)() def remaining_estimate(self, last_run_at): raise NotImplementedError() def is_due(self, last_run_at): raise NotImplementedError() def maybe_make_aware(self, dt): return maybe_make_aware(dt, self.tz) @property def app(self): return self._app or current_app._get_current_object() @app.setter # noqa def app(self, app): self._app = app @cached_property def tz(self): return self.app.timezone @cached_property def utc_enabled(self): return self.app.conf.enable_utc def to_local(self, dt): if not self.utc_enabled: return timezone.to_local_fallback(dt) return dt def __eq__(self, other): if isinstance(other, BaseSchedule): return other.nowfun == self.nowfun return NotImplemented @python_2_unicode_compatible class schedule(BaseSchedule): """Schedule for periodic task.
Arguments: run_every (float, ~datetime.timedelta): Time interval. relative (bool): If set to True the run time will be rounded to the resolution of the interval. nowfun (Callable): Function returning the current date and time (:class:`~datetime.datetime`). app (~@Celery): Celery app instance. """ relative = False def __init__(self, run_every=None, relative=False, nowfun=None, app=None): self.run_every = maybe_timedelta(run_every) self.relative = relative super(schedule, self).__init__(nowfun=nowfun, app=app) def remaining_estimate(self, last_run_at): return remaining( self.maybe_make_aware(last_run_at), self.run_every, self.maybe_make_aware(self.now()), self.relative, ) def is_due(self, last_run_at): """Return tuple of ``(is_due, next_time_to_check)``. Notes: - next time to check is in seconds. - ``(True, 20)``, means the task should be run now, and the next time to check is in 20 seconds. - ``(False, 12.3)``, means the task is not due, but that the scheduler should check again in 12.3 seconds. The next time to check is used to save energy/CPU cycles, it does not need to be accurate but will influence the precision of your schedule. You must also keep in mind the value of :setting:`beat_max_loop_interval`, that decides the maximum number of seconds the scheduler can sleep between re-checking the periodic task intervals. So if you have a task that changes schedule at run-time then your next_run_at check will decide how long it will take before a change to the schedule takes effect. The max loop interval takes precedence over the next check at value returned. .. admonition:: Scheduler max interval variance The default max loop interval may vary for different schedulers. For the default scheduler the value is 5 minutes, but for example the :pypi:`django-celery-beat` database scheduler the value is 5 seconds. """ last_run_at = self.maybe_make_aware(last_run_at) rem_delta = self.remaining_estimate(last_run_at) remaining_s = max(rem_delta.total_seconds(), 0) if remaining_s == 0: return schedstate(is_due=True, next=self.seconds) return schedstate(is_due=False, next=remaining_s) def __repr__(self): return '<freq: {0.human_seconds}>'.format(self) def __eq__(self, other): if isinstance(other, schedule): return self.run_every == other.run_every return self.run_every == other def __ne__(self, other): return not self.__eq__(other) def __reduce__(self): return self.__class__, (self.run_every, self.relative, self.nowfun) @property def seconds(self): return max(self.run_every.total_seconds(), 0) @property def human_seconds(self): return humanize_seconds(self.seconds) class crontab_parser(object): """Parser for Crontab expressions. Any expression of the form 'groups' (see BNF grammar below) is accepted and expanded to a set of numbers. These numbers represent the units of time that the Crontab needs to run on: .. code-block:: bnf digit :: '0'..'9' dow :: 'a'..'z' number :: digit+ | dow+ steps :: number range :: number ( '-' number ) ? numspec :: '*' | range expr :: numspec ( '/' steps ) ? groups :: expr ( ',' expr ) * The parser is a general purpose one, useful for parsing hours, minutes and day of week expressions. Example usage: .. code-block:: pycon >>> minutes = crontab_parser(60).parse('*/15') [0, 15, 30, 45] >>> hours = crontab_parser(24).parse('*/4') [0, 4, 8, 12, 16, 20] >>> day_of_week = crontab_parser(7).parse('*') [0, 1, 2, 3, 4, 5, 6] It can also parse day of month and month of year expressions if initialized with a minimum of 1. Example usage: ..
code-block:: pycon >>> days_of_month = crontab_parser(31, 1).parse('*/3') [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31] >>> months_of_year = crontab_parser(12, 1).parse('*/2') [1, 3, 5, 7, 9, 11] >>> months_of_year = crontab_parser(12, 1).parse('2-12/2') [2, 4, 6, 8, 10, 12] The maximum possible expanded value returned is found by the formula: :math:`max_ + min_ - 1` """ ParseException = ParseException _range = r'(\w+?)-(\w+)' _steps = r'/(\w+)?' _star = r'\*' def __init__(self, max_=60, min_=0): self.max_ = max_ self.min_ = min_ self.pats = ( (re.compile(self._range + self._steps), self._range_steps), (re.compile(self._range), self._expand_range), (re.compile(self._star + self._steps), self._star_steps), (re.compile('^' + self._star + '$'), self._expand_star), ) def parse(self, spec): acc = set() for part in spec.split(','): if not part: raise self.ParseException('empty part') acc |= set(self._parse_part(part)) return acc def _parse_part(self, part): for regex, handler in self.pats: m = regex.match(part) if m: return handler(m.groups()) return self._expand_range((part,)) def _expand_range(self, toks): fr = self._expand_number(toks[0]) if len(toks) > 1: to = self._expand_number(toks[1]) if to < fr: # Wrap around max_ if necessary return (list(range(fr, self.min_ + self.max_)) + list(range(self.min_, to + 1))) return list(range(fr, to + 1)) return [fr] def _range_steps(self, toks): if len(toks) != 3 or not toks[2]: raise self.ParseException('empty filter') return self._expand_range(toks[:2])[::int(toks[2])] def _star_steps(self, toks): if not toks or not toks[0]: raise self.ParseException('empty filter') return self._expand_star()[::int(toks[0])] def _expand_star(self, *args): return list(range(self.min_, self.max_ + self.min_)) def _expand_number(self, s): if isinstance(s, string_t) and s[0] == '-': raise self.ParseException('negative numbers not supported') try: i = int(s) except ValueError: try: i = weekday(s) except KeyError: raise ValueError('Invalid weekday literal {0!r}.'.format(s)) max_val = self.min_ + self.max_ - 1 if i > max_val: raise ValueError( 'Invalid end range: {0} > {1}.'.format(i, max_val)) if i < self.min_: raise ValueError( 'Invalid beginning range: {0} < {1}.'.format(i, self.min_)) return i @python_2_unicode_compatible class crontab(BaseSchedule): """Crontab schedule. A Crontab can be used as the ``run_every`` value of a periodic task entry to add :manpage:`crontab(5)`-like scheduling. Like a :manpage:`cron(5)`-job, you can specify units of time of when you'd like the task to execute. It's a reasonably complete implementation of :command:`cron`'s features, so it should provide a fair degree of scheduling needs. You can specify a minute, an hour, a day of the week, a day of the month, and/or a month in the year in any of the following formats: .. attribute:: minute - A (list of) integers from 0-59 that represent the minutes of an hour of when execution should occur; or - A string representing a Crontab pattern. This may get pretty advanced, like ``minute='*/15'`` (for every quarter) or ``minute='1,13,30-45,50-59/2'``. .. attribute:: hour - A (list of) integers from 0-23 that represent the hours of a day of when execution should occur; or - A string representing a Crontab pattern. This may get pretty advanced, like ``hour='*/3'`` (for every three hours) or ``hour='0,8-17/2'`` (at midnight, and every two hours during office hours). .. 
attribute:: day_of_week - A (list of) integers from 0-6, where Sunday = 0 and Saturday = 6, that represent the days of a week that execution should occur. - A string representing a Crontab pattern. This may get pretty advanced, like ``day_of_week='mon-fri'`` (for weekdays only). (Beware that ``day_of_week='*/2'`` does not literally mean 'every two days', but 'every day that is divisible by two'!) .. attribute:: day_of_month - A (list of) integers from 1-31 that represents the days of the month that execution should occur. - A string representing a Crontab pattern. This may get pretty advanced, such as ``day_of_month='2-30/3'`` (for every even numbered day) or ``day_of_month='1-7,15-21'`` (for the first and third weeks of the month). .. attribute:: month_of_year - A (list of) integers from 1-12 that represents the months of the year during which execution can occur. - A string representing a Crontab pattern. This may get pretty advanced, such as ``month_of_year='*/3'`` (for the first month of every quarter) or ``month_of_year='2-12/2'`` (for every even numbered month). .. attribute:: nowfun Function returning the current date and time (:class:`~datetime.datetime`). .. attribute:: app The Celery app instance. It's important to realize that any day on which execution should occur must be represented by entries in all three of the day and month attributes. For example, if ``day_of_week`` is 0 and ``day_of_month`` is every seventh day, only months that begin on Sunday and are also in the ``month_of_year`` attribute will have execution events. Or, ``day_of_week`` is 1 and ``day_of_month`` is '1-7,15-21' means every first and third Monday of every month present in ``month_of_year``. """ def __init__(self, minute='*', hour='*', day_of_week='*', day_of_month='*', month_of_year='*', **kwargs): self._orig_minute = cronfield(minute) self._orig_hour = cronfield(hour) self._orig_day_of_week = cronfield(day_of_week) self._orig_day_of_month = cronfield(day_of_month) self._orig_month_of_year = cronfield(month_of_year) self._orig_kwargs = kwargs self.hour = self._expand_cronspec(hour, 24) self.minute = self._expand_cronspec(minute, 60) self.day_of_week = self._expand_cronspec(day_of_week, 7) self.day_of_month = self._expand_cronspec(day_of_month, 31, 1) self.month_of_year = self._expand_cronspec(month_of_year, 12, 1) super(crontab, self).__init__(**kwargs) @staticmethod def _expand_cronspec(cronspec, max_, min_=0): """Expand cron specification. Takes the given cronspec argument in one of the forms: .. code-block:: text int (like 7) str (like '3-5,*/15', '*', or 'monday') set (like {0,15,30,45} list (like [8-17]) And convert it to an (expanded) set representing all time unit values on which the Crontab triggers. Only in case of the base type being :class:`str`, parsing occurs. (It's fast and happens only once for each Crontab instance, so there's no significant performance overhead involved.) For the other base types, merely Python type conversions happen. The argument ``max_`` is needed to determine the expansion of ``*`` and ranges. The argument ``min_`` is needed to determine the expansion of ``*`` and ranges for 1-based cronspecs, such as day of month or month of year. The default is sufficient for minute, hour, and day of week. 
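Example (illustrative):

    .. code-block:: pycon

        >>> sorted(crontab._expand_cronspec('*/15', 60))
        [0, 15, 30, 45]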
""" if isinstance(cronspec, numbers.Integral): result = {cronspec} elif isinstance(cronspec, string_t): result = crontab_parser(max_, min_).parse(cronspec) elif isinstance(cronspec, set): result = cronspec elif isinstance(cronspec, Iterable): result = set(cronspec) else: raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec))) # assure the result does not preceed the min or exceed the max for number in result: if number >= max_ + min_ or number < min_: raise ValueError(CRON_PATTERN_INVALID.format( min=min_, max=max_ - 1 + min_, value=number)) return result def _delta_to_next(self, last_run_at, next_hour, next_minute): """Find next delta. Takes a :class:`~datetime.datetime` of last run, next minute and hour, and returns a :class:`~celery.utils.time.ffwd` for the next scheduled day and time. Only called when ``day_of_month`` and/or ``month_of_year`` cronspec is specified to further limit scheduled task execution. """ datedata = AttributeDict(year=last_run_at.year) days_of_month = sorted(self.day_of_month) months_of_year = sorted(self.month_of_year) def day_out_of_range(year, month, day): try: datetime(year=year, month=month, day=day) except ValueError: return True return False def roll_over(): for _ in range(2000): flag = (datedata.dom == len(days_of_month) or day_out_of_range(datedata.year, months_of_year[datedata.moy], days_of_month[datedata.dom]) or (self.maybe_make_aware(datetime(datedata.year, months_of_year[datedata.moy], days_of_month[datedata.dom])) < last_run_at)) if flag: datedata.dom = 0 datedata.moy += 1 if datedata.moy == len(months_of_year): datedata.moy = 0 datedata.year += 1 else: break else: # Tried 2000 times, we're most likely in an infinite loop raise RuntimeError('unable to rollover, ' 'time specification is probably invalid') if last_run_at.month in self.month_of_year: datedata.dom = bisect(days_of_month, last_run_at.day) datedata.moy = bisect_left(months_of_year, last_run_at.month) else: datedata.dom = 0 datedata.moy = bisect(months_of_year, last_run_at.month) if datedata.moy == len(months_of_year): datedata.moy = 0 roll_over() while 1: th = datetime(year=datedata.year, month=months_of_year[datedata.moy], day=days_of_month[datedata.dom]) if th.isoweekday() % 7 in self.day_of_week: break datedata.dom += 1 roll_over() return ffwd(year=datedata.year, month=months_of_year[datedata.moy], day=days_of_month[datedata.dom], hour=next_hour, minute=next_minute, second=0, microsecond=0) def __repr__(self): return CRON_REPR.format(self) def __reduce__(self): return (self.__class__, (self._orig_minute, self._orig_hour, self._orig_day_of_week, self._orig_day_of_month, self._orig_month_of_year), self._orig_kwargs) def __setstate__(self, state): # Calling super's init because the kwargs aren't necessarily passed in # the same form as they are stored by the superclass super(crontab, self).__init__(**state) def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd): # pylint: disable=redefined-outer-name # caching global ffwd tz = tz or self.tz last_run_at = self.maybe_make_aware(last_run_at) now = self.maybe_make_aware(self.now()) dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7 execute_this_date = ( last_run_at.month in self.month_of_year and last_run_at.day in self.day_of_month and dow_num in self.day_of_week ) execute_this_hour = ( execute_this_date and last_run_at.day == now.day and last_run_at.month == now.month and last_run_at.year == now.year and last_run_at.hour in self.hour and last_run_at.minute < max(self.minute) ) if execute_this_hour: next_minute = 
min(minute for minute in self.minute if minute > last_run_at.minute) delta = ffwd(minute=next_minute, second=0, microsecond=0) else: next_minute = min(self.minute) execute_today = (execute_this_date and last_run_at.hour < max(self.hour)) if execute_today: next_hour = min(hour for hour in self.hour if hour > last_run_at.hour) delta = ffwd(hour=next_hour, minute=next_minute, second=0, microsecond=0) else: next_hour = min(self.hour) all_dom_moy = (self._orig_day_of_month == '*' and self._orig_month_of_year == '*') if all_dom_moy: next_day = min([day for day in self.day_of_week if day > dow_num] or self.day_of_week) add_week = next_day == dow_num delta = ffwd( weeks=add_week and 1 or 0, weekday=(next_day - 1) % 7, hour=next_hour, minute=next_minute, second=0, microsecond=0, ) else: delta = self._delta_to_next(last_run_at, next_hour, next_minute) return self.to_local(last_run_at), delta, self.to_local(now) def remaining_estimate(self, last_run_at, ffwd=ffwd): """Estimate of next run time. Returns when the periodic task should run next as a :class:`~datetime.timedelta`. """ # pylint: disable=redefined-outer-name # caching global ffwd return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) def is_due(self, last_run_at): """Return tuple of ``(is_due, next_time_to_run)``. Note: Next time to run is in seconds. See Also: :meth:`celery.schedules.schedule.is_due` for more information. """ rem_delta = self.remaining_estimate(last_run_at) rem = max(rem_delta.total_seconds(), 0) due = rem == 0 if due: rem_delta = self.remaining_estimate(self.now()) rem = max(rem_delta.total_seconds(), 0) return schedstate(due, rem) def __eq__(self, other): if isinstance(other, crontab): return ( other.month_of_year == self.month_of_year and other.day_of_month == self.day_of_month and other.day_of_week == self.day_of_week and other.hour == self.hour and other.minute == self.minute and super(crontab, self).__eq__(other) ) return NotImplemented def __ne__(self, other): res = self.__eq__(other) if res is NotImplemented: return True return not res def maybe_schedule(s, relative=False, app=None): """Return schedule from number, timedelta, or actual schedule.""" if s is not None: if isinstance(s, numbers.Number): s = timedelta(seconds=s) if isinstance(s, timedelta): return schedule(s, relative, app=app) else: s.app = app return s @python_2_unicode_compatible class solar(BaseSchedule): """Solar event. A solar event can be used as the ``run_every`` value of a periodic task entry to schedule based on certain solar events. Notes: Available event values are: - ``dawn_astronomical`` - ``dawn_nautical`` - ``dawn_civil`` - ``sunrise`` - ``solar_noon`` - ``sunset`` - ``dusk_civil`` - ``dusk_nautical`` - ``dusk_astronomical`` Arguments: event (str): Solar event that triggers this task. See note for available values. lat (int): The latitude of the observer. lon (int): The longitude of the observer. nowfun (Callable): Function returning the current date and time as a :class:`~datetime.datetime`. app (~@Celery): Celery app instance.
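Example (illustrative; the coordinates are arbitrary sample values):

    .. code-block:: pycon

        >>> sunset_schedule = solar('sunset', -37.81, 144.96)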
""" _all_events = { 'dawn_astronomical', 'dawn_nautical', 'dawn_civil', 'sunrise', 'solar_noon', 'sunset', 'dusk_civil', 'dusk_nautical', 'dusk_astronomical', } _horizons = { 'dawn_astronomical': '-18', 'dawn_nautical': '-12', 'dawn_civil': '-6', 'sunrise': '-0:34', 'solar_noon': '0', 'sunset': '-0:34', 'dusk_civil': '-6', 'dusk_nautical': '-12', 'dusk_astronomical': '18', } _methods = { 'dawn_astronomical': 'next_rising', 'dawn_nautical': 'next_rising', 'dawn_civil': 'next_rising', 'sunrise': 'next_rising', 'solar_noon': 'next_transit', 'sunset': 'next_setting', 'dusk_civil': 'next_setting', 'dusk_nautical': 'next_setting', 'dusk_astronomical': 'next_setting', } _use_center_l = { 'dawn_astronomical': True, 'dawn_nautical': True, 'dawn_civil': True, 'sunrise': False, 'solar_noon': True, 'sunset': False, 'dusk_civil': True, 'dusk_nautical': True, 'dusk_astronomical': True, } def __init__(self, event, lat, lon, **kwargs): self.ephem = __import__('ephem') self.event = event self.lat = lat self.lon = lon super(solar, self).__init__(**kwargs) if event not in self._all_events: raise ValueError(SOLAR_INVALID_EVENT.format( event=event, all_events=', '.join(sorted(self._all_events)), )) if lat < -90 or lat > 90: raise ValueError(SOLAR_INVALID_LATITUDE.format(lat=lat)) if lon < -180 or lon > 180: raise ValueError(SOLAR_INVALID_LONGITUDE.format(lon=lon)) cal = self.ephem.Observer() cal.lat = str(lat) cal.lon = str(lon) cal.elev = 0 cal.horizon = self._horizons[event] cal.pressure = 0 self.cal = cal self.method = self._methods[event] self.use_center = self._use_center_l[event] def __reduce__(self): return self.__class__, (self.event, self.lat, self.lon) def __repr__(self): return ''.format( self.event, self.lat, self.lon, ) def remaining_estimate(self, last_run_at): """Return estimate of next time to run. Returns: ~datetime.timedelta: when the periodic task should run next, or if it shouldn't run today (e.g., the sun does not rise today), returns the time when the next check should take place. """ last_run_at = self.maybe_make_aware(last_run_at) last_run_at_utc = localize(last_run_at, timezone.utc) self.cal.date = last_run_at_utc try: next_utc = getattr(self.cal, self.method)( self.ephem.Sun(), start=last_run_at_utc, use_center=self.use_center, ) except self.ephem.CircumpolarError: # pragma: no cover # Sun won't rise/set today. Check again tomorrow # (specifically, after the next anti-transit). next_utc = ( self.cal.next_antitransit(self.ephem.Sun()) + timedelta(minutes=1) ) next = self.maybe_make_aware(next_utc.datetime()) now = self.maybe_make_aware(self.now()) delta = next - now return delta def is_due(self, last_run_at): """Return tuple of ``(is_due, next_time_to_run)``. Note: next time to run is in seconds. See Also: :meth:`celery.schedules.schedule.is_due` for more information. """ rem_delta = self.remaining_estimate(last_run_at) rem = max(rem_delta.total_seconds(), 0) due = rem == 0 if due: rem_delta = self.remaining_estimate(self.now()) rem = max(rem_delta.total_seconds(), 0) return schedstate(due, rem) def __eq__(self, other): if isinstance(other, solar): return ( other.event == self.event and other.lat == self.lat and other.lon == self.lon ) return NotImplemented def __ne__(self, other): res = self.__eq__(other) if res is NotImplemented: return True return not res celery-4.1.0/celery/states.py0000644000175000017500000000641513130607475016101 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Built-in task states. .. _states: States ------ See :ref:`task-states`. .. 
_statesets: Sets ---- .. state:: READY_STATES READY_STATES ~~~~~~~~~~~~ Set of states meaning the task result is ready (has been executed). .. state:: UNREADY_STATES UNREADY_STATES ~~~~~~~~~~~~~~ Set of states meaning the task result is not ready (hasn't been executed). .. state:: EXCEPTION_STATES EXCEPTION_STATES ~~~~~~~~~~~~~~~~ Set of states meaning the task returned an exception. .. state:: PROPAGATE_STATES PROPAGATE_STATES ~~~~~~~~~~~~~~~~ Set of exception states that should propagate exceptions to the user. .. state:: ALL_STATES ALL_STATES ~~~~~~~~~~ Set of all possible states. Misc ---- """ from __future__ import absolute_import, unicode_literals __all__ = [ 'PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE', 'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES', 'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state', ] #: State precedence. #: None represents the precedence of an unknown state. #: Lower index means higher precedence. PRECEDENCE = [ 'SUCCESS', 'FAILURE', None, 'REVOKED', 'STARTED', 'RECEIVED', 'REJECTED', 'RETRY', 'PENDING', ] #: Hash lookup of PRECEDENCE to index PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE)))) NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None] def precedence(state): """Get the precedence index for state. Lower index means higher precedence. """ try: return PRECEDENCE_LOOKUP[state] except KeyError: return NONE_PRECEDENCE class state(str): """Task state. State is a subclass of :class:`str`, implementing comparison methods adhering to state precedence rules:: >>> from celery.states import state, PENDING, SUCCESS >>> state(PENDING) < state(SUCCESS) True Any custom state is considered to be lower than :state:`FAILURE` and :state:`SUCCESS`, but higher than any of the other built-in states:: >>> state('PROGRESS') > state(STARTED) True >>> state('PROGRESS') > state('SUCCESS') False """ def __gt__(self, other): return precedence(self) < precedence(other) def __ge__(self, other): return precedence(self) <= precedence(other) def __lt__(self, other): return precedence(self) > precedence(other) def __le__(self, other): return precedence(self) >= precedence(other) #: Task state is unknown (assumed pending since you know the id). PENDING = 'PENDING' #: Task was received by a worker (only used in events). RECEIVED = 'RECEIVED' #: Task was started by a worker (:setting:`task_track_started`). STARTED = 'STARTED' #: Task succeeded SUCCESS = 'SUCCESS' #: Task failed FAILURE = 'FAILURE' #: Task was revoked. REVOKED = 'REVOKED' #: Task was rejected (only used in events). REJECTED = 'REJECTED' #: Task is waiting for retry. RETRY = 'RETRY' IGNORED = 'IGNORED' READY_STATES = frozenset({SUCCESS, FAILURE, REVOKED}) UNREADY_STATES = frozenset({PENDING, RECEIVED, STARTED, REJECTED, RETRY}) EXCEPTION_STATES = frozenset({RETRY, FAILURE, REVOKED}) PROPAGATE_STATES = frozenset({FAILURE, REVOKED}) ALL_STATES = frozenset({ PENDING, RECEIVED, STARTED, SUCCESS, FAILURE, RETRY, REVOKED, }) celery-4.1.0/celery/platforms.py0000644000175000017500000005742613130607475016615 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Platforms. Utilities dealing with platform specifics: signals, daemonization, users, groups, and so on. 
""" from __future__ import absolute_import, print_function, unicode_literals import atexit import errno import math import numbers import os import platform as _platform import signal as _signal import sys import warnings from collections import namedtuple from billiard.compat import get_fdmax, close_open_fds # fileno used to be in this module from kombu.utils.compat import maybe_fileno from kombu.utils.encoding import safe_str from contextlib import contextmanager from .exceptions import SecurityError from .local import try_import from .five import items, reraise, string_t try: from billiard.process import current_process except ImportError: # pragma: no cover current_process = None _setproctitle = try_import('setproctitle') resource = try_import('resource') pwd = try_import('pwd') grp = try_import('grp') mputil = try_import('multiprocessing.util') __all__ = [ 'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM', 'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed', 'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds', 'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals', 'signal_name', 'set_process_title', 'set_mp_process_title', 'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty', ] # exitcodes EX_OK = getattr(os, 'EX_OK', 0) EX_FAILURE = 1 EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69) EX_USAGE = getattr(os, 'EX_USAGE', 64) EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73) SYSTEM = _platform.system() IS_macOS = SYSTEM == 'Darwin' IS_WINDOWS = SYSTEM == 'Windows' DAEMON_WORKDIR = '/' PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK)) PIDLOCKED = """ERROR: Pidfile ({0}) already exists. Seems we're already running? (pid: {1})""" _range = namedtuple('_range', ('start', 'stop')) C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False) ROOT_DISALLOWED = """\ Running a worker with superuser privileges when the worker accepts messages serialized with pickle is a very bad idea! If you really want to continue then you have to set the C_FORCE_ROOT environment variable (but please think about this before you do). User information: uid={uid} euid={euid} gid={gid} egid={egid} """ ROOT_DISCOURAGED = """\ You're running the worker with superuser privileges: this is absolutely not recommended! Please specify a different user using the -u option. User information: uid={uid} euid={euid} gid={gid} egid={egid} """ SIGNAMES = { sig for sig in dir(_signal) if sig.startswith('SIG') and '_' not in sig } SIGMAP = {getattr(_signal, name): name for name in SIGNAMES} def isatty(fh): """Return true if the process has a controlling terminal.""" try: return fh.isatty() except AttributeError: pass def pyimplementation(): """Return string identifying the current Python implementation.""" if hasattr(_platform, 'python_implementation'): return _platform.python_implementation() elif sys.platform.startswith('java'): return 'Jython ' + sys.platform elif hasattr(sys, 'pypy_version_info'): v = '.'.join(str(p) for p in sys.pypy_version_info[:3]) if sys.pypy_version_info[3:]: v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:]) return 'PyPy ' + v else: return 'CPython' class LockFailed(Exception): """Raised if a PID lock can't be acquired.""" class Pidfile(object): """Pidfile. This is the type returned by :func:`create_pidlock`. 
See Also: Best practice is to not use this directly but rather use the :func:`create_pidlock` function instead: more convenient and also removes stale pidfiles (when the process holding the lock is no longer running). """ #: Path to the pid lock file. path = None def __init__(self, path): self.path = os.path.abspath(path) def acquire(self): """Acquire lock.""" try: self.write_pid() except OSError as exc: reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2]) return self __enter__ = acquire def is_locked(self): """Return true if the pid lock exists.""" return os.path.exists(self.path) def release(self, *args): """Release lock.""" self.remove() __exit__ = release def read_pid(self): """Read and return the current pid.""" with ignore_errno('ENOENT'): with open(self.path, 'r') as fh: line = fh.readline() if line.strip() == line: # must contain '\n' raise ValueError( 'Partial or invalid pidfile {0.path}'.format(self)) try: return int(line.strip()) except ValueError: raise ValueError( 'pidfile {0.path} contents invalid.'.format(self)) def remove(self): """Remove the lock.""" with ignore_errno(errno.ENOENT, errno.EACCES): os.unlink(self.path) def remove_if_stale(self): """Remove the lock if the process isn't running. I.e. process does not respons to signal. """ try: pid = self.read_pid() except ValueError as exc: print('Broken pidfile found - Removing it.', file=sys.stderr) self.remove() return True if not pid: self.remove() return True try: os.kill(pid, 0) except os.error as exc: if exc.errno == errno.ESRCH: print('Stale pidfile exists - Removing it.', file=sys.stderr) self.remove() return True return False def write_pid(self): pid = os.getpid() content = '{0}\n'.format(pid) pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE) pidfile = os.fdopen(pidfile_fd, 'w') try: pidfile.write(content) # flush and sync so that the re-read below works. pidfile.flush() try: os.fsync(pidfile_fd) except AttributeError: # pragma: no cover pass finally: pidfile.close() rfh = open(self.path) try: if rfh.read() != content: raise LockFailed( "Inconsistency: Pidfile content doesn't match at re-read") finally: rfh.close() PIDFile = Pidfile # noqa: E305 XXX compat alias def create_pidlock(pidfile): """Create and verify pidfile. If the pidfile already exists the program exits with an error message, however if the process it refers to isn't running anymore, the pidfile is deleted and the program continues. This function will automatically install an :mod:`atexit` handler to release the lock at exit, you can skip this by calling :func:`_create_pidlock` instead. Returns: Pidfile: used to manage the lock. Example: >>> pidlock = create_pidlock('/var/run/app.pid') """ pidlock = _create_pidlock(pidfile) atexit.register(pidlock.release) return pidlock def _create_pidlock(pidfile): pidlock = Pidfile(pidfile) if pidlock.is_locked() and not pidlock.remove_if_stale(): print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr) raise SystemExit(EX_CANTCREAT) pidlock.acquire() return pidlock def fd_by_path(paths): """Return a list of file descriptors. This method returns list of file descriptors corresponding to file paths passed in paths variable. Arguments: paths: List[str]: List of file paths. Returns: List[int]: List of file descriptors. 
Example: >>> keep = fd_by_path(['/dev/urandom', '/my/precious/']) """ stats = set() for path in paths: try: fd = os.open(path, os.O_RDONLY) except OSError: continue try: stats.add(os.fstat(fd)[1:3]) finally: os.close(fd) def fd_in_stats(fd): try: return os.fstat(fd)[1:3] in stats except OSError: return False return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)] class DaemonContext(object): """Context manager daemonizing the process.""" _is_open = False def __init__(self, pidfile=None, workdir=None, umask=None, fake=False, after_chdir=None, after_forkers=True, **kwargs): if isinstance(umask, string_t): # octal or decimal, depending on initial zero. umask = int(umask, 8 if umask.startswith('0') else 10) self.workdir = workdir or DAEMON_WORKDIR self.umask = umask self.fake = fake self.after_chdir = after_chdir self.after_forkers = after_forkers self.stdfds = (sys.stdin, sys.stdout, sys.stderr) def redirect_to_null(self, fd): if fd is not None: dest = os.open(os.devnull, os.O_RDWR) os.dup2(dest, fd) def open(self): if not self._is_open: if not self.fake: self._detach() os.chdir(self.workdir) if self.umask is not None: os.umask(self.umask) if self.after_chdir: self.after_chdir() if not self.fake: # We need to keep /dev/urandom from closing because # shelve needs it, and Beat needs shelve to start. keep = list(self.stdfds) + fd_by_path(['/dev/urandom']) close_open_fds(keep) for fd in self.stdfds: self.redirect_to_null(maybe_fileno(fd)) if self.after_forkers and mputil is not None: mputil._run_after_forkers() self._is_open = True __enter__ = open def close(self, *args): if self._is_open: self._is_open = False __exit__ = close def _detach(self): if os.fork() == 0: # first child os.setsid() # create new session if os.fork() > 0: # pragma: no cover # second child os._exit(0) else: os._exit(0) return self def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, workdir=None, fake=False, **opts): """Detach the current process in the background (daemonize). Arguments: logfile (str): Optional log file. The ability to write to this file will be verified before the process is detached. pidfile (str): Optional pid file. The pidfile won't be created, as this is the responsibility of the child. But the process will exit if the pid lock exists and the pid written is still running. uid (int, str): Optional user id or user name to change effective privileges to. gid (int, str): Optional group id or group name to change effective privileges to. umask (str, int): Optional umask that'll be effective in the child process. workdir (str): Optional new working directory. fake (bool): Don't actually detach, intended for debugging purposes. **opts (Any): Ignored. Example: >>> from celery.platforms import detached, create_pidlock >>> with detached( ... logfile='/var/log/app.log', ... pidfile='/var/run/app.pid', ... uid='nobody'): ... # Now in detached child process with effective user set to nobody, ... # and we know that our logfile can be written to, and that ... # the pidfile isn't locked. ... pidlock = create_pidlock('/var/run/app.pid') ... ... # Run the program ... program.run(logfile='/var/log/app.log') """ if not resource: raise RuntimeError('This platform does not support detach.') workdir = os.getcwd() if workdir is None else workdir signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler. maybe_drop_privileges(uid=uid, gid=gid) def after_chdir_do(): # Since without stderr any errors will be silently suppressed, # we need to know that we have access to the logfile. 
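# Added note: ``logfile and open(...)`` is a guard expression equivalent
# to ``if logfile: open(logfile, 'a').close()`` -- opening in append mode
# proves the file is writable without truncating it.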
logfile and open(logfile, 'a').close() # Doesn't actually create the pidfile, but makes sure it's not stale. if pidfile: _create_pidlock(pidfile).release() return DaemonContext( umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do, ) def parse_uid(uid): """Parse user id. Arguments: uid (str, int): Actual uid, or the username of a user. Returns: int: The actual uid. """ try: return int(uid) except ValueError: try: return pwd.getpwnam(uid).pw_uid except (AttributeError, KeyError): raise KeyError('User does not exist: {0}'.format(uid)) def parse_gid(gid): """Parse group id. Arguments: gid (str, int): Actual gid, or the name of a group. Returns: int: The actual gid of the group. """ try: return int(gid) except ValueError: try: return grp.getgrnam(gid).gr_gid except (AttributeError, KeyError): raise KeyError('Group does not exist: {0}'.format(gid)) def _setgroups_hack(groups): # :fun:`setgroups` may have a platform-dependent limit, # and it's not always possible to know in advance what this limit # is, so we use this ugly hack stolen from glibc. groups = groups[:] while 1: try: return os.setgroups(groups) except ValueError: # error from Python's check. if len(groups) <= 1: raise groups[:] = groups[:-1] except OSError as exc: # error from the OS. if exc.errno != errno.EINVAL or len(groups) <= 1: raise groups[:] = groups[:-1] def setgroups(groups): """Set active groups from a list of group ids.""" max_groups = None try: max_groups = os.sysconf('SC_NGROUPS_MAX') except Exception: # pylint: disable=broad-except pass try: return _setgroups_hack(groups[:max_groups]) except OSError as exc: if exc.errno != errno.EPERM: raise if any(group not in groups for group in os.getgroups()): # we shouldn't be allowed to change to this group. raise def initgroups(uid, gid): """Init process group permissions. Compat version of :func:`os.initgroups` that was first added to Python 2.7. """ if not pwd: # pragma: no cover return username = pwd.getpwuid(uid)[0] if hasattr(os, 'initgroups'): # Python 2.7+ return os.initgroups(username, gid) groups = [gr.gr_gid for gr in grp.getgrall() if username in gr.gr_mem] setgroups(groups) def setgid(gid): """Version of :func:`os.setgid` supporting group names.""" os.setgid(parse_gid(gid)) def setuid(uid): """Version of :func:`os.setuid` supporting usernames.""" os.setuid(parse_uid(uid)) def maybe_drop_privileges(uid=None, gid=None): """Change process privileges to new user/group. If UID and GID is specified, the real user/group is changed. If only UID is specified, the real user is changed, and the group is changed to the users primary group. If only GID is specified, only the group is changed. """ if sys.platform == 'win32': return if os.geteuid(): # no point trying to setuid unless we're root. if not os.getuid(): raise SecurityError('contact support') uid = uid and parse_uid(uid) gid = gid and parse_gid(gid) if uid: _setuid(uid, gid) else: gid and setgid(gid) if uid and not os.getuid() and not os.geteuid(): raise SecurityError('Still root uid after drop privileges!') if gid and not os.getgid() and not os.getegid(): raise SecurityError('Still root gid after drop privileges!') def _setuid(uid, gid): # If GID isn't defined, get the primary GID of the user. if not gid and pwd: gid = pwd.getpwuid(uid).pw_gid # Must set the GID before initgroups(), as setgid() # is known to zap the group list on some platforms. # setgid must happen before setuid (otherwise the setgid operation # may fail because of insufficient privileges and possibly stay # in a privileged group). 
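# Added note: e.g. dropping from root to uid=1000/gid=1000 proceeds as
# setgid(1000) -> initgroups(1000, 1000) -> setuid(1000); calling
# setuid() first would drop the privileges the group calls require.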
setgid(gid) initgroups(uid, gid) # at last: setuid(uid) # ... and make sure privileges cannot be restored: try: setuid(0) except OSError as exc: if exc.errno != errno.EPERM: raise # we should get here: cannot restore privileges, # everything was fine. else: raise SecurityError( 'non-root user able to restore privileges after setuid.') class Signals(object): """Convenience interface to :mod:`signals`. If the requested signal isn't supported on the current platform, the operation will be ignored. Example: >>> from celery.platforms import signals >>> from proj.handlers import my_handler >>> signals['INT'] = my_handler >>> signals['INT'] my_handler >>> signals.supported('INT') True >>> signals.signum('INT') 2 >>> signals.ignore('USR1') >>> signals['USR1'] == signals.ignored True >>> signals.reset('USR1') >>> signals['USR1'] == signals.default True >>> from proj.handlers import exit_handler, hup_handler >>> signals.update(INT=exit_handler, ... TERM=exit_handler, ... HUP=hup_handler) """ ignored = _signal.SIG_IGN default = _signal.SIG_DFL if hasattr(_signal, 'setitimer'): def arm_alarm(self, seconds): _signal.setitimer(_signal.ITIMER_REAL, seconds) else: # pragma: no cover try: from itimer import alarm as _itimer_alarm # noqa except ImportError: def arm_alarm(self, seconds): # noqa _signal.alarm(math.ceil(seconds)) else: # pragma: no cover def arm_alarm(self, seconds): # noqa return _itimer_alarm(seconds) # noqa def reset_alarm(self): return _signal.alarm(0) def supported(self, name): """Return true value if signal by ``name`` exists on this platform.""" try: self.signum(name) except AttributeError: return False else: return True def signum(self, name): """Get signal number by name.""" if isinstance(name, numbers.Integral): return name if not isinstance(name, string_t) \ or not name.isupper(): raise TypeError('signal name must be uppercase string.') if not name.startswith('SIG'): name = 'SIG' + name return getattr(_signal, name) def reset(self, *signal_names): """Reset signals to the default signal handler. Does nothing if the platform has no support for signals, or the specified signal in particular. """ self.update((sig, self.default) for sig in signal_names) def ignore(self, *names): """Ignore signal using :const:`SIG_IGN`. Does nothing if the platform has no support for signals, or the specified signal in particular. """ self.update((sig, self.ignored) for sig in names) def __getitem__(self, name): return _signal.getsignal(self.signum(name)) def __setitem__(self, name, handler): """Install signal handler. Does nothing if the current platform has no support for signals, or the specified signal in particular. """ try: _signal.signal(self.signum(name), handler) except (AttributeError, ValueError): pass def update(self, _d_=None, **sigmap): """Set signal handlers from a mapping.""" for name, handler in items(dict(_d_ or {}, **sigmap)): self[name] = handler signals = Signals() get_signal = signals.signum # compat install_signal_handler = signals.__setitem__ # compat reset_signal = signals.reset # compat ignore_signal = signals.ignore # compat def signal_name(signum): """Return name of signal from signal number.""" return SIGMAP[signum][3:] def strargv(argv): arg_start = 2 if 'manage' in argv[0] else 1 if len(argv) > arg_start: return ' '.join(argv[arg_start:]) return '' def set_process_title(progname, info=None): """Set the :command:`ps` name for the currently running process. Only works if :pypi:`setproctitle` is installed. 
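Example (illustrative):

    >>> set_process_title('celeryd', info='-c 4')
    '[celeryd] -c 4'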
""" proctitle = '[{0}]'.format(progname) proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle if _setproctitle: _setproctitle.setproctitle(safe_str(proctitle)) return proctitle if os.environ.get('NOSETPS'): # pragma: no cover def set_mp_process_title(*a, **k): """Disabled feature.""" pass else: def set_mp_process_title(progname, info=None, hostname=None): # noqa """Set the :command:`ps` name from the current process name. Only works if :pypi:`setproctitle` is installed. """ if hostname: progname = '{0}: {1}'.format(progname, hostname) name = current_process().name if current_process else 'MainProcess' return set_process_title('{0}:{1}'.format(progname, name), info=info) def get_errno_name(n): """Get errno for string (e.g., ``ENOENT``).""" if isinstance(n, string_t): return getattr(errno, n) return n @contextmanager def ignore_errno(*errnos, **kwargs): """Context manager to ignore specific POSIX error codes. Takes a list of error codes to ignore: this can be either the name of the code, or the code integer itself:: >>> with ignore_errno('ENOENT'): ... with open('foo', 'r') as fh: ... return fh.read() >>> with ignore_errno(errno.ENOENT, errno.EPERM): ... pass Arguments: types (Tuple[Exception]): A tuple of exceptions to ignore (when the errno matches). Defaults to :exc:`Exception`. """ types = kwargs.get('types') or (Exception,) errnos = [get_errno_name(errno) for errno in errnos] try: yield except types as exc: if not hasattr(exc, 'errno'): raise if exc.errno not in errnos: raise def check_privileges(accept_content): uid = os.getuid() if hasattr(os, 'getuid') else 65535 gid = os.getgid() if hasattr(os, 'getgid') else 65535 euid = os.geteuid() if hasattr(os, 'geteuid') else 65535 egid = os.getegid() if hasattr(os, 'getegid') else 65535 if hasattr(os, 'fchown'): if not all(hasattr(os, attr) for attr in ['getuid', 'getgid', 'geteuid', 'getegid']): raise SecurityError('suspicious platform, contact support') if not uid or not gid or not euid or not egid: if ('pickle' in accept_content or 'application/x-python-serialize' in accept_content): if not C_FORCE_ROOT: try: print(ROOT_DISALLOWED.format( uid=uid, euid=euid, gid=gid, egid=egid, ), file=sys.stderr) finally: os._exit(1) warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format( uid=uid, euid=euid, gid=gid, egid=egid, ))) celery-4.1.0/celery/loaders/0000755000175000017500000000000013135426347015651 5ustar omeromer00000000000000celery-4.1.0/celery/loaders/base.py0000644000175000017500000002020313130607475017130 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Loader base class.""" from __future__ import absolute_import, unicode_literals import imp as _imp import importlib import os import re import sys from datetime import datetime from kombu.utils import json from kombu.utils.objects import cached_property from celery import signals from celery.five import reraise, string_t from celery.utils.collections import DictAttribute, force_mapping from celery.utils.functional import maybe_list from celery.utils.imports import ( import_from_cwd, symbol_by_name, NotAPackage, find_module, ) __all__ = ['BaseLoader'] _RACE_PROTECTION = False CONFIG_INVALID_NAME = """\ Error: Module '{module}' doesn't exist, or it's not a valid \ Python module name. """ CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\ Did you mean '{suggest}'? """ unconfigured = object() class BaseLoader(object): """Base class for loaders. Loaders handles, * Reading celery client/worker configurations. * What happens when a task starts? See :meth:`on_task_init`. 
* What happens when the worker starts? See :meth:`on_worker_init`. * What happens when the worker shuts down? See :meth:`on_worker_shutdown`. * What modules are imported to find tasks? """ builtin_modules = frozenset() configured = False override_backends = {} worker_initialized = False _conf = unconfigured def __init__(self, app, **kwargs): self.app = app self.task_modules = set() def now(self, utc=True): if utc: return datetime.utcnow() return datetime.now() def on_task_init(self, task_id, task): """Called before a task is executed.""" pass def on_process_cleanup(self): """Called after a task is executed.""" pass def on_worker_init(self): """Called when the worker (:program:`celery worker`) starts.""" pass def on_worker_shutdown(self): """Called when the worker (:program:`celery worker`) shuts down.""" pass def on_worker_process_init(self): """Called when a child process starts.""" pass def import_task_module(self, module): self.task_modules.add(module) return self.import_from_cwd(module) def import_module(self, module, package=None): return importlib.import_module(module, package=package) def import_from_cwd(self, module, imp=None, package=None): return import_from_cwd( module, self.import_module if imp is None else imp, package=package, ) def import_default_modules(self): signals.import_modules.send(sender=self.app) return [self.import_task_module(m) for m in self.default_modules] def init_worker(self): if not self.worker_initialized: self.worker_initialized = True self.import_default_modules() self.on_worker_init() def shutdown_worker(self): self.on_worker_shutdown() def init_worker_process(self): self.on_worker_process_init() def config_from_object(self, obj, silent=False): if isinstance(obj, string_t): try: obj = self._smart_import(obj, imp=self.import_from_cwd) except (ImportError, AttributeError): if silent: return False raise self._conf = force_mapping(obj) return True def _smart_import(self, path, imp=None): imp = self.import_module if imp is None else imp if ':' in path: # Path includes attribute so can just jump # here (e.g., ``os.path:abspath``). return symbol_by_name(path, imp=imp) # Not sure if path is just a module name or if it includes an # attribute name (e.g., ``os.path``, vs, ``os.path.abspath``). try: return imp(path) except ImportError: # Not a module name, so try module + attribute. return symbol_by_name(path, imp=imp) def _import_config_module(self, name): try: self.find_module(name) except NotAPackage: if name.endswith('.py'): reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format( module=name, suggest=name[:-3])), sys.exc_info()[2]) reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format( module=name)), sys.exc_info()[2]) else: return self.import_from_cwd(name) def find_module(self, module): return find_module(module) def cmdline_config_parser( self, args, namespace='celery', re_type=re.compile(r'\((\w+)\)'), extra_types={'json': json.loads}, override_types={'tuple': 'json', 'list': 'json', 'dict': 'json'}): from celery.app.defaults import Option, NAMESPACES namespace = namespace and namespace.lower() typemap = dict(Option.typemap, **extra_types) def getarg(arg): """Parse single configuration from command-line.""" # ## find key/value # ns.key=value|ns_key=value (case insensitive) key, value = arg.split('=', 1) key = key.lower().replace('.', '_') # ## find name-space. # .key=value|_key=value expands to default name-space. 
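# For example (an illustrative sketch, assuming namespace='celery'): # '_result_backend=rpc://' expands to the 'celery' name-space, # while 'worker_pool=solo' is split into name-space 'worker' # and key 'pool'.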
if key[0] == '_': ns, key = namespace, key[1:] else: # find name-space part of key ns, key = key.split('_', 1) ns_key = (ns and ns + '_' or '') + key # A ``(type)`` prefix casts the value to a custom type. cast = re_type.match(value) if cast: type_ = cast.groups()[0] type_ = override_types.get(type_, type_) value = value[len(cast.group()):] value = typemap[type_](value) else: try: value = NAMESPACES[ns.lower()][key].to_python(value) except ValueError as exc: # display key name in error message. raise ValueError('{0!r}: {1}'.format(ns_key, exc)) return ns_key, value return dict(getarg(arg) for arg in args) def read_configuration(self, env='CELERY_CONFIG_MODULE'): try: custom_config = os.environ[env] except KeyError: pass else: if custom_config: usercfg = self._import_config_module(custom_config) return DictAttribute(usercfg) def autodiscover_tasks(self, packages, related_name='tasks'): self.task_modules.update( mod.__name__ for mod in autodiscover_tasks(packages or (), related_name) if mod) @cached_property def default_modules(self): return ( tuple(self.builtin_modules) + tuple(maybe_list(self.app.conf.imports)) + tuple(maybe_list(self.app.conf.include)) ) @property def conf(self): """Loader configuration.""" if self._conf is unconfigured: self._conf = self.read_configuration() return self._conf def autodiscover_tasks(packages, related_name='tasks'): global _RACE_PROTECTION if _RACE_PROTECTION: return () _RACE_PROTECTION = True try: return [find_related_module(pkg, related_name) for pkg in packages] finally: _RACE_PROTECTION = False def find_related_module(package, related_name): """Find module in package.""" # Django 1.7 allows for specifying a class name in INSTALLED_APPS. # (Issue #2248). try: importlib.import_module(package) except ImportError: package, _, _ = package.rpartition('.') if not package: raise try: pkg_path = importlib.import_module(package).__path__ except AttributeError: return try: _imp.find_module(related_name, pkg_path) except ImportError: return return importlib.import_module('{0}.{1}'.format(package, related_name)) celery-4.1.0/celery/loaders/__init__.py0000644000175000017500000000115113130607475017756 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Get loader by name. Loaders define how configuration is read, what happens when workers start, when tasks are executed and so on. """ from __future__ import absolute_import, unicode_literals from celery.utils.imports import symbol_by_name, import_from_cwd __all__ = ['get_loader_cls'] LOADER_ALIASES = { 'app': 'celery.loaders.app:AppLoader', 'default': 'celery.loaders.default:Loader', 'django': 'djcelery.loaders:DjangoLoader', } def get_loader_cls(loader): """Get loader class by name/alias.""" return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd) celery-4.1.0/celery/loaders/default.py0000644000175000017500000000307713130607475017654 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The default loader used when no custom app has been initialized.""" from __future__ import absolute_import, unicode_literals import os import warnings from celery.exceptions import NotConfigured from celery.utils.collections import DictAttribute from celery.utils.serialization import strtobool from .base import BaseLoader __all__ = ['Loader', 'DEFAULT_CONFIG_MODULE'] DEFAULT_CONFIG_MODULE = 'celeryconfig' #: Warns if the configuration file is missing when :envvar:`C_WNOCONF` is set. 
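#: The value is parsed with :func:`~celery.utils.serialization.strtobool`, #: so e.g. ``C_WNOCONF=1`` or ``C_WNOCONF=true`` enables the warning.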
C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False)) class Loader(BaseLoader): """The loader used by the default app.""" def setup_settings(self, settingsdict): return DictAttribute(settingsdict) def read_configuration(self, fail_silently=True): """Read configuration from :file:`celeryconfig.py`.""" configname = os.environ.get('CELERY_CONFIG_MODULE', DEFAULT_CONFIG_MODULE) try: usercfg = self._import_config_module(configname) except ImportError: if not fail_silently: raise # billiard sets this if forked using execv if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'): warnings.warn(NotConfigured( 'No {module} module found! Please make sure it exists and ' 'is available to Python.'.format(module=configname))) return self.setup_settings({}) else: self.configured = True return self.setup_settings(usercfg) celery-4.1.0/celery/loaders/app.py0000644000175000017500000000042713130607475017004 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """The default loader used with custom app instances.""" from __future__ import absolute_import, unicode_literals from .base import BaseLoader __all__ = ['AppLoader'] class AppLoader(BaseLoader): """Default loader used when an app is specified.""" celery-4.1.0/examples/0000755000175000017500000000000013135426347014553 5ustar omeromer00000000000000celery-4.1.0/examples/periodic-tasks/0000755000175000017500000000000013135426347017474 5ustar omeromer00000000000000celery-4.1.0/examples/periodic-tasks/myapp.py0000644000175000017500000000307113130607475021173 0ustar omeromer00000000000000"""myapp.py Usage:: # The worker service reacts to messages by executing tasks. (window1)$ python myapp.py worker -l info # The beat service sends messages at scheduled intervals. (window2)$ python myapp.py beat -l info # XXX To diagnose problems use -l debug: (window2)$ python myapp.py beat -l debug # XXX XXX To diagnose calculated runtimes use C_REMDEBUG envvar: (window2) $ C_REMDEBUG=1 python myapp.py beat -l debug You can also specify the app to use with the `celery` command, using the `-A` / `--app` option:: $ celery -A myapp worker -l info With the `-A myproj` argument the program will search for an app instance in the module ``myproj``. You can also specify an explicit name using the fully qualified form:: $ celery -A myapp:app worker -l info """ from __future__ import absolute_import, unicode_literals, print_function from celery import Celery app = Celery( # XXX The below 'myapp' is the name of this module, for generating # task names when executed as __main__. 'myapp', broker='amqp://guest@localhost//', # ## add result backend here if needed. # backend='rpc' ) app.conf.timezone = 'UTC' @app.task def say(what): print(what) @app.on_after_configure.connect def setup_periodic_tasks(sender, **kwargs): # Calls say('hello') every 10 seconds. 
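# (the first argument is the interval in seconds; a timedelta or # crontab schedule may be passed instead of a float.)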
sender.add_periodic_task(10.0, say.s('hello'), name='add every 10') # See periodic tasks user guide for more examples: # http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html if __name__ == '__main__': app.start() celery-4.1.0/examples/eventlet/0000755000175000017500000000000013135426347016401 5ustar omeromer00000000000000celery-4.1.0/examples/eventlet/bulk_task_producer.py0000644000175000017500000000332213130607475022633 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from eventlet import spawn_n, monkey_patch, Timeout from eventlet.queue import LightQueue from eventlet.event import Event monkey_patch() class Receipt(object): result = None def __init__(self, callback=None): self.callback = callback self.ready = Event() def finished(self, result): self.result = result if self.callback: self.callback(result) self.ready.send() def wait(self, timeout=None): with Timeout(timeout): return self.ready.wait() class ProducerPool(object): """Usage:: >>> app = Celery(broker='amqp://') >>> ProducerPool(app) """ Receipt = Receipt def __init__(self, app, size=20): self.app = app self.size = size self.inqueue = LightQueue() self._running = None self._producers = None def apply_async(self, task, args, kwargs, callback=None, **options): if self._running is None: self._running = spawn_n(self._run) receipt = self.Receipt(callback) self.inqueue.put((task, args, kwargs, options, receipt)) return receipt def _run(self): self._producers = [ spawn_n(self._producer) for _ in range(self.size) ] def _producer(self): inqueue = self.inqueue with self.app.producer_or_acquire() as producer: while 1: task, args, kwargs, options, receipt = inqueue.get() result = task.apply_async(args, kwargs, producer=producer, **options) receipt.finished(result) celery-4.1.0/examples/eventlet/celeryconfig.py0000644000175000017500000000061013130607475021417 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import os import sys sys.path.insert(0, os.getcwd()) # ## Start worker with -P eventlet # Never use the worker_pool setting as that'll patch # the worker too late. broker_url = 'amqp://guest:guest@localhost:5672//' worker_disable_rate_limits = True result_backend = 'amqp' result_expires = 30 * 60 imports = ('tasks', 'webcrawler') celery-4.1.0/examples/eventlet/webcrawler.py0000644000175000017500000000403413130607475021107 0ustar omeromer00000000000000"""Recursive webcrawler example. For asynchronous DNS lookups install the `dnspython` package: $ pip install dnspython Requires the `pybloom` module for the bloom filter, which is used to reduce the chance of recrawling a URL we have already seen. Since the bloom filter is not shared, but only passed as an argument to each subtask, it would be much better to have this as a centralized service. Redis sets could also be a practical solution. A BloomFilter with a capacity of 100,000 members and an error rate of 0.001 is 2.8MB pickled, but if compressed with zlib it only takes up 2.9kB(!). We don't have to do compression manually; just set the task's compression to "zlib" and the serializer to "pickle". 
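A rough sketch of how the size claim can be checked (assumes :pypi:`pybloom` is installed; the exact numbers vary by version):

    >>> import pickle, zlib
    >>> from pybloom import BloomFilter
    >>> f = BloomFilter(capacity=100000, error_rate=0.001)
    >>> data = pickle.dumps(f)
    >>> len(data), len(zlib.compress(data))  # roughly 2.8MB vs. 2.9kB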
""" from __future__ import absolute_import, print_function, unicode_literals import re import requests from celery import task, group from eventlet import Timeout from pybloom import BloomFilter try: from urllib.parse import urlsplit except ImportError: from urlparse import urlsplit # noqa # http://daringfireball.net/2009/11/liberal_regex_for_matching_urls url_regex = re.compile( r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))') def domain(url): """Return the domain part of a URL.""" return urlsplit(url)[1].split(':')[0] @task(ignore_result=True, serializer='pickle', compression='zlib') def crawl(url, seen=None): print('crawling: {0}'.format(url)) if not seen: seen = BloomFilter(capacity=50000, error_rate=0.0001) with Timeout(5, False): try: response = requests.get(url) except requests.exception.RequestError: return location = domain(url) wanted_urls = [] for url_match in url_regex.finditer(response.text): url = url_match.group(0) # To not destroy the internet, we only fetch URLs on the same domain. if url not in seen and location in domain(url): wanted_urls.append(url) seen.add(url) subtasks = group(crawl.s(url, seen) for url in wanted_urls) subtasks.delay() celery-4.1.0/examples/eventlet/tasks.py0000644000175000017500000000056213130607475020101 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals, print_function import requests from celery import task @task() def urlopen(url): print('-open: {0}'.format(url)) try: response = requests.get(url) except requests.exceptions.RequestException as exc: print('-url {0} gave error: {1!r}'.format(url, exc)) return len(response.text) celery-4.1.0/examples/eventlet/README.rst0000644000175000017500000000270113130607475020066 0ustar omeromer00000000000000================================== Example using the Eventlet Pool ================================== Introduction ============ This is a Celery application containing two example tasks. First you need to install Eventlet, and also recommended is the `dnspython` module (when this is installed all name lookups will be asynchronous):: $ pip install eventlet $ pip install dnspython $ pip install requests Before you run any of the example tasks you need to start the worker:: $ cd examples/eventlet $ celery worker -l info --concurrency=500 --pool=eventlet As usual you need to have RabbitMQ running, see the Celery getting started guide if you haven't installed it yet. Tasks ===== * `tasks.urlopen` This task simply makes a request opening the URL and returns the size of the response body:: $ cd examples/eventlet $ python >>> from tasks import urlopen >>> urlopen.delay('http://www.google.com/').get() 9980 To open several URLs at once you can do:: $ cd examples/eventlet $ python >>> from tasks import urlopen >>> from celery import group >>> result = group(urlopen.s(url) ... for url in LIST_OF_URLS).apply_async() >>> for incoming_result in result.iter_native(): ... print(incoming_result) * `webcrawler.crawl` This is a simple recursive web crawler. It will only crawl URLs for the current host name. Please see comments in the `webcrawler.py` file. 
celery-4.1.0/examples/django/0000755000175000017500000000000013135426347016015 5ustar omeromer00000000000000celery-4.1.0/examples/django/demoapp/0000755000175000017500000000000013135426347017442 5ustar omeromer00000000000000celery-4.1.0/examples/django/demoapp/__init__.py0000644000175000017500000000000013130607475021537 0ustar omeromer00000000000000celery-4.1.0/examples/django/demoapp/tasks.py0000644000175000017500000000041113130607475021133 0ustar omeromer00000000000000# Create your tasks here from __future__ import absolute_import, unicode_literals from celery import shared_task @shared_task def add(x, y): return x + y @shared_task def mul(x, y): return x * y @shared_task def xsum(numbers): return sum(numbers) celery-4.1.0/examples/django/demoapp/views.py0000644000175000017500000000012313130607475021143 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals # Create your views here. celery-4.1.0/examples/django/demoapp/models.py0000644000175000017500000000017213130607475021275 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from django.db import models # noqa # Create your models here. celery-4.1.0/examples/django/proj/0000755000175000017500000000000013135426347016767 5ustar omeromer00000000000000celery-4.1.0/examples/django/proj/settings.py0000644000175000017500000001322613130607475021203 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals # ^^^ The above is required if you want to import from the celery # library. If you don't have this then `from celery.schedules import` # becomes `proj.celery.schedules` in Python 2.x since it allows # for relative imports by default. # Celery settings CELERY_BROKER_URL = 'amqp://guest:guest@localhost//' #: Only add pickle to this list if your broker is secured #: from unwanted access (see userguide/security.html) CELERY_ACCEPT_CONTENT = ['json'] CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite' CELERY_TASK_SERIALIZER = 'json' # Django settings for proj project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'test.db', # path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. # Not used with sqlite3. 'PORT': '', # Set to empty string for default. # Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute file-system path to the directory that will hold # user-uploaded files. 
# Example: '/home/media/media.lawrence.com/media/' MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: 'http://media.lawrence.com/media/', 'http://example.com/media/' MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' 'static/' subdirectories and in STATICFILES_DIRS. # Example: '/home/media/media.lawrence.com/static/' STATIC_ROOT = '' # URL prefix for static files. # Example: 'http://media.lawrence.com/static/' STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like '/home/html/static' or 'C:/www/django/static'. # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Make this unique, and don't share it with anybody. # XXX TODO FIXME Set this to any random value! SECRET_KEY = 'This is not a secret, please change me!' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'proj.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'proj.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like '/home/html/django_templates' # or 'C:/www/django/templates'. # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'demoapp', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } celery-4.1.0/examples/django/proj/__init__.py0000644000175000017500000000034713130607475021102 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals # This will make sure the app is always imported when # Django starts so that shared_task will use this app. from .celery import app as celery_app __all__ = ['celery_app'] celery-4.1.0/examples/django/proj/wsgi.py0000644000175000017500000000225313130607475020312 0ustar omeromer00000000000000""" WSGI config for proj project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ from __future__ import absolute_import, unicode_literals import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application # noqa application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application) celery-4.1.0/examples/django/proj/celery.py0000644000175000017500000000130713130607475020623 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import os from celery import Celery # set the default Django settings module for the 'celery' program. os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') app = Celery('proj') # Using a string here means the worker doesn't have to serialize # the configuration object to child processes. # - namespace='CELERY' means all celery-related configuration keys # should have a `CELERY_` prefix. app.config_from_object('django.conf:settings', namespace='CELERY') # Load task modules from all registered Django app configs. 
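# (with no arguments this looks for a ``tasks.py`` submodule in each # application listed in the Django ``INSTALLED_APPS`` setting.)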
app.autodiscover_tasks() @app.task(bind=True) def debug_task(self): print('Request: {0!r}'.format(self.request)) celery-4.1.0/examples/django/proj/urls.py0000644000175000017500000000121613130607475020324 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from django.conf.urls import ( # noqa patterns, include, url, handler404, handler500, ) # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = patterns( '', # Examples: # url(r'^$', 'proj.views.home', name='home'), # url(r'^proj/', include('proj.foo.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: # url(r'^admin/', include(admin.site.urls)), ) celery-4.1.0/examples/django/requirements.txt0000644000175000017500000000005513130607475021277 0ustar omeromer00000000000000django>=1.9.8 sqlalchemy>=1.0.14 celery>=4.0 celery-4.1.0/examples/django/manage.py0000644000175000017500000000046113130607475017616 0ustar omeromer00000000000000#!/usr/bin/env python from __future__ import absolute_import, unicode_literals import os import sys if __name__ == '__main__': os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings') from django.core.management import execute_from_command_line execute_from_command_line(sys.argv) celery-4.1.0/examples/django/README.rst0000644000175000017500000000301713135426300017502 0ustar omeromer00000000000000============================================================== Example Django project using Celery ============================================================== Contents ======== ``proj/`` --------- This is a project in itself, created using ``django-admin.py startproject proj``, and then the settings module (``proj/settings.py``) was modified to add ``demoapp`` to ``INSTALLED_APPS``. ``proj/celery.py`` ------------------ This module contains the Celery application instance for this project; we take configuration from Django settings and use ``autodiscover_tasks`` to find task modules inside all packages listed in ``INSTALLED_APPS``. ``demoapp/`` ------------ Example generic app. This is decoupled from the rest of the project by using the ``@shared_task`` decorator. This decorator returns a proxy that always points to the currently active Celery instance. Installing requirements ======================= The settings file assumes that ``rabbitmq-server`` is running on ``localhost`` using the default ports. More information here: http://docs.celeryproject.org/en/latest/getting-started/brokers/rabbitmq.html In addition, some Python requirements must also be satisfied: .. code-block:: console $ pip install -r requirements.txt Starting the worker =================== .. code-block:: console $ celery -A proj worker -l info Running a task =================== .. 
code-block:: console $ python ./manage.py shell >>> from demoapp.tasks import add, mul, xsum >>> res = add.delay(2,3) >>> res.get() 5 celery-4.1.0/examples/next-steps/0000755000175000017500000000000013135426347016665 5ustar omeromer00000000000000celery-4.1.0/examples/next-steps/proj/0000755000175000017500000000000013135426347017637 5ustar omeromer00000000000000celery-4.1.0/examples/next-steps/proj/__init__.py0000644000175000017500000000000013130607475021734 0ustar omeromer00000000000000celery-4.1.0/examples/next-steps/proj/tasks.py0000644000175000017500000000034013130607475021331 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from .celery import app @app.task def add(x, y): return x + y @app.task def mul(x, y): return x * y @app.task def xsum(numbers): return sum(numbers) celery-4.1.0/examples/next-steps/proj/celery.py0000644000175000017500000000054013130607475021471 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery import Celery app = Celery('proj', broker='amqp://', backend='amqp://', include=['proj.tasks']) # Optional configuration, see the application user guide. app.conf.update( result_expires=3600, ) if __name__ == '__main__': app.start() celery-4.1.0/examples/next-steps/setup.py0000644000175000017500000000240513130607475020376 0ustar omeromer00000000000000""" Example setup file for a project using Celery. This can be used to distribute your tasks and worker as a Python package, on PyPI or on your own private package index. """ from __future__ import absolute_import, unicode_literals from setuptools import setup, find_packages setup( name='example-tasks', url='http://github.com/example/celery-tasks', author='Ola A. Normann', author_email='author@example.com', keywords='our celery integration', version='1.0', description='Tasks for my project', long_description=__doc__, license='BSD', packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']), test_suite='nose.collector', zip_safe=False, install_requires=[ 'celery>=4.0', # 'requests', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Operating System :: OS Independent', ], ) celery-4.1.0/examples/resultgraph/0000755000175000017500000000000013135426347017113 5ustar omeromer00000000000000celery-4.1.0/examples/resultgraph/tasks.py0000644000175000017500000000564213130607475020617 0ustar omeromer00000000000000# Example:: # >>> R = A.apply_async() # >>> list(joinall(R)) # [['A 0', 'A 1', 'A 2', 'A 3', 'A 4', 'A 5', 'A 6', 'A 7', 'A 8', 'A 9'], # ['B 0', 'B 1', 'B 2', 'B 3', 'B 4', 'B 5', 'B 6', 'B 7', 'B 8', 'B 9'], # ['C 0', 'C 1', 'C 2', 'C 3', 'C 4', 'C 5', 'C 6', 'C 7', 'C 8', 'C 9'], # ['D 0', 'D 1', 'D 2', 'D 3', 'D 4', 'D 5', 'D 6', 'D 7', 'D 8', 'D 9'], # ['E 0', 'E 1', 'E 2', 'E 3', 'E 4', 'E 5', 'E 6', 'E 7', 'E 8', 'E 9'], # ['F 0', 'F 1', 'F 2', 'F 3', 'F 4', 'F 5', 'F 6', 'F 7', 'F 8', 'F 9'], # ['G 0', 'G 1', 'G 2', 'G 3', 'G 4', 'G 5', 'G 6', 'G 7', 'G 8', 'G 9'], # ['H 0', 'H 1', 'H 2', 'H 3', 'H 4', 'H 5', 'H 6', 'H 7', 'H 8', 'H 9']] # # # Joining the graph asynchronously with a callback # (Note: only two levels, the deps are considered final # when the second task is ready). 
# # >>> unlock_graph.apply_async((A.apply_async(), # ... A_callback.s()), countdown=1) from __future__ import absolute_import, print_function, unicode_literals from celery import chord, group, task, signature, uuid from celery.result import AsyncResult, ResultSet, allow_join_result from collections import deque @task() def add(x, y): return x + y @task() def make_request(id, url): print('-get: {0!r}'.format(url)) return url @task() def B_callback(urls, id): print('-batch {0} done'.format(id)) return urls @task() def B(id): return chord( make_request.s(id, '{0} {1!r}'.format(id, i)) for i in range(10) )(B_callback.s(id)) @task() def A(): return group(B.s(c) for c in 'ABCDEFGH').apply_async() def joinall(R, timeout=None, propagate=True): stack = deque([R]) try: use_native = joinall.backend.supports_native_join except AttributeError: use_native = False while stack: res = stack.popleft() if isinstance(res, ResultSet): j = res.join_native if use_native else res.join stack.extend(j(timeout=timeout, propagate=propagate)) elif isinstance(res, AsyncResult): stack.append(res.get(timeout=timeout, propagate=propagate)) else: yield res @task() def unlock_graph(result, callback, interval=1, propagate=False, max_retries=None): if result.ready(): second_level_res = result.get() if second_level_res.ready(): with allow_join_result(): signature(callback).delay(list(joinall( second_level_res, propagate=propagate))) else: unlock_graph.retry(countdown=interval, max_retries=max_retries) @task() def A_callback(res): print('-everything done: {0!r}'.format(res)) return res class chord2(object): def __init__(self, tasks, **options): self.tasks = tasks self.options = options def __call__(self, body, **options): body.options.setdefault('task_id', uuid()) unlock_graph.apply_async() celery-4.1.0/examples/app/0000755000175000017500000000000013135426347015333 5ustar omeromer00000000000000celery-4.1.0/examples/app/myapp.py0000644000175000017500000000146013130607475017032 0ustar omeromer00000000000000"""myapp.py Usage:: (window1)$ python myapp.py worker -l info (window2)$ python >>> from myapp import add >>> add.delay(16, 16).get() 32 You can also specify the app to use with the `celery` command, using the `-A` / `--app` option:: $ celery -A myapp worker -l info With the `-A myproj` argument the program will search for an app instance in the module ``myproj``. You can also specify an explicit name using the fully qualified form:: $ celery -A myapp:app worker -l info """ from __future__ import absolute_import, unicode_literals from celery import Celery app = Celery( 'myapp', broker='amqp://guest@localhost//', # ## add result backend here if needed. # backend='rpc' ) @app.task def add(x, y): return x + y if __name__ == '__main__': app.start() celery-4.1.0/examples/tutorial/0000755000175000017500000000000013135426347016416 5ustar omeromer00000000000000celery-4.1.0/examples/tutorial/tasks.py0000644000175000017500000000032613130607475020114 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery import Celery app = Celery('tasks', broker='amqp://') @app.task() def add(x, y): return x + y if __name__ == '__main__': app.start() celery-4.1.0/examples/celery_http_gateway/0000755000175000017500000000000013135426347020616 5ustar omeromer00000000000000celery-4.1.0/examples/celery_http_gateway/settings.py0000644000175000017500000000577213130607475023041 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals # Django settings for celery_http_gateway project. 
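# Note: this example also requires the django-celery (djcelery) # package, which provides the views referenced in urls.py.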
import django DEBUG = True TEMPLATE_DEBUG = DEBUG CELERY_RESULT_BACKEND = 'database' BROKER_URL = 'amqp://guest:guest@localhost:5672//' ADMINS = ( # ('Your Name', 'your_email@domain.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'development.db', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } if django.VERSION[:3] < (1, 3): DATABASE_ENGINE = DATABASES['default']['ENGINE'] DATABASE_NAME = DATABASES['default']['NAME'] DATABASE_USER = DATABASES['default']['USER'] DATABASE_PASSWORD = DATABASES['default']['PASSWORD'] DATABASE_HOST = DATABASES['default']['HOST'] DATABASE_PORT = DATABASES['default']['PORT'] # Local time zone for this installation. Choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: '/home/media/media.lawrence.com/' MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there's a path component (optional in other cases). # Examples: 'http://media.lawrence.com', 'http://example.com/media/' MEDIA_URL = '' # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a # trailing slash. # Examples: 'http://foo.com/media/', '/media/'. ADMIN_MEDIA_PREFIX = '/media/' # Make this unique, and don't share it with anybody. # XXX TODO FIXME Set this secret key to anything you want, just change it! SECRET_KEY = 'This is not a secret, be sure to change this.' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', 'django.template.loaders.app_directories.load_template_source', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) ROOT_URLCONF = 'celery_http_gateway.urls' TEMPLATE_DIRS = ( # Put strings here, like '/home/html/django_templates' or # 'C:/www/django/templates'. # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'djcelery', ) celery-4.1.0/examples/celery_http_gateway/__init__.py0000644000175000017500000000000013130607475022713 0ustar omeromer00000000000000celery-4.1.0/examples/celery_http_gateway/manage.py0000644000175000017500000000070513130607475022420 0ustar omeromer00000000000000#!/usr/bin/env python from __future__ import absolute_import, unicode_literals from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. 
except ImportError: import sys sys.stderr.write( "Error: Can't find the file 'settings.py' in the directory " "containing {0!r}.".format(__file__)) sys.exit(1) if __name__ == '__main__': execute_manager(settings) celery-4.1.0/examples/celery_http_gateway/tasks.py0000644000175000017500000000023213130607475022310 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from celery import task @task() def hello_world(to='world'): return 'Hello {0}'.format(to) celery-4.1.0/examples/celery_http_gateway/README.rst0000644000175000017500000000254613130607475022312 0ustar omeromer00000000000000============================== Example Celery->HTTP Gateway ============================== This is an example service exposing the ability to apply tasks and query statuses/results over HTTP. Some familiarity with Django is recommended. `settings.py` contains the celery settings; you probably want to configure at least the broker-related settings. To run the service you have to run the following commands:: $ python manage.py syncdb # (if running the database backend) $ python manage.py runserver The service is now running at http://localhost:8000 You can apply tasks with the `/apply/` URL:: $ curl http://localhost:8000/apply/celery.ping/ {"ok": "true", "task_id": "e3a95109-afcd-4e54-a341-16c18fddf64b"} Then you can use the resulting task-id to get the return value:: $ curl http://localhost:8000/e3a95109-afcd-4e54-a341-16c18fddf64b/status/ {"task": {"status": "SUCCESS", "result": "pong", "id": "e3a95109-afcd-4e54-a341-16c18fddf64b"}} If you don't want to expose all tasks, there are a few possible approaches. For instance, you can extend the `apply` view to accept only a white-list of tasks. Another possibility is to make a view for every task you want to expose. We made one such view for ping in `views.ping`:: $ curl http://localhost:8000/ping/ {"ok": "true", "task_id": "383c902c-ba07-436b-b0f3-ea09cc22107c"} celery-4.1.0/examples/gevent/0000755000175000017500000000000013135426347016043 5ustar omeromer00000000000000celery-4.1.0/examples/gevent/celeryconfig.py0000644000175000017500000000046713130607475021073 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import os import sys sys.path.insert(0, os.getcwd()) # ## Note: Start worker with -P gevent, # do not use the worker_pool option. 
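# For example (a sketch, mirroring the eventlet README): # $ celery worker -l info -P gevent --concurrency=500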
broker_url = 'amqp://guest:guest@localhost:5672//' result_backend = 'amqp' result_expires = 30 * 60 imports = ('tasks',) celery-4.1.0/examples/gevent/tasks.py0000644000175000017500000000065313130607475017544 0ustar omeromer00000000000000from __future__ import absolute_import, print_function, unicode_literals import requests from celery import task @task(ignore_result=True) def urlopen(url): print('Opening: {0}'.format(url)) try: requests.get(url) except requests.exceptions.RequestException as exc: print('Exception for {0}: {1!r}'.format(url, exc)) return url, 0 print('Done with: {0}'.format(url)) return url, 1 celery-4.1.0/examples/README.rst0000644000175000017500000000045713130607475016246 0ustar omeromer00000000000000================= Celery Examples ================= * pythonproject Example Python project using celery. * httpexample Example project using remote tasks (webhook tasks) * celery_http_gateway Example HTTP service exposing the ability to apply tasks and query the resulting status/return value.